author    Michaël Zasso <targos@protonmail.com>  2017-03-21 10:16:54 +0100
committer Michaël Zasso <targos@protonmail.com>  2017-03-25 09:44:10 +0100
commit    c459d8ea5d402c702948c860d9497b2230ff7e8a (patch)
tree      56c282fc4d40e5cb613b47cf7be3ea0526ed5b6f /deps/v8/src
parent    e0bc5a7361b1d29c3ed034155fd779ce6f44fb13 (diff)
deps: update V8 to 5.7.492.69
PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r-- deps/v8/src/DEPS 2
-rw-r--r-- deps/v8/src/accessors.cc 101
-rw-r--r-- deps/v8/src/accessors.h 1
-rw-r--r-- deps/v8/src/allocation.cc 17
-rw-r--r-- deps/v8/src/allocation.h 19
-rw-r--r-- deps/v8/src/api-arguments-inl.h 20
-rw-r--r-- deps/v8/src/api-arguments.cc 15
-rw-r--r-- deps/v8/src/api-arguments.h 2
-rw-r--r-- deps/v8/src/api-experimental.cc 3
-rw-r--r-- deps/v8/src/api-natives.cc 67
-rw-r--r-- deps/v8/src/api.cc 727
-rw-r--r-- deps/v8/src/api.h 3
-rw-r--r-- deps/v8/src/arguments.h 3
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h 2
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc 786
-rw-r--r-- deps/v8/src/arm/assembler-arm.h 131
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc 486
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.h 19
-rw-r--r-- deps/v8/src/arm/codegen-arm.cc 358
-rw-r--r-- deps/v8/src/arm/constants-arm.h 25
-rw-r--r-- deps/v8/src/arm/disasm-arm.cc 379
-rw-r--r-- deps/v8/src/arm/interface-descriptors-arm.cc 8
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc 430
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h 104
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc 1201
-rw-r--r-- deps/v8/src/arm/simulator-arm.h 11
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.cc 15
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.h 3
-rw-r--r-- deps/v8/src/arm64/code-stubs-arm64.cc 452
-rw-r--r-- deps/v8/src/arm64/code-stubs-arm64.h 8
-rw-r--r-- deps/v8/src/arm64/codegen-arm64.cc 290
-rw-r--r-- deps/v8/src/arm64/interface-descriptors-arm64.cc 12
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.cc 316
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.h 94
-rw-r--r-- deps/v8/src/asmjs/OWNERS 1
-rw-r--r-- deps/v8/src/asmjs/asm-js.cc 129
-rw-r--r-- deps/v8/src/asmjs/asm-js.h 4
-rw-r--r-- deps/v8/src/asmjs/asm-typer.cc 407
-rw-r--r-- deps/v8/src/asmjs/asm-typer.h 80
-rw-r--r-- deps/v8/src/asmjs/asm-types.cc 1
-rw-r--r-- deps/v8/src/asmjs/asm-wasm-builder.cc 465
-rw-r--r-- deps/v8/src/asmjs/asm-wasm-builder.h 14
-rw-r--r-- deps/v8/src/assembler-inl.h 32
-rw-r--r-- deps/v8/src/assembler.cc 74
-rw-r--r-- deps/v8/src/assembler.h 47
-rw-r--r-- deps/v8/src/assert-scope.cc 10
-rw-r--r-- deps/v8/src/assert-scope.h 23
-rw-r--r-- deps/v8/src/ast/ast-expression-rewriter.cc 6
-rw-r--r-- deps/v8/src/ast/ast-function-literal-id-reindexer.cc 29
-rw-r--r-- deps/v8/src/ast/ast-function-literal-id-reindexer.h 36
-rw-r--r-- deps/v8/src/ast/ast-literal-reindexer.cc 4
-rw-r--r-- deps/v8/src/ast/ast-numbering.cc 119
-rw-r--r-- deps/v8/src/ast/ast-numbering.h 15
-rw-r--r-- deps/v8/src/ast/ast-traversal-visitor.h 8
-rw-r--r-- deps/v8/src/ast/ast-types.cc 7
-rw-r--r-- deps/v8/src/ast/ast-value-factory.cc 45
-rw-r--r-- deps/v8/src/ast/ast-value-factory.h 129
-rw-r--r-- deps/v8/src/ast/ast.cc 226
-rw-r--r-- deps/v8/src/ast/ast.h 353
-rw-r--r-- deps/v8/src/ast/compile-time-value.cc 4
-rw-r--r-- deps/v8/src/ast/compile-time-value.h 4
-rw-r--r-- deps/v8/src/ast/modules.cc 2
-rw-r--r-- deps/v8/src/ast/prettyprinter.cc 53
-rw-r--r-- deps/v8/src/ast/prettyprinter.h 4
-rw-r--r-- deps/v8/src/ast/scopes.cc 511
-rw-r--r-- deps/v8/src/ast/scopes.h 118
-rw-r--r-- deps/v8/src/ast/variables.cc 1
-rw-r--r-- deps/v8/src/bailout-reason.h 9
-rw-r--r-- deps/v8/src/base.isolate 3
-rw-r--r-- deps/v8/src/base/cpu.cc 10
-rw-r--r-- deps/v8/src/base/cpu.h 1
-rw-r--r-- deps/v8/src/base/hashmap.h 17
-rw-r--r-- deps/v8/src/base/iterator.h 2
-rw-r--r-- deps/v8/src/base/logging.cc 15
-rw-r--r-- deps/v8/src/base/logging.h 105
-rw-r--r-- deps/v8/src/base/macros.h 19
-rw-r--r-- deps/v8/src/base/platform/platform-linux.cc 110
-rw-r--r-- deps/v8/src/base/platform/platform-posix.cc 23
-rw-r--r-- deps/v8/src/base/platform/platform-win32.cc 7
-rw-r--r-- deps/v8/src/base/platform/platform.h 8
-rw-r--r-- deps/v8/src/bit-vector.cc 1
-rw-r--r-- deps/v8/src/bit-vector.h 2
-rw-r--r-- deps/v8/src/bootstrapper.cc 981
-rw-r--r-- deps/v8/src/bootstrapper.h 1
-rw-r--r-- deps/v8/src/builtins/arm/builtins-arm.cc 156
-rw-r--r-- deps/v8/src/builtins/arm64/builtins-arm64.cc 164
-rw-r--r-- deps/v8/src/builtins/builtins-api.cc 1
-rw-r--r-- deps/v8/src/builtins/builtins-array.cc 1806
-rw-r--r-- deps/v8/src/builtins/builtins-boolean.cc 29
-rw-r--r-- deps/v8/src/builtins/builtins-constructor.cc 772
-rw-r--r-- deps/v8/src/builtins/builtins-constructor.h 68
-rw-r--r-- deps/v8/src/builtins/builtins-conversion.cc 321
-rw-r--r-- deps/v8/src/builtins/builtins-date.cc 308
-rw-r--r-- deps/v8/src/builtins/builtins-function.cc 208
-rw-r--r-- deps/v8/src/builtins/builtins-generator.cc 36
-rw-r--r-- deps/v8/src/builtins/builtins-global.cc 107
-rw-r--r-- deps/v8/src/builtins/builtins-handler.cc 239
-rw-r--r-- deps/v8/src/builtins/builtins-ic.cc 78
-rw-r--r-- deps/v8/src/builtins/builtins-internal.cc 268
-rw-r--r-- deps/v8/src/builtins/builtins-iterator.cc 68
-rw-r--r-- deps/v8/src/builtins/builtins-math.cc 515
-rw-r--r-- deps/v8/src/builtins/builtins-number.cc 1663
-rw-r--r-- deps/v8/src/builtins/builtins-object.cc 698
-rw-r--r-- deps/v8/src/builtins/builtins-promise.cc 1574
-rw-r--r-- deps/v8/src/builtins/builtins-promise.h 120
-rw-r--r-- deps/v8/src/builtins/builtins-reflect.cc 34
-rw-r--r-- deps/v8/src/builtins/builtins-regexp.cc 3066
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer.cc 155
-rw-r--r-- deps/v8/src/builtins/builtins-string.cc 1451
-rw-r--r-- deps/v8/src/builtins/builtins-symbol.cc 76
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray.cc 152
-rw-r--r-- deps/v8/src/builtins/builtins-utils.h 35
-rw-r--r-- deps/v8/src/builtins/builtins.cc 17
-rw-r--r-- deps/v8/src/builtins/builtins.h 1396
-rw-r--r-- deps/v8/src/builtins/ia32/builtins-ia32.cc 160
-rw-r--r-- deps/v8/src/builtins/mips/builtins-mips.cc 171
-rw-r--r-- deps/v8/src/builtins/mips64/builtins-mips64.cc 469
-rw-r--r-- deps/v8/src/builtins/ppc/builtins-ppc.cc 174
-rw-r--r-- deps/v8/src/builtins/s390/builtins-s390.cc 168
-rw-r--r-- deps/v8/src/builtins/x64/builtins-x64.cc 175
-rw-r--r-- deps/v8/src/builtins/x87/OWNERS 1
-rw-r--r-- deps/v8/src/builtins/x87/builtins-x87.cc 168
-rw-r--r-- deps/v8/src/cancelable-task.cc 27
-rw-r--r-- deps/v8/src/cancelable-task.h 16
-rw-r--r-- deps/v8/src/code-events.h 2
-rw-r--r-- deps/v8/src/code-factory.cc 144
-rw-r--r-- deps/v8/src/code-factory.h 25
-rw-r--r-- deps/v8/src/code-stub-assembler.cc 3305
-rw-r--r-- deps/v8/src/code-stub-assembler.h 1469
-rw-r--r-- deps/v8/src/code-stubs-hydrogen.cc 460
-rw-r--r-- deps/v8/src/code-stubs.cc 1828
-rw-r--r-- deps/v8/src/code-stubs.h 687
-rw-r--r-- deps/v8/src/codegen.cc 92
-rw-r--r-- deps/v8/src/codegen.h 37
-rw-r--r-- deps/v8/src/compilation-info.cc 13
-rw-r--r-- deps/v8/src/compilation-info.h 12
-rw-r--r-- deps/v8/src/compilation-statistics.cc 10
-rw-r--r-- deps/v8/src/compilation-statistics.h 1
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc 213
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h 28
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc 34
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h 11
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc 631
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.h 175
-rw-r--r-- deps/v8/src/compiler.cc 415
-rw-r--r-- deps/v8/src/compiler.h 26
-rw-r--r-- deps/v8/src/compiler/OWNERS 1
-rw-r--r-- deps/v8/src/compiler/access-builder.cc 549
-rw-r--r-- deps/v8/src/compiler/access-builder.h 24
-rw-r--r-- deps/v8/src/compiler/access-info.cc 48
-rw-r--r-- deps/v8/src/compiler/access-info.h 3
-rw-r--r-- deps/v8/src/compiler/arm/code-generator-arm.cc 276
-rw-r--r-- deps/v8/src/compiler/arm/instruction-codes-arm.h 23
-rw-r--r-- deps/v8/src/compiler/arm/instruction-scheduler-arm.cc 21
-rw-r--r-- deps/v8/src/compiler/arm/instruction-selector-arm.cc 180
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc 66
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-selector-arm64.cc 44
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.cc 1361
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.h 61
-rw-r--r-- deps/v8/src/compiler/ast-loop-assignment-analyzer.cc 2
-rw-r--r-- deps/v8/src/compiler/branch-elimination.cc 21
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.cc 622
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.h 126
-rw-r--r-- deps/v8/src/compiler/bytecode-branch-analysis.cc 43
-rw-r--r-- deps/v8/src/compiler/bytecode-branch-analysis.h 65
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc 541
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.h 57
-rw-r--r-- deps/v8/src/compiler/bytecode-liveness-map.cc 42
-rw-r--r-- deps/v8/src/compiler/bytecode-liveness-map.h 119
-rw-r--r-- deps/v8/src/compiler/bytecode-loop-analysis.cc 100
-rw-r--r-- deps/v8/src/compiler/bytecode-loop-analysis.h 67
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc 1051
-rw-r--r-- deps/v8/src/compiler/code-assembler.h 369
-rw-r--r-- deps/v8/src/compiler/code-generator.cc 162
-rw-r--r-- deps/v8/src/compiler/code-generator.h 24
-rw-r--r-- deps/v8/src/compiler/common-operator-reducer.cc 69
-rw-r--r-- deps/v8/src/compiler/common-operator.cc 301
-rw-r--r-- deps/v8/src/compiler/common-operator.h 136
-rw-r--r-- deps/v8/src/compiler/control-builders.cc 61
-rw-r--r-- deps/v8/src/compiler/control-builders.h 53
-rw-r--r-- deps/v8/src/compiler/dead-code-elimination.cc 31
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc 3526
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h 244
-rw-r--r-- deps/v8/src/compiler/escape-analysis-reducer.cc 42
-rw-r--r-- deps/v8/src/compiler/escape-analysis-reducer.h 1
-rw-r--r-- deps/v8/src/compiler/escape-analysis.cc 74
-rw-r--r-- deps/v8/src/compiler/escape-analysis.h 2
-rw-r--r-- deps/v8/src/compiler/frame-elider.cc 35
-rw-r--r-- deps/v8/src/compiler/frame-states.cc 1
-rw-r--r-- deps/v8/src/compiler/frame.h 32
-rw-r--r-- deps/v8/src/compiler/graph-assembler.cc 287
-rw-r--r-- deps/v8/src/compiler/graph-assembler.h 449
-rw-r--r-- deps/v8/src/compiler/graph-reducer.cc 43
-rw-r--r-- deps/v8/src/compiler/graph-visualizer.cc 6
-rw-r--r-- deps/v8/src/compiler/ia32/code-generator-ia32.cc 196
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-selector-ia32.cc 94
-rw-r--r-- deps/v8/src/compiler/instruction-codes.h 9
-rw-r--r-- deps/v8/src/compiler/instruction-selector-impl.h 23
-rw-r--r-- deps/v8/src/compiler/instruction-selector.cc 261
-rw-r--r-- deps/v8/src/compiler/instruction-selector.h 20
-rw-r--r-- deps/v8/src/compiler/instruction.cc 16
-rw-r--r-- deps/v8/src/compiler/instruction.h 146
-rw-r--r-- deps/v8/src/compiler/int64-lowering.cc 10
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.cc 310
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.h 3
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc 200
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.h 13
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.cc 116
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.h 6
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc 227
-rw-r--r-- deps/v8/src/compiler/js-frame-specialization.cc 3
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc 207
-rw-r--r-- deps/v8/src/compiler/js-global-object-specialization.cc 61
-rw-r--r-- deps/v8/src/compiler/js-graph.cc 26
-rw-r--r-- deps/v8/src/compiler/js-graph.h 6
-rw-r--r-- deps/v8/src/compiler/js-inlining-heuristic.cc 4
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc 87
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.cc 31
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.h 3
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc 452
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.h 7
-rw-r--r-- deps/v8/src/compiler/js-operator.cc 196
-rw-r--r-- deps/v8/src/compiler/js-operator.h 90
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc 178
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.h 3
-rw-r--r-- deps/v8/src/compiler/linkage.cc 3
-rw-r--r-- deps/v8/src/compiler/load-elimination.cc 270
-rw-r--r-- deps/v8/src/compiler/load-elimination.h 55
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.cc 194
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.h 3
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.cc 80
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.h 5
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc 166
-rw-r--r-- deps/v8/src/compiler/machine-operator.h 14
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.cc 185
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.h 3
-rw-r--r-- deps/v8/src/compiler/mips/code-generator-mips.cc 223
-rw-r--r-- deps/v8/src/compiler/mips/instruction-codes-mips.h 4
-rw-r--r-- deps/v8/src/compiler/mips/instruction-selector-mips.cc 123
-rw-r--r-- deps/v8/src/compiler/mips64/code-generator-mips64.cc 383
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-codes-mips64.h 4
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-selector-mips64.cc 202
-rw-r--r-- deps/v8/src/compiler/node-marker.h 13
-rw-r--r-- deps/v8/src/compiler/node-properties.cc 14
-rw-r--r-- deps/v8/src/compiler/node-properties.h 5
-rw-r--r-- deps/v8/src/compiler/node.cc 6
-rw-r--r-- deps/v8/src/compiler/node.h 167
-rw-r--r-- deps/v8/src/compiler/opcodes.h 67
-rw-r--r-- deps/v8/src/compiler/operation-typer.cc 84
-rw-r--r-- deps/v8/src/compiler/operator-properties.cc 3
-rw-r--r-- deps/v8/src/compiler/osr.cc 23
-rw-r--r-- deps/v8/src/compiler/pipeline.cc 329
-rw-r--r-- deps/v8/src/compiler/pipeline.h 21
-rw-r--r-- deps/v8/src/compiler/ppc/code-generator-ppc.cc 210
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-codes-ppc.h 5
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc 3
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-selector-ppc.cc 40
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.cc 318
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h 64
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.cc 58
-rw-r--r-- deps/v8/src/compiler/redundancy-elimination.h 3
-rw-r--r-- deps/v8/src/compiler/register-allocator-verifier.cc 32
-rw-r--r-- deps/v8/src/compiler/register-allocator.cc 41
-rw-r--r-- deps/v8/src/compiler/representation-change.cc 65
-rw-r--r-- deps/v8/src/compiler/representation-change.h 1
-rw-r--r-- deps/v8/src/compiler/s390/code-generator-s390.cc 176
-rw-r--r-- deps/v8/src/compiler/s390/instruction-codes-s390.h 1
-rw-r--r-- deps/v8/src/compiler/s390/instruction-scheduler-s390.cc 1
-rw-r--r-- deps/v8/src/compiler/s390/instruction-selector-s390.cc 139
-rw-r--r-- deps/v8/src/compiler/schedule.cc 2
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.cc 254
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.h 7
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc 185
-rw-r--r-- deps/v8/src/compiler/simplified-operator-reducer.cc 9
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc 116
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h 76
-rw-r--r-- deps/v8/src/compiler/state-values-utils.cc 323
-rw-r--r-- deps/v8/src/compiler/state-values-utils.h 49
-rw-r--r-- deps/v8/src/compiler/type-cache.h 16
-rw-r--r-- deps/v8/src/compiler/type-hint-analyzer.cc 128
-rw-r--r-- deps/v8/src/compiler/type-hint-analyzer.h 57
-rw-r--r-- deps/v8/src/compiler/typed-optimization.cc 37
-rw-r--r-- deps/v8/src/compiler/typed-optimization.h 1
-rw-r--r-- deps/v8/src/compiler/typer.cc 105
-rw-r--r-- deps/v8/src/compiler/types.cc 27
-rw-r--r-- deps/v8/src/compiler/types.h 29
-rw-r--r-- deps/v8/src/compiler/value-numbering-reducer.cc 19
-rw-r--r-- deps/v8/src/compiler/verifier.cc 59
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc 858
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h 81
-rw-r--r-- deps/v8/src/compiler/wasm-linkage.cc 28
-rw-r--r-- deps/v8/src/compiler/x64/code-generator-x64.cc 311
-rw-r--r-- deps/v8/src/compiler/x64/instruction-codes-x64.h 8
-rw-r--r-- deps/v8/src/compiler/x64/instruction-scheduler-x64.cc 4
-rw-r--r-- deps/v8/src/compiler/x64/instruction-selector-x64.cc 266
-rw-r--r-- deps/v8/src/compiler/x87/code-generator-x87.cc 8
-rw-r--r-- deps/v8/src/compiler/x87/instruction-selector-x87.cc 14
-rw-r--r-- deps/v8/src/contexts-inl.h 7
-rw-r--r-- deps/v8/src/contexts.cc 200
-rw-r--r-- deps/v8/src/contexts.h 203
-rw-r--r-- deps/v8/src/conversions-inl.h 54
-rw-r--r-- deps/v8/src/conversions.cc 144
-rw-r--r-- deps/v8/src/conversions.h 8
-rw-r--r-- deps/v8/src/counters-inl.h 51
-rw-r--r-- deps/v8/src/counters.cc 50
-rw-r--r-- deps/v8/src/counters.h 186
-rw-r--r-- deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc 30
-rw-r--r-- deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc 16
-rw-r--r-- deps/v8/src/crankshaft/hydrogen-instructions.h 8
-rw-r--r-- deps/v8/src/crankshaft/hydrogen-types.cc 1
-rw-r--r-- deps/v8/src/crankshaft/hydrogen.cc 204
-rw-r--r-- deps/v8/src/crankshaft/hydrogen.h 48
-rw-r--r-- deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc 33
-rw-r--r-- deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc 40
-rw-r--r-- deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc 33
-rw-r--r-- deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc 55
-rw-r--r-- deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc 226
-rw-r--r-- deps/v8/src/crankshaft/s390/lithium-s390.cc 31
-rw-r--r-- deps/v8/src/crankshaft/s390/lithium-s390.h 16
-rw-r--r-- deps/v8/src/crankshaft/typing.cc 1
-rw-r--r-- deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc 20
-rw-r--r-- deps/v8/src/crankshaft/x87/OWNERS 1
-rw-r--r-- deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc 33
-rw-r--r-- deps/v8/src/d8.cc 305
-rw-r--r-- deps/v8/src/d8.h 8
-rw-r--r-- deps/v8/src/debug/arm/debug-arm.cc 2
-rw-r--r-- deps/v8/src/debug/arm64/debug-arm64.cc 2
-rw-r--r-- deps/v8/src/debug/debug-evaluate.cc 316
-rw-r--r-- deps/v8/src/debug/debug-evaluate.h 11
-rw-r--r-- deps/v8/src/debug/debug-frames.cc 49
-rw-r--r-- deps/v8/src/debug/debug-frames.h 6
-rw-r--r-- deps/v8/src/debug/debug-interface.h 343
-rw-r--r-- deps/v8/src/debug/debug-scopes.cc 64
-rw-r--r-- deps/v8/src/debug/debug-scopes.h 8
-rw-r--r-- deps/v8/src/debug/debug.cc 571
-rw-r--r-- deps/v8/src/debug/debug.h 145
-rw-r--r-- deps/v8/src/debug/debug.js 106
-rw-r--r-- deps/v8/src/debug/ia32/debug-ia32.cc 2
-rw-r--r-- deps/v8/src/debug/interface-types.h 75
-rw-r--r-- deps/v8/src/debug/liveedit.cc 46
-rw-r--r-- deps/v8/src/debug/liveedit.h 10
-rw-r--r-- deps/v8/src/debug/liveedit.js 119
-rw-r--r-- deps/v8/src/debug/mips/debug-mips.cc 2
-rw-r--r-- deps/v8/src/debug/mips64/debug-mips64.cc 2
-rw-r--r-- deps/v8/src/debug/mirrors.js 36
-rw-r--r-- deps/v8/src/debug/ppc/debug-ppc.cc 2
-rw-r--r-- deps/v8/src/debug/s390/debug-s390.cc 2
-rw-r--r-- deps/v8/src/debug/x64/debug-x64.cc 2
-rw-r--r-- deps/v8/src/debug/x87/OWNERS 1
-rw-r--r-- deps/v8/src/debug/x87/debug-x87.cc 2
-rw-r--r-- deps/v8/src/deoptimizer.cc 542
-rw-r--r-- deps/v8/src/deoptimizer.h 10
-rw-r--r-- deps/v8/src/elements-kind.cc 1
-rw-r--r-- deps/v8/src/elements.cc 200
-rw-r--r-- deps/v8/src/elements.h 3
-rw-r--r-- deps/v8/src/execution.cc 70
-rw-r--r-- deps/v8/src/execution.h 33
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc 1
-rw-r--r-- deps/v8/src/external-reference-table.cc 18
-rw-r--r-- deps/v8/src/factory.cc 247
-rw-r--r-- deps/v8/src/factory.h 53
-rw-r--r-- deps/v8/src/fast-accessor-assembler.cc 141
-rw-r--r-- deps/v8/src/fast-accessor-assembler.h 14
-rw-r--r-- deps/v8/src/field-type.cc 1
-rw-r--r-- deps/v8/src/flag-definitions.h 119
-rw-r--r-- deps/v8/src/frames-inl.h 14
-rw-r--r-- deps/v8/src/frames.cc 471
-rw-r--r-- deps/v8/src/frames.h 279
-rw-r--r-- deps/v8/src/full-codegen/arm/full-codegen-arm.cc 951
-rw-r--r-- deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc 954
-rw-r--r-- deps/v8/src/full-codegen/full-codegen.cc 552
-rw-r--r-- deps/v8/src/full-codegen/full-codegen.h 134
-rw-r--r-- deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc 912
-rw-r--r-- deps/v8/src/full-codegen/mips/full-codegen-mips.cc 937
-rw-r--r-- deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc 935
-rw-r--r-- deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc 944
-rw-r--r-- deps/v8/src/full-codegen/s390/full-codegen-s390.cc 989
-rw-r--r-- deps/v8/src/full-codegen/x64/full-codegen-x64.cc 918
-rw-r--r-- deps/v8/src/full-codegen/x87/OWNERS 1
-rw-r--r-- deps/v8/src/full-codegen/x87/full-codegen-x87.cc 912
-rw-r--r-- deps/v8/src/futex-emulation.cc 1
-rw-r--r-- deps/v8/src/global-handles.cc 31
-rw-r--r-- deps/v8/src/global-handles.h 11
-rw-r--r-- deps/v8/src/globals.h 75
-rw-r--r-- deps/v8/src/handles.h 9
-rw-r--r-- deps/v8/src/heap-symbols.h 39
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker.cc 4
-rw-r--r-- deps/v8/src/heap/embedder-tracing.cc 72
-rw-r--r-- deps/v8/src/heap/embedder-tracing.h 67
-rw-r--r-- deps/v8/src/heap/gc-idle-time-handler.cc 1
-rw-r--r-- deps/v8/src/heap/gc-idle-time-handler.h 2
-rw-r--r-- deps/v8/src/heap/gc-tracer.cc 14
-rw-r--r-- deps/v8/src/heap/gc-tracer.h 6
-rw-r--r-- deps/v8/src/heap/heap-inl.h 15
-rw-r--r-- deps/v8/src/heap/heap.cc 397
-rw-r--r-- deps/v8/src/heap/heap.h 129
-rw-r--r-- deps/v8/src/heap/incremental-marking.cc 126
-rw-r--r-- deps/v8/src/heap/incremental-marking.h 8
-rw-r--r-- deps/v8/src/heap/mark-compact-inl.h 9
-rw-r--r-- deps/v8/src/heap/mark-compact.cc 341
-rw-r--r-- deps/v8/src/heap/mark-compact.h 62
-rw-r--r-- deps/v8/src/heap/memory-reducer.cc 40
-rw-r--r-- deps/v8/src/heap/memory-reducer.h 15
-rw-r--r-- deps/v8/src/heap/object-stats.cc 13
-rw-r--r-- deps/v8/src/heap/objects-visiting-inl.h 38
-rw-r--r-- deps/v8/src/heap/objects-visiting.cc 2
-rw-r--r-- deps/v8/src/heap/objects-visiting.h 15
-rw-r--r-- deps/v8/src/heap/remembered-set.h 15
-rw-r--r-- deps/v8/src/heap/scavenger.cc 2
-rw-r--r-- deps/v8/src/heap/slot-set.h 12
-rw-r--r-- deps/v8/src/heap/spaces-inl.h 67
-rw-r--r-- deps/v8/src/heap/spaces.cc 88
-rw-r--r-- deps/v8/src/heap/spaces.h 72
-rw-r--r-- deps/v8/src/heap/store-buffer.cc 34
-rw-r--r-- deps/v8/src/heap/store-buffer.h 93
-rw-r--r-- deps/v8/src/i18n.cc 84
-rw-r--r-- deps/v8/src/i18n.h 38
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc 9
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h 3
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc 568
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.h 18
-rw-r--r-- deps/v8/src/ia32/codegen-ia32.cc 331
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc 3
-rw-r--r-- deps/v8/src/ia32/interface-descriptors-ia32.cc 9
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc 292
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h 81
-rw-r--r-- deps/v8/src/ic/accessor-assembler-impl.h 203
-rw-r--r-- deps/v8/src/ic/accessor-assembler.cc 1933
-rw-r--r-- deps/v8/src/ic/accessor-assembler.h 45
-rw-r--r-- deps/v8/src/ic/arm/handler-compiler-arm.cc 79
-rw-r--r-- deps/v8/src/ic/arm/ic-arm.cc 485
-rw-r--r-- deps/v8/src/ic/arm/ic-compiler-arm.cc 33
-rw-r--r-- deps/v8/src/ic/arm/stub-cache-arm.cc 157
-rw-r--r-- deps/v8/src/ic/arm64/handler-compiler-arm64.cc 78
-rw-r--r-- deps/v8/src/ic/arm64/ic-arm64.cc 450
-rw-r--r-- deps/v8/src/ic/arm64/ic-compiler-arm64.cc 33
-rw-r--r-- deps/v8/src/ic/arm64/stub-cache-arm64.cc 156
-rw-r--r-- deps/v8/src/ic/handler-compiler.cc 320
-rw-r--r-- deps/v8/src/ic/handler-compiler.h 43
-rw-r--r-- deps/v8/src/ic/handler-configuration-inl.h 3
-rw-r--r-- deps/v8/src/ic/ia32/handler-compiler-ia32.cc 82
-rw-r--r-- deps/v8/src/ic/ia32/ic-compiler-ia32.cc 45
-rw-r--r-- deps/v8/src/ic/ia32/ic-ia32.cc 478
-rw-r--r-- deps/v8/src/ic/ia32/stub-cache-ia32.cc 185
-rw-r--r-- deps/v8/src/ic/ic-compiler.cc 45
-rw-r--r-- deps/v8/src/ic/ic-compiler.h 15
-rw-r--r-- deps/v8/src/ic/ic-inl.h 4
-rw-r--r-- deps/v8/src/ic/ic-state.cc 17
-rw-r--r-- deps/v8/src/ic/ic-state.h 8
-rw-r--r-- deps/v8/src/ic/ic-stats.cc 144
-rw-r--r-- deps/v8/src/ic/ic-stats.h 77
-rw-r--r-- deps/v8/src/ic/ic.cc 523
-rw-r--r-- deps/v8/src/ic/ic.h 23
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.cc 355
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.h 9
-rw-r--r-- deps/v8/src/ic/mips/handler-compiler-mips.cc 79
-rw-r--r-- deps/v8/src/ic/mips/ic-compiler-mips.cc 33
-rw-r--r-- deps/v8/src/ic/mips/ic-mips.cc 483
-rw-r--r-- deps/v8/src/ic/mips/stub-cache-mips.cc 157
-rw-r--r-- deps/v8/src/ic/mips64/handler-compiler-mips64.cc 79
-rw-r--r-- deps/v8/src/ic/mips64/ic-compiler-mips64.cc 33
-rw-r--r-- deps/v8/src/ic/mips64/ic-mips64.cc 484
-rw-r--r-- deps/v8/src/ic/mips64/stub-cache-mips64.cc 161
-rw-r--r-- deps/v8/src/ic/ppc/handler-compiler-ppc.cc 80
-rw-r--r-- deps/v8/src/ic/ppc/ic-compiler-ppc.cc 31
-rw-r--r-- deps/v8/src/ic/ppc/ic-ppc.cc 482
-rw-r--r-- deps/v8/src/ic/ppc/stub-cache-ppc.cc 176
-rw-r--r-- deps/v8/src/ic/s390/handler-compiler-s390.cc 72
-rw-r--r-- deps/v8/src/ic/s390/ic-compiler-s390.cc 29
-rw-r--r-- deps/v8/src/ic/s390/ic-s390.cc 477
-rw-r--r-- deps/v8/src/ic/s390/stub-cache-s390.cc 173
-rw-r--r-- deps/v8/src/ic/stub-cache.h 7
-rw-r--r-- deps/v8/src/ic/x64/handler-compiler-x64.cc 82
-rw-r--r-- deps/v8/src/ic/x64/ic-compiler-x64.cc 39
-rw-r--r-- deps/v8/src/ic/x64/ic-x64.cc 476
-rw-r--r-- deps/v8/src/ic/x64/stub-cache-x64.cc 153
-rw-r--r-- deps/v8/src/ic/x87/OWNERS 1
-rw-r--r-- deps/v8/src/ic/x87/handler-compiler-x87.cc 82
-rw-r--r-- deps/v8/src/ic/x87/ic-compiler-x87.cc 45
-rw-r--r-- deps/v8/src/ic/x87/ic-x87.cc 478
-rw-r--r-- deps/v8/src/ic/x87/stub-cache-x87.cc 185
-rw-r--r-- deps/v8/src/inspector/BUILD.gn 5
-rw-r--r-- deps/v8/src/inspector/DEPS 1
-rw-r--r-- deps/v8/src/inspector/debugger-script.js 74
-rw-r--r-- deps/v8/src/inspector/debugger_script_externs.js 51
-rw-r--r-- deps/v8/src/inspector/injected-script-native.cc 6
-rw-r--r-- deps/v8/src/inspector/injected-script-native.h 3
-rw-r--r-- deps/v8/src/inspector/injected-script-source.js 21
-rw-r--r-- deps/v8/src/inspector/injected-script.cc 28
-rw-r--r-- deps/v8/src/inspector/injected-script.h 6
-rw-r--r-- deps/v8/src/inspector/inspected-context.cc 12
-rw-r--r-- deps/v8/src/inspector/inspected-context.h 2
-rw-r--r-- deps/v8/src/inspector/inspector.gyp 28
-rw-r--r-- deps/v8/src/inspector/inspector.gypi 5
-rw-r--r-- deps/v8/src/inspector/inspector_protocol_config.json 29
-rw-r--r-- deps/v8/src/inspector/java-script-call-frame.cc 16
-rw-r--r-- deps/v8/src/inspector/java-script-call-frame.h 7
-rw-r--r-- deps/v8/src/inspector/js_protocol.json 12
-rw-r--r-- deps/v8/src/inspector/protocol-platform.h 21
-rw-r--r-- deps/v8/src/inspector/remote-object-id.cc 3
-rw-r--r-- deps/v8/src/inspector/script-breakpoint.h 21
-rw-r--r-- deps/v8/src/inspector/search-util.cc 3
-rw-r--r-- deps/v8/src/inspector/string-16.cc 27
-rw-r--r-- deps/v8/src/inspector/string-16.h 2
-rw-r--r-- deps/v8/src/inspector/string-util.cc 16
-rw-r--r-- deps/v8/src/inspector/string-util.h 8
-rw-r--r-- deps/v8/src/inspector/test-interface.cc 18
-rw-r--r-- deps/v8/src/inspector/test-interface.h 18
-rw-r--r-- deps/v8/src/inspector/v8-console-message.cc 43
-rw-r--r-- deps/v8/src/inspector/v8-console-message.h 8
-rw-r--r-- deps/v8/src/inspector/v8-console.cc 23
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.cc 202
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.h 3
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.cc 230
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.h 60
-rw-r--r-- deps/v8/src/inspector/v8-debugger.cc 428
-rw-r--r-- deps/v8/src/inspector/v8-debugger.h 49
-rw-r--r-- deps/v8/src/inspector/v8-function-call.cc 3
-rw-r--r-- deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc 10
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.cc 137
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.h 22
-rw-r--r-- deps/v8/src/inspector/v8-inspector-session-impl.cc 69
-rw-r--r-- deps/v8/src/inspector/v8-inspector-session-impl.h 6
-rw-r--r-- deps/v8/src/inspector/v8-internal-value-type.cc 1
-rw-r--r-- deps/v8/src/inspector/v8-profiler-agent-impl.cc 4
-rw-r--r-- deps/v8/src/inspector/v8-profiler-agent-impl.h 2
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.cc 32
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.cc 38
-rw-r--r-- deps/v8/src/inspector/wasm-translation.cc 309
-rw-r--r-- deps/v8/src/inspector/wasm-translation.h 75
-rw-r--r-- deps/v8/src/interface-descriptors.cc 110
-rw-r--r-- deps/v8/src/interface-descriptors.h 74
-rw-r--r-- deps/v8/src/interpreter/OWNERS 1
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.cc 205
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.h 76
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.cc 116
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.h 51
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-iterator.cc 175
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-iterator.h 49
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-random-iterator.cc 37
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-random-iterator.h 78
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-writer.cc 11
-rw-r--r-- deps/v8/src/interpreter/bytecode-flags.cc 8
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.cc 459
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.h 13
-rw-r--r-- deps/v8/src/interpreter/bytecode-label.cc 1
-rw-r--r-- deps/v8/src/interpreter/bytecode-label.h 4
-rw-r--r-- deps/v8/src/interpreter/bytecode-operands.h 55
-rw-r--r-- deps/v8/src/interpreter/bytecode-peephole-optimizer.cc 73
-rw-r--r-- deps/v8/src/interpreter/bytecode-peephole-table.h 21
-rw-r--r-- deps/v8/src/interpreter/bytecode-pipeline.h 138
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-optimizer.cc 34
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-optimizer.h 27
-rw-r--r-- deps/v8/src/interpreter/bytecodes.h 221
-rw-r--r-- deps/v8/src/interpreter/constant-array-builder.cc 34
-rw-r--r-- deps/v8/src/interpreter/constant-array-builder.h 5
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.cc 7
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.h 19
-rw-r--r-- deps/v8/src/interpreter/handler-table-builder.h 2
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.cc 217
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.h 50
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics.cc 33
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics.h 37
-rw-r--r-- deps/v8/src/interpreter/interpreter.cc 1052
-rw-r--r-- deps/v8/src/interpreter/interpreter.h 9
-rw-r--r-- deps/v8/src/interpreter/mkpeephole.cc 22
-rw-r--r-- deps/v8/src/isolate-inl.h 5
-rw-r--r-- deps/v8/src/isolate.cc 563
-rw-r--r-- deps/v8/src/isolate.h 93
-rw-r--r-- deps/v8/src/js/array.js 23
-rw-r--r-- deps/v8/src/js/arraybuffer.js 8
-rw-r--r-- deps/v8/src/js/async-await.js 61
-rw-r--r-- deps/v8/src/js/collection.js 16
-rw-r--r-- deps/v8/src/js/i18n.js 385
-rw-r--r-- deps/v8/src/js/macros.py 1
-rw-r--r-- deps/v8/src/js/prologue.js 26
-rw-r--r-- deps/v8/src/js/promise.js 524
-rw-r--r-- deps/v8/src/js/string.js 2
-rw-r--r-- deps/v8/src/js/symbol.js 68
-rw-r--r-- deps/v8/src/js/typedarray.js 8
-rw-r--r-- deps/v8/src/json-parser.cc 5
-rw-r--r-- deps/v8/src/json-stringifier.cc 3
-rw-r--r-- deps/v8/src/keys.cc 7
-rw-r--r-- deps/v8/src/layout-descriptor-inl.h 2
-rw-r--r-- deps/v8/src/layout-descriptor.cc 3
-rw-r--r-- deps/v8/src/layout-descriptor.h 1
-rw-r--r-- deps/v8/src/libplatform/default-platform.cc 48
-rw-r--r-- deps/v8/src/libplatform/default-platform.h 6
-rw-r--r-- deps/v8/src/libplatform/tracing/trace-config.cc 9
-rw-r--r-- deps/v8/src/list-inl.h 3
-rw-r--r-- deps/v8/src/list.h 6
-rw-r--r-- deps/v8/src/log-utils.h 4
-rw-r--r-- deps/v8/src/log.cc 55
-rw-r--r-- deps/v8/src/lookup.cc 41
-rw-r--r-- deps/v8/src/lookup.h 2
-rw-r--r-- deps/v8/src/machine-type.h 30
-rw-r--r-- deps/v8/src/macro-assembler.h 21
-rw-r--r-- deps/v8/src/map-updater.cc 615
-rw-r--r-- deps/v8/src/map-updater.h 173
-rw-r--r-- deps/v8/src/messages.cc 359
-rw-r--r-- deps/v8/src/messages.h 79
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc 44
-rw-r--r-- deps/v8/src/mips/assembler-mips.h 33
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc 491
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.h 19
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc 373
-rw-r--r-- deps/v8/src/mips/interface-descriptors-mips.cc 8
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc 675
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h 146
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc 8
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.cc 134
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.h 19
-rw-r--r-- deps/v8/src/mips64/code-stubs-mips64.cc 491
-rw-r--r-- deps/v8/src/mips64/code-stubs-mips64.h 19
-rw-r--r-- deps/v8/src/mips64/codegen-mips64.cc 370
-rw-r--r-- deps/v8/src/mips64/interface-descriptors-mips64.cc 8
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.cc 666
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.h 148
-rw-r--r-- deps/v8/src/mips64/simulator-mips64.cc 8
-rw-r--r-- deps/v8/src/objects-body-descriptors-inl.h 7
-rw-r--r-- deps/v8/src/objects-debug.cc 111
-rw-r--r-- deps/v8/src/objects-inl.h 1514
-rw-r--r-- deps/v8/src/objects-printer.cc 300
-rw-r--r-- deps/v8/src/objects.cc 2428
-rw-r--r-- deps/v8/src/objects.h 1282
-rw-r--r-- deps/v8/src/objects/module-info.h 129
-rw-r--r-- deps/v8/src/objects/object-macros-undef.h 9
-rw-r--r-- deps/v8/src/objects/object-macros.h 32
-rw-r--r-- deps/v8/src/objects/scope-info.cc (renamed from deps/v8/src/ast/scopeinfo.cc) 54
-rw-r--r-- deps/v8/src/objects/scope-info.h 345
-rw-r--r-- deps/v8/src/parsing/OWNERS 1
-rw-r--r-- deps/v8/src/parsing/duplicate-finder.cc 69
-rw-r--r-- deps/v8/src/parsing/duplicate-finder.h 28
-rw-r--r-- deps/v8/src/parsing/func-name-inferrer.cc 7
-rw-r--r-- deps/v8/src/parsing/parameter-initializer-rewriter.cc 3
-rw-r--r-- deps/v8/src/parsing/parse-info.cc 18
-rw-r--r-- deps/v8/src/parsing/parse-info.h 24
-rw-r--r-- deps/v8/src/parsing/parser-base.h 304
-rw-r--r-- deps/v8/src/parsing/parser.cc 875
-rw-r--r-- deps/v8/src/parsing/parser.h 122
-rw-r--r-- deps/v8/src/parsing/parsing.cc 62
-rw-r--r-- deps/v8/src/parsing/parsing.h 34
-rw-r--r-- deps/v8/src/parsing/pattern-rewriter.cc 15
-rw-r--r-- deps/v8/src/parsing/preparse-data-format.h 2
-rw-r--r-- deps/v8/src/parsing/preparse-data.cc 5
-rw-r--r-- deps/v8/src/parsing/preparse-data.h 12
-rw-r--r-- deps/v8/src/parsing/preparser.cc 89
-rw-r--r-- deps/v8/src/parsing/preparser.h 298
-rw-r--r-- deps/v8/src/parsing/rewriter.cc 36
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.cc 73
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.h 4
-rw-r--r-- deps/v8/src/parsing/scanner.cc 105
-rw-r--r-- deps/v8/src/parsing/scanner.h 90
-rw-r--r-- deps/v8/src/perf-jit.cc 98
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.cc 56
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.h 20
-rw-r--r-- deps/v8/src/ppc/code-stubs-ppc.cc 492
-rw-r--r-- deps/v8/src/ppc/code-stubs-ppc.h 13
-rw-r--r-- deps/v8/src/ppc/codegen-ppc.cc 324
-rw-r--r-- deps/v8/src/ppc/constants-ppc.h 53
-rw-r--r-- deps/v8/src/ppc/disasm-ppc.cc 45
-rw-r--r-- deps/v8/src/ppc/interface-descriptors-ppc.cc 8
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.cc 260
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.h 76
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.cc 128
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.h 1
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator-inl.h 14
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.cc 33
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.h 17
-rw-r--r-- deps/v8/src/profiler/profile-generator.cc 26
-rw-r--r-- deps/v8/src/profiler/profiler-listener.cc 7
-rw-r--r-- deps/v8/src/promise-utils.cc 75
-rw-r--r-- deps/v8/src/promise-utils.h 32
-rw-r--r-- deps/v8/src/property-descriptor.cc 23
-rw-r--r-- deps/v8/src/property-details.h 62
-rw-r--r-- deps/v8/src/property.cc 77
-rw-r--r-- deps/v8/src/property.h 75
-rw-r--r-- deps/v8/src/prototype.h 4
-rw-r--r-- deps/v8/src/regexp/interpreter-irregexp.cc 1
-rw-r--r-- deps/v8/src/regexp/jsregexp.cc 31
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc 3
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-tracer.cc 1
-rw-r--r-- deps/v8/src/regexp/regexp-parser.cc 56
-rw-r--r-- deps/v8/src/regexp/regexp-utils.cc 9
-rw-r--r-- deps/v8/src/regexp/x87/OWNERS 1
-rw-r--r-- deps/v8/src/runtime-profiler.cc 6
-rw-r--r-- deps/v8/src/runtime/runtime-array.cc 99
-rw-r--r-- deps/v8/src/runtime/runtime-atomics.cc 16
-rw-r--r-- deps/v8/src/runtime/runtime-classes.cc 182
-rw-r--r-- deps/v8/src/runtime/runtime-collections.cc 54
-rw-r--r-- deps/v8/src/runtime/runtime-compiler.cc 35
-rw-r--r-- deps/v8/src/runtime/runtime-debug.cc 414
-rw-r--r-- deps/v8/src/runtime/runtime-error.cc 2
-rw-r--r-- deps/v8/src/runtime/runtime-function.cc 38
-rw-r--r-- deps/v8/src/runtime/runtime-futex.cc 6
-rw-r--r-- deps/v8/src/runtime/runtime-generator.cc 83
-rw-r--r-- deps/v8/src/runtime/runtime-i18n.cc 453
-rw-r--r-- deps/v8/src/runtime/runtime-internal.cc 166
-rw-r--r-- deps/v8/src/runtime/runtime-interpreter.cc 20
-rw-r--r-- deps/v8/src/runtime/runtime-literals.cc 38
-rw-r--r-- deps/v8/src/runtime/runtime-liveedit.cc 40
-rw-r--r-- deps/v8/src/runtime/runtime-maths.cc 2
-rw-r--r-- deps/v8/src/runtime/runtime-module.cc 6
-rw-r--r-- deps/v8/src/runtime/runtime-numbers.cc 22
-rw-r--r-- deps/v8/src/runtime/runtime-object.cc 172
-rw-r--r-- deps/v8/src/runtime/runtime-promise.cc 270
-rw-r--r-- deps/v8/src/runtime/runtime-proxy.cc 12
-rw-r--r-- deps/v8/src/runtime/runtime-regexp.cc 263
-rw-r--r-- deps/v8/src/runtime/runtime-scopes.cc 95
-rw-r--r-- deps/v8/src/runtime/runtime-simd.cc 64
-rw-r--r-- deps/v8/src/runtime/runtime-strings.cc 170
-rw-r--r-- deps/v8/src/runtime/runtime-symbol.cc 15
-rw-r--r-- deps/v8/src/runtime/runtime-test.cc 218
-rw-r--r-- deps/v8/src/runtime/runtime-typedarray.cc 22
-rw-r--r-- deps/v8/src/runtime/runtime-utils.h 11
-rw-r--r-- deps/v8/src/runtime/runtime-wasm.cc 142
-rw-r--r-- deps/v8/src/runtime/runtime.cc 1
-rw-r--r-- deps/v8/src/runtime/runtime.h 146
-rw-r--r-- deps/v8/src/s390/assembler-s390.cc 603
-rw-r--r-- deps/v8/src/s390/assembler-s390.h 354
-rw-r--r-- deps/v8/src/s390/code-stubs-s390.cc 495
-rw-r--r-- deps/v8/src/s390/code-stubs-s390.h 13
-rw-r--r-- deps/v8/src/s390/codegen-s390.cc 326
-rw-r--r-- deps/v8/src/s390/constants-s390.h 2403
-rw-r--r-- deps/v8/src/s390/disasm-s390.cc 36
-rw-r--r-- deps/v8/src/s390/interface-descriptors-s390.cc 8
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.cc 471
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.h 101
-rw-r--r-- deps/v8/src/s390/simulator-s390.cc 244
-rw-r--r-- deps/v8/src/s390/simulator-s390.h 9
-rw-r--r-- deps/v8/src/snapshot/code-serializer.cc 27
-rw-r--r-- deps/v8/src/snapshot/deserializer.cc 22
-rw-r--r-- deps/v8/src/snapshot/deserializer.h 9
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.cc 12
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.h 2
-rw-r--r-- deps/v8/src/snapshot/serializer-common.cc 12
-rw-r--r-- deps/v8/src/snapshot/serializer-common.h 2
-rw-r--r-- deps/v8/src/snapshot/snapshot-common.cc 8
-rw-r--r-- deps/v8/src/snapshot/snapshot-source-sink.cc 2
-rw-r--r-- deps/v8/src/snapshot/snapshot.h 3
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.cc 13
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.h 1
-rw-r--r-- deps/v8/src/source-position.cc 33
-rw-r--r-- deps/v8/src/source-position.h 9
-rw-r--r-- deps/v8/src/string-case.cc 130
-rw-r--r-- deps/v8/src/string-case.h 17
-rw-r--r-- deps/v8/src/string-stream.cc 56
-rw-r--r-- deps/v8/src/string-stream.h 116
-rw-r--r-- deps/v8/src/tracing/traced-value.cc 40
-rw-r--r-- deps/v8/src/tracing/traced-value.h 9
-rw-r--r-- deps/v8/src/tracing/tracing-category-observer.cc 7
-rw-r--r-- deps/v8/src/transitions.cc 6
-rw-r--r-- deps/v8/src/trap-handler/trap-handler.h 26
-rw-r--r-- deps/v8/src/type-feedback-vector-inl.h 23
-rw-r--r-- deps/v8/src/type-feedback-vector.cc 205
-rw-r--r-- deps/v8/src/type-feedback-vector.h 107
-rw-r--r-- deps/v8/src/type-hints.cc 51
-rw-r--r-- deps/v8/src/type-hints.h 4
-rw-r--r-- deps/v8/src/type-info.cc 4
-rw-r--r-- deps/v8/src/utils.h 51
-rw-r--r-- deps/v8/src/v8.cc 2
-rw-r--r-- deps/v8/src/v8.gyp 79
-rw-r--r-- deps/v8/src/value-serializer.cc 70
-rw-r--r-- deps/v8/src/value-serializer.h 3
-rw-r--r-- deps/v8/src/vector.h 5
-rw-r--r-- deps/v8/src/version.cc 22
-rw-r--r-- deps/v8/src/wasm/OWNERS 1
-rw-r--r-- deps/v8/src/wasm/decoder.h 43
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.cc (renamed from deps/v8/src/wasm/ast-decoder.cc) 936
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.h (renamed from deps/v8/src/wasm/ast-decoder.h) 166
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc 389
-rw-r--r-- deps/v8/src/wasm/module-decoder.h 24
-rw-r--r-- deps/v8/src/wasm/wasm-debug.cc 341
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.cc 13
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.h 6
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.cc 199
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.h 36
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc 719
-rw-r--r-- deps/v8/src/wasm/wasm-js.h 11
-rw-r--r-- deps/v8/src/wasm/wasm-limits.h 45
-rw-r--r-- deps/v8/src/wasm/wasm-macro-gen.h 62
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.cc 118
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.h 30
-rw-r--r-- deps/v8/src/wasm/wasm-module.cc 1913
-rw-r--r-- deps/v8/src/wasm/wasm-module.h 234
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc 678
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h 321
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.cc 5
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.h 219
-rw-r--r-- deps/v8/src/wasm/wasm-result.cc 11
-rw-r--r-- deps/v8/src/wasm/wasm-result.h 3
-rw-r--r-- deps/v8/src/wasm/wasm-text.cc 312
-rw-r--r-- deps/v8/src/wasm/wasm-text.h 38
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h 2
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc 187
-rw-r--r-- deps/v8/src/x64/assembler-x64.h 3
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc 428
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.h 17
-rw-r--r-- deps/v8/src/x64/codegen-x64.cc 331
-rw-r--r-- deps/v8/src/x64/interface-descriptors-x64.cc 9
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc 282
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h 85
-rw-r--r-- deps/v8/src/x87/OWNERS 1
-rw-r--r-- deps/v8/src/x87/assembler-x87.cc 9
-rw-r--r-- deps/v8/src/x87/assembler-x87.h 3
-rw-r--r-- deps/v8/src/x87/code-stubs-x87.cc 565
-rw-r--r-- deps/v8/src/x87/code-stubs-x87.h 18
-rw-r--r-- deps/v8/src/x87/codegen-x87.cc 296
-rw-r--r-- deps/v8/src/x87/deoptimizer-x87.cc 3
-rw-r--r-- deps/v8/src/x87/interface-descriptors-x87.cc 9
-rw-r--r-- deps/v8/src/x87/macro-assembler-x87.cc 290
-rw-r--r-- deps/v8/src/x87/macro-assembler-x87.h 80
-rw-r--r-- deps/v8/src/zone/zone-chunk-list.h 1
-rw-r--r-- deps/v8/src/zone/zone-containers.h 7
-rw-r--r-- deps/v8/src/zone/zone-handle-set.h 165
-rw-r--r-- deps/v8/src/zone/zone.cc 1
816 files changed, 62205 insertions, 67957 deletions
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 9114669a6d..e9026b130d 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -10,7 +10,9 @@ include_rules = [
"+src/heap/heap-inl.h",
"-src/inspector",
"-src/interpreter",
+ "+src/interpreter/bytecode-array-accessor.h",
"+src/interpreter/bytecode-array-iterator.h",
+ "+src/interpreter/bytecode-array-random-iterator.h",
"+src/interpreter/bytecode-decoder.h",
"+src/interpreter/bytecode-flags.h",
"+src/interpreter/bytecode-register.h",
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 9ec24b84c7..1f2ce97240 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -167,16 +167,38 @@ void Accessors::ArrayLengthSetter(
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
+ DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
+
Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
Handle<Object> length_obj = Utils::OpenHandle(*val);
+ bool was_readonly = JSArray::HasReadOnlyLength(array);
+
uint32_t length = 0;
if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
isolate->OptionalRescheduleException(false);
return;
}
+ if (!was_readonly && V8_UNLIKELY(JSArray::HasReadOnlyLength(array)) &&
+ length != array->length()->Number()) {
+ // AnythingToArrayLength() may have called setter re-entrantly and modified
+ // its property descriptor. Don't perform this check if "length" was
+ // previously readonly, as this may have been called during
+ // DefineOwnPropertyIgnoreAttributes().
+ if (info.ShouldThrowOnError()) {
+ Factory* factory = isolate->factory();
+ isolate->Throw(*factory->NewTypeError(
+ MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
+ i::Object::TypeOf(isolate, object), object));
+ isolate->OptionalRescheduleException(false);
+ } else {
+ info.GetReturnValue().Set(false);
+ }
+ return;
+ }
+
JSArray::SetLength(array, length);
uint32_t actual_new_len = 0;
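
Aside: the guard added to ArrayLengthSetter above uses a snapshot-and-recheck
pattern: record whether "length" was read-only before coercing the new value,
because the coercion can re-enter arbitrary user JavaScript that redefines the
property. A minimal standalone C++ sketch of that control flow, using
hypothetical stand-ins (FakeArray, AnythingToLength) rather than V8's types:

    #include <cstdint>
    #include <iostream>

    struct FakeArray {
      bool length_read_only = false;
      double length = 3;
    };

    // Models JSArray::AnythingToArrayLength: coercing the incoming value may
    // re-enter user code (e.g. a valueOf hook) that flips the attribute.
    bool AnythingToLength(FakeArray* a, uint32_t* out) {
      a->length_read_only = true;  // simulated re-entrant redefinition
      *out = 7;
      return true;
    }

    bool SetLength(FakeArray* a) {
      bool was_readonly = a->length_read_only;  // snapshot before coercion
      uint32_t len = 0;
      if (!AnythingToLength(a, &len)) return false;
      // Recheck, mirroring the hunk's condition: fail only if "length"
      // became read-only during the coercion and the value would change.
      if (!was_readonly && a->length_read_only &&
          len != static_cast<uint32_t>(a->length)) {
        return false;  // the real setter throws a TypeError in strict mode
      }
      a->length = len;
      return true;
    }

    int main() {
      FakeArray a;
      std::cout << std::boolalpha << SetLength(&a) << "\n";  // prints false
    }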
@@ -518,34 +540,6 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
//
-// Accessors::ScriptIsEmbedderDebugScript
-//
-
-
-void Accessors::ScriptIsEmbedderDebugScriptGetter(
- v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
- DisallowHeapAllocation no_allocation;
- HandleScope scope(isolate);
- Object* object = *Utils::OpenHandle(*info.Holder());
- bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
- ->origin_options()
- .IsEmbedderDebugScript();
- Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
- info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-
-Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
- Isolate* isolate, PropertyAttributes attributes) {
- Handle<String> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("is_debugger_script")));
- return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
- nullptr, attributes);
-}
-
-
-//
// Accessors::ScriptGetContextData
//
@@ -829,8 +823,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
- // If we materialize any object, we should deopt because we might alias
- // an object that was eliminated by escape analysis.
+ // If we materialize any object, we should deoptimize the frame because we
+ // might alias an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
array->set(i, *value);
@@ -839,7 +833,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
arguments->set_elements(*array);
if (should_deoptimize) {
- translated_values.StoreMaterializedValuesAndDeopt();
+ translated_values.StoreMaterializedValuesAndDeopt(frame);
}
// Return the freshly allocated arguments object.
@@ -850,10 +844,10 @@ static Handle<Object> ArgumentsForInlinedFunction(
static int FindFunctionInFrame(JavaScriptFrame* frame,
Handle<JSFunction> function) {
DisallowHeapAllocation no_allocation;
- List<JSFunction*> functions(2);
- frame->GetFunctions(&functions);
- for (int i = functions.length() - 1; i >= 0; i--) {
- if (functions[i] == *function) return i;
+ List<FrameSummary> frames(2);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ if (*frames[i].AsJavaScript().function() == *function) return i;
}
return -1;
}
@@ -957,19 +951,16 @@ static inline bool AllowAccessToFunction(Context* current_context,
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
- : isolate_(isolate),
- frame_iterator_(isolate),
- functions_(2),
- index_(0) {
- GetFunctions();
+ : isolate_(isolate), frame_iterator_(isolate), frames_(2), index_(0) {
+ GetFrames();
}
JSFunction* next() {
while (true) {
- if (functions_.length() == 0) return NULL;
- JSFunction* next_function = functions_[index_];
+ if (frames_.length() == 0) return NULL;
+ JSFunction* next_function = *frames_[index_].AsJavaScript().function();
index_--;
if (index_ < 0) {
- GetFunctions();
+ GetFrames();
}
// Skip functions from other origins.
if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
@@ -990,18 +981,18 @@ class FrameFunctionIterator {
}
private:
- void GetFunctions() {
- functions_.Rewind(0);
+ void GetFrames() {
+ frames_.Rewind(0);
if (frame_iterator_.done()) return;
JavaScriptFrame* frame = frame_iterator_.frame();
- frame->GetFunctions(&functions_);
- DCHECK(functions_.length() > 0);
+ frame->Summarize(&frames_);
+ DCHECK(frames_.length() > 0);
frame_iterator_.Advance();
- index_ = functions_.length() - 1;
+ index_ = frames_.length() - 1;
}
Isolate* isolate_;
JavaScriptFrameIterator frame_iterator_;
- List<JSFunction*> functions_;
+ List<FrameSummary> frames_;
int index_;
};
@@ -1025,10 +1016,11 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
if (caller == NULL) return MaybeHandle<JSFunction>();
} while (caller->shared()->is_toplevel());
- // If caller is a built-in function and caller's caller is also built-in,
+ // If caller is not user code and caller's caller is also not user code,
// use that instead.
JSFunction* potential_caller = caller;
- while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
+ while (potential_caller != NULL &&
+ !potential_caller->shared()->IsUserJavaScript()) {
caller = potential_caller;
potential_caller = it.next();
}
@@ -1210,7 +1202,8 @@ void Accessors::ErrorStackGetter(
// If stack is still an accessor (this could have changed in the meantime
// since FormatStackTrace can execute arbitrary JS), replace it with a data
// property.
- Handle<Object> receiver = Utils::OpenHandle(*info.This());
+ Handle<Object> receiver =
+ Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
Handle<Name> name = Utils::OpenHandle(*key);
if (IsAccessor(receiver, name, holder)) {
result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
@@ -1236,8 +1229,8 @@ void Accessors::ErrorStackSetter(
const v8::PropertyCallbackInfo<v8::Boolean>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> obj =
- Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+ Handle<JSObject> obj = Handle<JSObject>::cast(
+ Utils::OpenHandle(*v8::Local<v8::Value>(info.This())));
// Clear internal properties to avoid memory leaks.
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index f53d30986c..218fb3572f 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -43,7 +43,6 @@ class AccessorInfo;
V(ScriptType) \
V(ScriptSourceUrl) \
V(ScriptSourceMappingUrl) \
- V(ScriptIsEmbedderDebugScript) \
V(StringLength)
#define ACCESSOR_SETTER_LIST(V) \
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 195a5443c8..fde01f6447 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -32,23 +32,6 @@ void Malloced::Delete(void* p) {
}
-#ifdef DEBUG
-
-static void* invalid = static_cast<void*>(NULL);
-
-void* Embedded::operator new(size_t size) {
- UNREACHABLE();
- return invalid;
-}
-
-
-void Embedded::operator delete(void* p) {
- UNREACHABLE();
-}
-
-#endif
-
-
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
diff --git a/deps/v8/src/allocation.h b/deps/v8/src/allocation.h
index e87a3f1b1c..36019d9ab3 100644
--- a/deps/v8/src/allocation.h
+++ b/deps/v8/src/allocation.h
@@ -26,24 +26,9 @@ class V8_EXPORT_PRIVATE Malloced {
static void Delete(void* p);
};
-
-// A macro is used for defining the base class used for embedded instances.
-// The reason is some compilers allocate a minimum of one word for the
-// superclass. The macro prevents the use of new & delete in debug mode.
-// In release mode we are not willing to pay this overhead.
-
-#ifdef DEBUG
-// Superclass for classes with instances allocated inside stack
-// activations or inside other objects.
-class Embedded {
- public:
- void* operator new(size_t size);
- void operator delete(void* p);
-};
-#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
-#else
+// DEPRECATED
+// TODO(leszeks): Delete this during a quiet period
#define BASE_EMBEDDED
-#endif
// Superclass for classes only using static method functions.
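
Aside: the comment deleted above justified hiding the Embedded base class
behind a macro because "some compilers allocate a minimum of one word for the
superclass". A small sketch of that concern, with made-up names; mainstream
compilers apply the empty-base optimization, so both structs below usually end
up the same size:

    #include <cstdio>

    struct Empty {};                     // analogous to the old Embedded base
    struct WithBase : Empty { int x; };  // what BASE_EMBEDDED used to produce
    struct Plain { int x; };

    int main() {
      // With the empty-base optimization both sizes match; the removed
      // comment guarded against compilers that lack it.
      std::printf("%zu %zu\n", sizeof(WithBase), sizeof(Plain));
    }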
diff --git a/deps/v8/src/api-arguments-inl.h b/deps/v8/src/api-arguments-inl.h
index bf72fc4e6f..91ac253396 100644
--- a/deps/v8/src/api-arguments-inl.h
+++ b/deps/v8/src/api-arguments-inl.h
@@ -10,6 +10,14 @@
namespace v8 {
namespace internal {
+#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE) \
+ do { \
+ if (ISOLATE->needs_side_effect_check() && \
+ !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
+ return Handle<RETURN_TYPE>(); \
+ } \
+ } while (false)
+
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
@@ -19,6 +27,7 @@ namespace internal {
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
Handle<Name> name) { \
Isolate* isolate = this->isolate(); \
+ SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
@@ -43,6 +52,7 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
uint32_t index) { \
Isolate* isolate = this->isolate(); \
+ SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
@@ -62,6 +72,7 @@ Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
+ SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -77,6 +88,7 @@ Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertyDefinerCallback f, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
Isolate* isolate = this->isolate();
+ SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
@@ -92,6 +104,7 @@ Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
uint32_t index,
Handle<Object> value) {
Isolate* isolate = this->isolate();
+ SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::IndexedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -107,6 +120,7 @@ Handle<Object> PropertyCallbackArguments::Call(
IndexedPropertyDefinerCallback f, uint32_t index,
const v8::PropertyDescriptor& desc) {
Isolate* isolate = this->isolate();
+ SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
@@ -121,6 +135,10 @@ Handle<Object> PropertyCallbackArguments::Call(
void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
Handle<Name> name, Handle<Object> value) {
Isolate* isolate = this->isolate();
+ if (isolate->needs_side_effect_check() &&
+ !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
+ return;
+ }
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorNameSetterCallback);
VMState<EXTERNAL> state(isolate);
@@ -131,5 +149,7 @@ void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
}
+#undef SIDE_EFFECT_CHECK
+
} // namespace internal
} // namespace v8
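
Aside on the SIDE_EFFECT_CHECK macro this file introduces: wrapping a
multi-statement body in do { ... } while (false) makes the macro expand to a
single statement, so it composes safely with unbraced if/else at call sites.
A self-contained illustration with a hypothetical CHECK_OR_RETURN macro (not
V8 code):

    #include <cstdio>

    #define CHECK_OR_RETURN(cond, ret) \
      do {                             \
        if (!(cond)) {                 \
          std::puts("check failed");   \
          return (ret);                \
        }                              \
      } while (false)

    int Half(int x) {
      // Without the do/while wrapper, the `else` below would bind to the
      // macro's internal `if`, silently changing control flow.
      if (x % 2 == 0)
        CHECK_OR_RETURN(x > 0, -1);
      else
        return -1;
      return x / 2;
    }

    int main() { std::printf("%d\n", Half(8)); }  // prints 4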
diff --git a/deps/v8/src/api-arguments.cc b/deps/v8/src/api-arguments.cc
index f8d6c8fcc3..c7c54e5de1 100644
--- a/deps/v8/src/api-arguments.cc
+++ b/deps/v8/src/api-arguments.cc
@@ -4,6 +4,8 @@
#include "src/api-arguments.h"
+#include "src/debug/debug.h"
+#include "src/objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@@ -12,6 +14,10 @@ namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Isolate* isolate = this->isolate();
+ if (isolate->needs_side_effect_check() &&
+ !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+ return Handle<Object>();
+ }
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -23,6 +29,10 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
+ if (isolate->needs_side_effect_check() &&
+ !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+ return Handle<JSObject>();
+ }
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@@ -31,5 +41,10 @@ Handle<JSObject> PropertyCallbackArguments::Call(
return GetReturnValue<JSObject>(isolate);
}
+bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
+ Address function) {
+ return isolate->debug()->PerformSideEffectCheckForCallback(function);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index d6d1b951af..6c9ad7ad6b 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -136,6 +136,8 @@ class PropertyCallbackArguments
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
+
+ bool PerformSideEffectCheck(Isolate* isolate, Address function);
};
class FunctionCallbackArguments
diff --git a/deps/v8/src/api-experimental.cc b/deps/v8/src/api-experimental.cc
index 934b27aa5d..a9b5bd043b 100644
--- a/deps/v8/src/api-experimental.cc
+++ b/deps/v8/src/api-experimental.cc
@@ -8,10 +8,11 @@
#include "src/api-experimental.h"
-#include "include/v8.h"
#include "include/v8-experimental.h"
+#include "include/v8.h"
#include "src/api.h"
#include "src/fast-accessor-assembler.h"
+#include "src/objects-inl.h"
namespace {
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 3fe59e293d..87138bd5cf 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -395,6 +395,28 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
return result;
}
+namespace {
+MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
+ Object* function_template) {
+ // Enter a new scope. Recursion could otherwise create a lot of handles.
+ HandleScope scope(isolate);
+ Handle<JSFunction> parent_instance;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, parent_instance,
+ InstantiateFunction(
+ isolate,
+ handle(FunctionTemplateInfo::cast(function_template), isolate)),
+ JSFunction);
+ Handle<Object> instance_prototype;
+ // TODO(cbruni): decide what to do here.
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, instance_prototype,
+ JSObject::GetProperty(parent_instance,
+ isolate->factory()->prototype_string()),
+ JSFunction);
+ return scope.CloseAndEscape(instance_prototype);
+}
+} // namespace
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
@@ -406,11 +428,18 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
return Handle<JSFunction>::cast(result);
}
}
- Handle<JSObject> prototype;
+ Handle<Object> prototype;
if (!data->remove_prototype()) {
Object* prototype_templ = data->prototype_template();
if (prototype_templ->IsUndefined(isolate)) {
- prototype = isolate->factory()->NewJSObject(isolate->object_function());
+ Object* prototype_provider_templ = data->prototype_provider_template();
+ if (prototype_provider_templ->IsUndefined(isolate)) {
+ prototype = isolate->factory()->NewJSObject(isolate->object_function());
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, prototype,
+ GetInstancePrototype(isolate, prototype_provider_templ), JSFunction);
+ }
} else {
ASSIGN_RETURN_ON_EXCEPTION(
isolate, prototype,
@@ -422,22 +451,12 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
Object* parent = data->parent_template();
if (!parent->IsUndefined(isolate)) {
- // Enter a new scope. Recursion could otherwise create a lot of handles.
- HandleScope scope(isolate);
- Handle<JSFunction> parent_instance;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, parent_instance,
- InstantiateFunction(
- isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
- JSFunction);
- // TODO(dcarney): decide what to do here.
Handle<Object> parent_prototype;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, parent_prototype,
- JSObject::GetProperty(parent_instance,
- isolate->factory()->prototype_string()),
- JSFunction);
- JSObject::ForceSetPrototype(prototype, parent_prototype);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
+ GetInstancePrototype(isolate, parent),
+ JSFunction);
+ JSObject::ForceSetPrototype(Handle<JSObject>::cast(prototype),
+ parent_prototype);
}
}
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@@ -531,7 +550,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -543,7 +562,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -556,7 +575,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
- PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@@ -606,7 +625,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
- } else {
+ } else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
JSObject::AddProperty(Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
@@ -656,6 +675,12 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark as undetectable if needed.
if (obj->undetectable()) {
+ // We only allow callable undetectable receivers here, since this whole
+ // undetectable business is only to support document.all, which is both
+ // undetectable and callable. If we ever see the need to have an object
+ // that is undetectable but not callable, we need to update types.h
+ // to allow encoding this.
+ CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
map->set_is_undetectable();
}
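
The new CHECK makes MarkAsUndetectable valid only for templates that also install a call-as-function handler. A minimal embedder-side sketch, where MyCallHandler is a hypothetical v8::FunctionCallback (any callable handler works):

void MyCallHandler(const v8::FunctionCallbackInfo<v8::Value>& info) {
  // Hypothetical body; a document.all-like object must be callable.
}

void MakeUndetectableTemplate(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
  // Without this handler, instantiating the template now hits the CHECK
  // added above in ApiNatives::CreateApiFunction.
  templ->SetCallAsFunctionHandler(MyCallHandler);
  templ->MarkAsUndetectable();
}
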
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index da7f2ef414..04ba55c1dd 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -29,6 +29,7 @@
#include "src/bootstrapper.h"
#include "src/char-predicates-inl.h"
#include "src/code-stubs.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler.h"
#include "src/context-measure.h"
#include "src/contexts.h"
@@ -97,6 +98,15 @@ namespace v8 {
ENTER_V8(isolate); \
bool has_pending_exception = false
+#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return MaybeLocal<T>(); \
+ } \
+ InternalEscapableScope handle_scope(isolate); \
+ CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
+ ENTER_V8(isolate); \
+ bool has_pending_exception = false
+
#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
bailout_value, HandleScopeClass, \
do_callback) \
@@ -141,6 +151,23 @@ namespace v8 {
PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
false, i::HandleScope, false)
+#ifdef DEBUG
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ i::DisallowJavascriptExecutionDebugOnly __no_script__((isolate)); \
+ i::DisallowExceptions __no_exceptions__((isolate))
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate)); \
+ i::DisallowExceptions __no_exceptions__((isolate))
+#else
+#define ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate));
+
+#define ENTER_V8_FOR_NEW_CONTEXT(isolate) \
+ i::VMState<v8::OTHER> __state__((isolate));
+#endif // DEBUG
+
#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
do { \
if (has_pending_exception) { \
@@ -243,7 +270,7 @@ class CallDepthScope {
static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
i::Handle<i::Script> script) {
- i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script));
+ i::Handle<i::Object> scriptName(script->GetNameOrSourceURL(), isolate);
i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
v8::Isolate* v8_isolate =
reinterpret_cast<v8::Isolate*>(script->GetIsolate());
@@ -254,9 +281,9 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
v8::Integer::New(v8_isolate, script->column_offset()),
v8::Boolean::New(v8_isolate, options.IsSharedCrossOrigin()),
v8::Integer::New(v8_isolate, script->id()),
- v8::Boolean::New(v8_isolate, options.IsEmbedderDebugScript()),
Utils::ToLocal(source_map_url),
- v8::Boolean::New(v8_isolate, options.IsOpaque()));
+ v8::Boolean::New(v8_isolate, options.IsOpaque()),
+ v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM));
return origin;
}
@@ -452,6 +479,7 @@ bool RunExtraCode(Isolate* isolate, Local<Context> context,
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
: isolate_(isolate),
+ default_context_(),
contexts_(isolate),
templates_(isolate),
created_(false) {}
@@ -462,8 +490,10 @@ struct SnapshotCreatorData {
ArrayBufferAllocator allocator_;
Isolate* isolate_;
+ Persistent<Context> default_context_;
PersistentValueVector<Context> contexts_;
PersistentValueVector<Template> templates_;
+ std::vector<SerializeInternalFieldsCallback> internal_fields_serializers_;
bool created_;
};
@@ -500,7 +530,18 @@ Isolate* SnapshotCreator::GetIsolate() {
return SnapshotCreatorData::cast(data_)->isolate_;
}
-size_t SnapshotCreator::AddContext(Local<Context> context) {
+void SnapshotCreator::SetDefaultContext(Local<Context> context) {
+ DCHECK(!context.IsEmpty());
+ SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
+ DCHECK(!data->created_);
+ DCHECK(data->default_context_.IsEmpty());
+ Isolate* isolate = data->isolate_;
+ CHECK_EQ(isolate, context->GetIsolate());
+ data->default_context_.Reset(isolate, context);
+}
+
+size_t SnapshotCreator::AddContext(Local<Context> context,
+ SerializeInternalFieldsCallback callback) {
DCHECK(!context.IsEmpty());
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
DCHECK(!data->created_);
@@ -508,6 +549,7 @@ size_t SnapshotCreator::AddContext(Local<Context> context) {
CHECK_EQ(isolate, context->GetIsolate());
size_t index = static_cast<int>(data->contexts_.Size());
data->contexts_.Append(context);
+ data->internal_fields_serializers_.push_back(callback);
return index;
}
@@ -523,11 +565,13 @@ size_t SnapshotCreator::AddTemplate(Local<Template> template_obj) {
}
StartupData SnapshotCreator::CreateBlob(
- SnapshotCreator::FunctionCodeHandling function_code_handling,
- SerializeInternalFieldsCallback callback) {
+ SnapshotCreator::FunctionCodeHandling function_code_handling) {
SnapshotCreatorData* data = SnapshotCreatorData::cast(data_);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(data->isolate_);
DCHECK(!data->created_);
+ DCHECK(!data->default_context_.IsEmpty());
+
+ int num_additional_contexts = static_cast<int>(data->contexts_.Size());
{
int num_templates = static_cast<int>(data->templates_.Size());
@@ -539,6 +583,18 @@ StartupData SnapshotCreator::CreateBlob(
}
isolate->heap()->SetSerializedTemplates(*templates);
data->templates_.Clear();
+
+ // We need to store the global proxy size upfront in case we need the
+ // bootstrapper to create a global proxy before we deserialize the context.
+ i::Handle<i::FixedArray> global_proxy_sizes =
+ isolate->factory()->NewFixedArray(num_additional_contexts, i::TENURED);
+ for (int i = 0; i < num_additional_contexts; i++) {
+ i::Handle<i::Context> context =
+ v8::Utils::OpenHandle(*data->contexts_.Get(i));
+ global_proxy_sizes->set(i,
+ i::Smi::FromInt(context->global_proxy()->Size()));
+ }
+ isolate->heap()->SetSerializedGlobalProxySizes(*global_proxy_sizes);
}
// If we don't do this then we end up with a stray root pointing at the
@@ -549,15 +605,20 @@ StartupData SnapshotCreator::CreateBlob(
i::DisallowHeapAllocation no_gc_from_here_on;
- int num_contexts = static_cast<int>(data->contexts_.Size());
- i::List<i::Object*> contexts(num_contexts);
- for (int i = 0; i < num_contexts; i++) {
+ i::List<i::Object*> contexts(num_additional_contexts);
+ i::Object* default_context;
+ {
i::HandleScope scope(isolate);
- i::Handle<i::Context> context =
- v8::Utils::OpenHandle(*data->contexts_.Get(i));
- contexts.Add(*context);
+ default_context =
+ *v8::Utils::OpenHandle(*data->default_context_.Get(data->isolate_));
+ data->default_context_.Reset();
+ for (int i = 0; i < num_additional_contexts; i++) {
+ i::Handle<i::Context> context =
+ v8::Utils::OpenHandle(*data->contexts_.Get(i));
+ contexts.Add(*context);
+ }
+ data->contexts_.Clear();
}
- data->contexts_.Clear();
#ifdef DEBUG
i::ExternalReferenceTable::instance(isolate)->ResetCount();
@@ -567,11 +628,20 @@ StartupData SnapshotCreator::CreateBlob(
startup_serializer.SerializeStrongReferences();
// Serialize each context with a new partial serializer.
- i::List<i::SnapshotData*> context_snapshots(num_contexts);
- for (int i = 0; i < num_contexts; i++) {
- i::PartialSerializer partial_serializer(isolate, &startup_serializer,
- callback);
- partial_serializer.Serialize(&contexts[i]);
+ i::List<i::SnapshotData*> context_snapshots(num_additional_contexts + 1);
+
+ {
+ // The default snapshot does not support internal fields.
+ i::PartialSerializer partial_serializer(
+ isolate, &startup_serializer, v8::SerializeInternalFieldsCallback());
+ partial_serializer.Serialize(&default_context, false);
+ context_snapshots.Add(new i::SnapshotData(&partial_serializer));
+ }
+
+ for (int i = 0; i < num_additional_contexts; i++) {
+ i::PartialSerializer partial_serializer(
+ isolate, &startup_serializer, data->internal_fields_serializers_[i]);
+ partial_serializer.Serialize(&contexts[i], true);
context_snapshots.Add(new i::SnapshotData(&partial_serializer));
}
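
Taken together with SetDefaultContext and the new AddContext signature, this implies an embedder flow roughly as follows. A hedged sketch, where SerializeMyFields is an illustrative callback matching the assumed SerializeInternalFieldsCallback signature:

// Hypothetical per-field serializer: record an empty payload for every
// internal field.
v8::StartupData SerializeMyFields(v8::Local<v8::Object> holder, int index) {
  return {nullptr, 0};
}

v8::StartupData BuildSnapshot() {
  v8::SnapshotCreator creator;
  v8::Isolate* isolate = creator.GetIsolate();
  {
    v8::HandleScope scope(isolate);
    // Mandatory: CreateBlob() DCHECKs that a default context was set; it
    // is serialized first and is not addressable via FromSnapshot().
    creator.SetDefaultContext(v8::Context::New(isolate));
    // Additional contexts carry their own internal-fields serializer; the
    // returned index is what Context::FromSnapshot() takes later.
    creator.AddContext(v8::Context::New(isolate), SerializeMyFields);
  }
  return creator.CreateBlob(
      v8::SnapshotCreator::FunctionCodeHandling::kClear);
}
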
@@ -611,7 +681,7 @@ StartupData V8::CreateSnapshotDataBlob(const char* embedded_source) {
!RunExtraCode(isolate, context, embedded_source, "<embedded>")) {
return result;
}
- snapshot_creator.AddContext(context);
+ snapshot_creator.SetDefaultContext(context);
}
result = snapshot_creator.CreateBlob(
SnapshotCreator::FunctionCodeHandling::kClear);
@@ -652,7 +722,7 @@ StartupData V8::WarmUpSnapshotDataBlob(StartupData cold_snapshot_blob,
HandleScope handle_scope(isolate);
isolate->ContextDisposedNotification(false);
Local<Context> context = Context::New(isolate);
- snapshot_creator.AddContext(context);
+ snapshot_creator.SetDefaultContext(context);
}
result = snapshot_creator.CreateBlob(
SnapshotCreator::FunctionCodeHandling::kKeep);
@@ -929,6 +999,12 @@ HandleScope::~HandleScope() {
i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
+V8_NORETURN void* HandleScope::operator new(size_t) {
+ base::OS::Abort();
+ abort();
+}
+
+void HandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
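
HandleScope, and below it EscapableHandleScope, SealHandleScope, and v8::TryCatch, now abort on heap allocation, making the stack-only contract explicit at runtime. A minimal sketch of that contract:

void ScopeContract(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);  // correct: automatic storage, LIFO order
  // v8::HandleScope* heap_scope = new v8::HandleScope(isolate);
  // A heap allocation like the commented line above now reaches the
  // operator new defined here and dies in base::OS::Abort().
}
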
int HandleScope::NumberOfHandles(Isolate* isolate) {
return i::HandleScope::NumberOfHandles(
@@ -967,6 +1043,13 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
return escape_slot_;
}
+V8_NORETURN void* EscapableHandleScope::operator new(size_t) {
+ base::OS::Abort();
+ abort();
+}
+
+void EscapableHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
+
SealHandleScope::SealHandleScope(Isolate* isolate)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
i::HandleScopeData* current = isolate_->handle_scope_data();
@@ -985,11 +1068,17 @@ SealHandleScope::~SealHandleScope() {
current->sealed_level = prev_sealed_level_;
}
+V8_NORETURN void* SealHandleScope::operator new(size_t) {
+ base::OS::Abort();
+ abort();
+}
+
+void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
void Context::Enter() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
impl->EnterContext(env);
impl->SaveContext(isolate->context());
@@ -1000,7 +1089,7 @@ void Context::Enter() {
void Context::Exit() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Isolate* isolate = env->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
if (!Utils::ApiCheck(impl->LastEnteredContextWas(env),
"v8::Context::Exit()",
@@ -1099,7 +1188,7 @@ void Template::Set(v8::Local<Name> name, v8::Local<Data> value,
v8::PropertyAttribute attribute) {
auto templ = Utils::OpenHandle(this);
i::Isolate* isolate = templ->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto value_obj = Utils::OpenHandle(*value);
CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo());
@@ -1130,7 +1219,7 @@ void Template::SetAccessorProperty(
DCHECK_EQ(v8::DEFAULT, access_control);
auto templ = Utils::OpenHandle(this);
auto isolate = templ->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(!name.IsEmpty());
DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
i::HandleScope scope(isolate);
@@ -1154,7 +1243,7 @@ static Local<ObjectTemplate> ObjectTemplateNew(
Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
i_isolate);
if (result->IsUndefined(i_isolate)) {
@@ -1166,6 +1255,16 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
return ToApiHandle<ObjectTemplate>(result);
}
+void FunctionTemplate::SetPrototypeProviderTemplate(
+ Local<FunctionTemplate> prototype_provider) {
+ i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::Handle<i::Object> result = Utils::OpenHandle(*prototype_provider);
+ auto info = Utils::OpenHandle(this);
+ CHECK(info->prototype_template()->IsUndefined(i_isolate));
+ CHECK(info->parent_template()->IsUndefined(i_isolate));
+ info->set_prototype_provider_template(*result);
+}
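
SetPrototypeProviderTemplate is mutually exclusive with both a prototype template and a parent template, as the CHECKs here and the one added to Inherit() just below enforce. A hedged usage sketch:

void LinkTemplates(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);
  v8::Local<v8::FunctionTemplate> provider =
      v8::FunctionTemplate::New(isolate);
  v8::Local<v8::FunctionTemplate> derived =
      v8::FunctionTemplate::New(isolate);
  // Instances of |derived| take their prototype from an instantiation of
  // |provider| (see GetInstancePrototype in api-natives.cc above) instead
  // of a freshly created prototype object.
  derived->SetPrototypeProviderTemplate(provider);
  // A subsequent derived->Inherit(...) would fail the CHECK added below,
  // and setting a prototype template first would have failed the CHECKs
  // in this function.
}
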
static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
const char* func) {
@@ -1177,8 +1276,9 @@ static void EnsureNotInstantiated(i::Handle<i::FunctionTemplateInfo> info,
void FunctionTemplate::Inherit(v8::Local<FunctionTemplate> value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::Inherit");
- i::Isolate* isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ i::Isolate* i_isolate = info->GetIsolate();
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ CHECK(info->prototype_provider_template()->IsUndefined(i_isolate));
info->set_parent_template(*Utils::OpenHandle(*value));
}
@@ -1193,7 +1293,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
i::Handle<i::FunctionTemplateInfo>::cast(struct_obj);
InitializeFunctionTemplate(obj);
obj->set_do_not_cache(do_not_cache);
- int next_serial_number = 0;
+ int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber;
if (!do_not_cache) {
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
@@ -1224,7 +1324,7 @@ Local<FunctionTemplate> FunctionTemplate::New(
// Changes to the environment cannot be captured in the snapshot. Expect no
// function templates when the isolate is created for serialization.
LOG_API(i_isolate, FunctionTemplate, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
auto templ = FunctionTemplateNew(i_isolate, callback, nullptr, data,
signature, length, false);
if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
@@ -1252,7 +1352,7 @@ Local<FunctionTemplate> FunctionTemplate::NewWithFastHandler(
v8::Local<Signature> signature, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, fast_handler, data, signature,
length, false);
}
@@ -1261,8 +1361,8 @@ Local<FunctionTemplate> FunctionTemplate::NewWithCache(
Isolate* isolate, FunctionCallback callback, Local<Private> cache_property,
Local<Value> data, Local<Signature> signature, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, FunctionTemplate, NewWithFastHandler);
- ENTER_V8(i_isolate);
+ LOG_API(i_isolate, FunctionTemplate, NewWithCache);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return FunctionTemplateNew(i_isolate, callback, nullptr, data, signature,
length, false, cache_property);
}
@@ -1291,7 +1391,7 @@ void FunctionTemplate::SetCallHandler(
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetCallHandler");
i::Isolate* isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
@@ -1363,7 +1463,7 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
return Local<ObjectTemplate>();
}
i::Isolate* isolate = handle->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (handle->instance_template()->IsUndefined(isolate)) {
Local<ObjectTemplate> templ =
ObjectTemplate::New(isolate, ToApiHandle<FunctionTemplate>(handle));
@@ -1379,7 +1479,7 @@ void FunctionTemplate::SetLength(int length) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetLength");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_length(length);
}
@@ -1388,7 +1488,7 @@ void FunctionTemplate::SetClassName(Local<String> name) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetClassName");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_class_name(*Utils::OpenHandle(*name));
}
@@ -1397,7 +1497,7 @@ void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_accept_any_receiver(value);
}
@@ -1406,7 +1506,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetHiddenPrototype");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_hidden_prototype(value);
}
@@ -1415,7 +1515,7 @@ void FunctionTemplate::ReadOnlyPrototype() {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::ReadOnlyPrototype");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_read_only_prototype(true);
}
@@ -1424,7 +1524,7 @@ void FunctionTemplate::RemovePrototype() {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::RemovePrototype");
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
info->set_remove_prototype(true);
}
@@ -1446,7 +1546,7 @@ static Local<ObjectTemplate> ObjectTemplateNew(
i::Isolate* isolate, v8::Local<FunctionTemplate> constructor,
bool do_not_cache) {
LOG_API(isolate, ObjectTemplate, New);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Struct> struct_obj =
isolate->factory()->NewStruct(i::OBJECT_TEMPLATE_INFO_TYPE);
i::Handle<i::ObjectTemplateInfo> obj =
@@ -1511,7 +1611,7 @@ static bool TemplateSetAccessor(Template* template_obj, v8::Local<Name> name,
bool replace_on_access) {
auto info = Utils::OpenHandle(template_obj);
auto isolate = info->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto obj =
MakeAccessorInfo(name, getter, setter, data, settings, attribute,
@@ -1558,7 +1658,7 @@ void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
PropertyAttribute attribute) {
auto templ = Utils::OpenHandle(this);
i::Isolate* isolate = templ->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name),
intrinsic,
@@ -1631,7 +1731,7 @@ static void ObjectTemplateSetNamedPropertyHandler(
Descriptor descriptor, Deleter remover, Enumerator enumerator,
Definer definer, Local<Value> data, PropertyHandlerFlags flags) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, templ);
EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
@@ -1660,7 +1760,7 @@ void ObjectTemplate::SetHandler(
void ObjectTemplate::MarkAsUndetectable() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::MarkAsUndetectable");
@@ -1671,7 +1771,7 @@ void ObjectTemplate::MarkAsUndetectable() {
void ObjectTemplate::SetAccessCheckCallback(AccessCheckCallback callback,
Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetAccessCheckCallback");
@@ -1700,7 +1800,7 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
const IndexedPropertyHandlerConfiguration& indexed_handler,
Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(
@@ -1736,7 +1836,7 @@ void ObjectTemplate::SetAccessCheckCallbackAndHandler(
void ObjectTemplate::SetHandler(
const IndexedPropertyHandlerConfiguration& config) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
@@ -1751,7 +1851,7 @@ void ObjectTemplate::SetHandler(
void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
Local<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
@@ -1780,7 +1880,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
"Invalid internal field count")) {
return;
}
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (value > 0) {
// The internal field count is set by the constructor function's
// construct code, so we ensure that there is a constructor
@@ -1796,7 +1896,7 @@ bool ObjectTemplate::IsImmutableProto() {
void ObjectTemplate::SetImmutableProto() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
Utils::OpenHandle(this)->set_immutable_proto(true);
}
@@ -2454,6 +2554,12 @@ v8::TryCatch::~TryCatch() {
}
}
+V8_NORETURN void* v8::TryCatch::operator new(size_t) {
+ base::OS::Abort();
+ abort();
+}
+
+void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
bool v8::TryCatch::HasCaught() const {
return !reinterpret_cast<i::Object*>(exception_)->IsTheHole(isolate_);
@@ -2620,6 +2726,10 @@ int Message::GetEndPosition() const {
return self->end_position();
}
+int Message::ErrorLevel() const {
+ auto self = Utils::OpenHandle(this);
+ return self->error_level();
+}
Maybe<int> Message::GetStartColumn(Local<Context> context) const {
auto self = Utils::OpenHandle(this);
@@ -2821,7 +2931,7 @@ bool StackFrame::IsConstructor() const {
Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
i::JSWeakCollection::Initialize(weakmap, isolate);
return Utils::NativeWeakMapToLocal(weakmap);
@@ -2982,6 +3092,15 @@ Maybe<bool> ValueSerializer::Delegate::WriteHostObject(Isolate* v8_isolate,
return Nothing<bool>();
}
+Maybe<uint32_t> ValueSerializer::Delegate::GetSharedArrayBufferId(
+ Isolate* v8_isolate, Local<SharedArrayBuffer> shared_array_buffer) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ isolate->ScheduleThrow(*isolate->factory()->NewError(
+ isolate->error_function(), i::MessageTemplate::kDataCloneError,
+ Utils::OpenHandle(*shared_array_buffer)));
+ return Nothing<uint32_t>();
+}
+
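The default delegate makes SharedArrayBuffers uncloneable by scheduling a DataCloneError, so embedders that do share memory override it. A sketch under the assumption that the receiving side keeps a table keyed by the returned id; the dense-index scheme and class name are illustrative, and the remaining pure-virtual Delegate members are omitted:

#include <vector>

class SharingDelegate : public v8::ValueSerializer::Delegate {
 public:
  v8::Maybe<uint32_t> GetSharedArrayBufferId(
      v8::Isolate* isolate,
      v8::Local<v8::SharedArrayBuffer> shared_array_buffer) override {
    // Keep the buffer alive and hand out a dense index that the
    // deserializing side can map back to the same backing store.
    buffers_.emplace_back(isolate, shared_array_buffer);
    return v8::Just(static_cast<uint32_t>(buffers_.size() - 1));
  }

 private:
  std::vector<v8::Global<v8::SharedArrayBuffer>> buffers_;
};
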
void* ValueSerializer::Delegate::ReallocateBufferMemory(void* old_buffer,
size_t size,
size_t* actual_size) {
@@ -4411,12 +4530,14 @@ MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Object, ObjectProtoToString, String);
- auto obj = Utils::OpenHandle(this);
- Local<String> result;
+ auto self = Utils::OpenHandle(this);
+ Local<Value> result;
has_pending_exception =
- !ToLocal<String>(i::JSObject::ObjectProtoToString(isolate, obj), &result);
+ !ToLocal<Value>(i::Execution::Call(isolate, isolate->object_to_string(),
+ self, 0, nullptr),
+ &result);
RETURN_ON_FAILED_EXECUTION(String);
- RETURN_ESCAPED(result);
+ RETURN_ESCAPED(Local<String>::Cast(result));
}
@@ -4826,7 +4947,7 @@ Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
Local<v8::Object> v8::Object::Clone() {
auto self = i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
auto isolate = self->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto result = isolate->factory()->CopyJSObject(self);
CHECK(!result.is_null());
return Utils::ToLocal(result);
@@ -4915,7 +5036,7 @@ MaybeLocal<Function> Function::New(Local<Context> context,
int length, ConstructorBehavior behavior) {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
LOG_API(isolate, Function, New);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto templ = FunctionTemplateNew(isolate, callback, nullptr, data,
Local<Signature>(), length, true);
if (behavior == ConstructorBehavior::kThrow) templ->RemovePrototype();
@@ -5109,7 +5230,7 @@ bool Function::IsBuiltin() const {
return false;
}
auto func = i::Handle<i::JSFunction>::cast(self);
- return func->shared()->IsBuiltin();
+ return !func->shared()->IsUserJavaScript();
}
@@ -6069,10 +6190,11 @@ struct InvokeBootstrapper<i::Context> {
i::Handle<i::Context> Invoke(
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
- v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
return isolate->bootstrapper()->CreateEnvironment(
maybe_global_proxy, global_object_template, extensions,
- context_snapshot_index);
+ context_snapshot_index, internal_fields_deserializer);
}
};
@@ -6081,7 +6203,8 @@ struct InvokeBootstrapper<i::JSGlobalProxy> {
i::Handle<i::JSGlobalProxy> Invoke(
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
- v8::ExtensionConfiguration* extensions, size_t context_snapshot_index) {
+ v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
USE(extensions);
USE(context_snapshot_index);
return isolate->bootstrapper()->NewRemoteContext(maybe_global_proxy,
@@ -6093,15 +6216,19 @@ template <typename ObjectType>
static i::Handle<ObjectType> CreateEnvironment(
i::Isolate* isolate, v8::ExtensionConfiguration* extensions,
v8::MaybeLocal<ObjectTemplate> maybe_global_template,
- v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index) {
+ v8::MaybeLocal<Value> maybe_global_proxy, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
i::Handle<ObjectType> result;
- // Enter V8 via an ENTER_V8 scope.
{
- ENTER_V8(isolate);
+ ENTER_V8_FOR_NEW_CONTEXT(isolate);
v8::Local<ObjectTemplate> proxy_template;
i::Handle<i::FunctionTemplateInfo> proxy_constructor;
i::Handle<i::FunctionTemplateInfo> global_constructor;
+ i::Handle<i::Object> named_interceptor(
+ isolate->factory()->undefined_value());
+ i::Handle<i::Object> indexed_interceptor(
+ isolate->factory()->undefined_value());
if (!maybe_global_template.IsEmpty()) {
v8::Local<v8::ObjectTemplate> global_template =
@@ -6134,6 +6261,24 @@ static i::Handle<ObjectType> CreateEnvironment(
global_constructor->set_access_check_info(
isolate->heap()->undefined_value());
}
+
+ // Same for other interceptors. If the global constructor has
+ // interceptors, we need to replace them temporarily with noop
+ // interceptors, so the map is correctly marked as having interceptors,
+ // but we don't invoke any.
+ if (!global_constructor->named_property_handler()->IsUndefined(isolate)) {
+ named_interceptor =
+ handle(global_constructor->named_property_handler(), isolate);
+ global_constructor->set_named_property_handler(
+ isolate->heap()->noop_interceptor_info());
+ }
+ if (!global_constructor->indexed_property_handler()->IsUndefined(
+ isolate)) {
+ indexed_interceptor =
+ handle(global_constructor->indexed_property_handler(), isolate);
+ global_constructor->set_indexed_property_handler(
+ isolate->heap()->noop_interceptor_info());
+ }
}
i::MaybeHandle<i::JSGlobalProxy> maybe_proxy;
@@ -6143,10 +6288,11 @@ static i::Handle<ObjectType> CreateEnvironment(
}
// Create the environment.
InvokeBootstrapper<ObjectType> invoke;
- result = invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
- context_snapshot_index);
+ result =
+ invoke.Invoke(isolate, maybe_proxy, proxy_template, extensions,
+ context_snapshot_index, internal_fields_deserializer);
- // Restore the access check info on the global template.
+ // Restore the access check info and interceptors on the global template.
if (!maybe_global_template.IsEmpty()) {
DCHECK(!global_constructor.is_null());
DCHECK(!proxy_constructor.is_null());
@@ -6154,6 +6300,8 @@ static i::Handle<ObjectType> CreateEnvironment(
proxy_constructor->access_check_info());
global_constructor->set_needs_access_check(
proxy_constructor->needs_access_check());
+ global_constructor->set_named_property_handler(*named_interceptor);
+ global_constructor->set_indexed_property_handler(*indexed_interceptor);
}
}
// Leave V8.
@@ -6161,20 +6309,20 @@ static i::Handle<ObjectType> CreateEnvironment(
return result;
}
-Local<Context> NewContext(v8::Isolate* external_isolate,
- v8::ExtensionConfiguration* extensions,
- v8::MaybeLocal<ObjectTemplate> global_template,
- v8::MaybeLocal<Value> global_object,
- size_t context_snapshot_index) {
+Local<Context> NewContext(
+ v8::Isolate* external_isolate, v8::ExtensionConfiguration* extensions,
+ v8::MaybeLocal<ObjectTemplate> global_template,
+ v8::MaybeLocal<Value> global_object, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.NewContext");
LOG_API(isolate, Context, New);
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
if (extensions == NULL) extensions = &no_extensions;
- i::Handle<i::Context> env =
- CreateEnvironment<i::Context>(isolate, extensions, global_template,
- global_object, context_snapshot_index);
+ i::Handle<i::Context> env = CreateEnvironment<i::Context>(
+ isolate, extensions, global_template, global_object,
+ context_snapshot_index, internal_fields_deserializer);
if (env.is_null()) {
if (isolate->has_pending_exception()) {
isolate->OptionalRescheduleException(true);
@@ -6189,21 +6337,22 @@ Local<Context> v8::Context::New(v8::Isolate* external_isolate,
v8::MaybeLocal<ObjectTemplate> global_template,
v8::MaybeLocal<Value> global_object) {
return NewContext(external_isolate, extensions, global_template,
- global_object, 0);
+ global_object, 0, DeserializeInternalFieldsCallback());
}
MaybeLocal<Context> v8::Context::FromSnapshot(
v8::Isolate* external_isolate, size_t context_snapshot_index,
- v8::ExtensionConfiguration* extensions,
- v8::MaybeLocal<ObjectTemplate> global_template,
- v8::MaybeLocal<Value> global_object) {
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
+ v8::ExtensionConfiguration* extensions, MaybeLocal<Value> global_object) {
+ size_t index_including_default_context = context_snapshot_index + 1;
if (!i::Snapshot::HasContextSnapshot(
reinterpret_cast<i::Isolate*>(external_isolate),
- context_snapshot_index)) {
+ index_including_default_context)) {
return MaybeLocal<Context>();
}
- return NewContext(external_isolate, extensions, global_template,
- global_object, context_snapshot_index);
+ return NewContext(external_isolate, extensions, MaybeLocal<ObjectTemplate>(),
+ global_object, index_including_default_context,
+ internal_fields_deserializer);
}
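
Callers keep using the zero-based index that AddContext() returned; the +1 shift past the reserved default-context slot happens internally, here. A hedged sketch, where DeserializeMyFields is an illustrative callback matching the assumed DeserializeInternalFieldsCallback signature and the trailing parameters are left at their defaults:

// Hypothetical per-field deserializer: restore embedder state for
// internal field |index| of |holder| from |payload|.
void DeserializeMyFields(v8::Local<v8::Object> holder, int index,
                         v8::StartupData payload) {}

v8::MaybeLocal<v8::Context> Restore(v8::Isolate* isolate, size_t index) {
  // |index| is what SnapshotCreator::AddContext() returned at build time;
  // the default context at internal slot 0 is not reachable through here.
  return v8::Context::FromSnapshot(isolate, index, DeserializeMyFields);
}
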
MaybeLocal<Object> v8::Context::NewRemoteContext(
@@ -6225,7 +6374,8 @@ MaybeLocal<Object> v8::Context::NewRemoteContext(
"Global template needs to have access check handlers.");
i::Handle<i::JSGlobalProxy> global_proxy =
CreateEnvironment<i::JSGlobalProxy>(isolate, nullptr, global_template,
- global_object, 0);
+ global_object, 0,
+ DeserializeInternalFieldsCallback());
if (global_proxy.is_null()) {
if (isolate->has_pending_exception()) {
isolate->OptionalRescheduleException(true);
@@ -6263,7 +6413,6 @@ v8::Isolate* Context::GetIsolate() {
return reinterpret_cast<Isolate*>(env->GetIsolate());
}
-
v8::Local<v8::Object> Context::Global() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
@@ -6281,7 +6430,7 @@ v8::Local<v8::Object> Context::Global() {
void Context::DetachGlobal() {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
isolate->bootstrapper()->DetachGlobal(context);
}
@@ -6297,7 +6446,7 @@ Local<v8::Object> Context::GetExtrasBindingObject() {
void Context::AllowCodeGenerationFromStrings(bool allow) {
i::Handle<i::Context> context = Utils::OpenHandle(this);
i::Isolate* isolate = context->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
context->set_allow_code_gen_from_strings(
allow ? isolate->heap()->true_value() : isolate->heap()->false_value());
}
@@ -6403,7 +6552,7 @@ Local<External> v8::External::New(Isolate* isolate, void* value) {
STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, External, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
return Utils::ExternalToLocal(external);
}
@@ -6482,7 +6631,7 @@ STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
result = MaybeLocal<String>(); \
} else { \
i::Isolate* i_isolate = reinterpret_cast<internal::Isolate*>(isolate); \
- ENTER_V8(i_isolate); \
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); \
LOG_API(i_isolate, class_name, function_name); \
if (length < 0) length = StringLength(data); \
i::Handle<i::String> handle_result = \
@@ -6569,7 +6718,7 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
return MaybeLocal<String>();
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalTwoByte);
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
@@ -6593,7 +6742,7 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
return MaybeLocal<String>();
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalOneByte);
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromOneByte(resource)
@@ -6615,7 +6764,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (i::StringShape(*obj).IsExternal()) {
return false; // Already an external string.
}
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@@ -6639,7 +6788,7 @@ bool v8::String::MakeExternal(
if (i::StringShape(*obj).IsExternal()) {
return false; // Already an external string.
}
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (isolate->heap()->IsInGCPostProcessing()) {
return false;
}
@@ -6675,7 +6824,7 @@ Isolate* v8::Object::GetIsolate() {
Local<v8::Object> v8::Object::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Object, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSObject> obj =
i_isolate->factory()->NewJSObject(i_isolate->object_function());
return Utils::ToLocal(obj);
@@ -6685,7 +6834,7 @@ Local<v8::Object> v8::Object::New(Isolate* isolate) {
Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, NumberObject, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
i::Handle<i::Object> obj =
i::Object::ToObject(i_isolate, number).ToHandleChecked();
@@ -6705,7 +6854,7 @@ double v8::NumberObject::ValueOf() const {
Local<v8::Value> v8::BooleanObject::New(Isolate* isolate, bool value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, BooleanObject, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> boolean(value ? i_isolate->heap()->true_value()
: i_isolate->heap()->false_value(),
i_isolate);
@@ -6733,7 +6882,7 @@ Local<v8::Value> v8::StringObject::New(Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
i::Isolate* isolate = string->GetIsolate();
LOG_API(isolate, StringObject, New);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> obj =
i::Object::ToObject(isolate, string).ToHandleChecked();
return Utils::ToLocal(obj);
@@ -6753,7 +6902,7 @@ Local<v8::String> v8::StringObject::ValueOf() const {
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Local<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SymbolObject, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Object> obj = i::Object::ToObject(
i_isolate, Utils::OpenHandle(*value)).ToHandleChecked();
return Utils::ToLocal(obj);
@@ -6803,7 +6952,7 @@ double v8::Date::ValueOf() const {
void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Date, DateTimeConfigurationChangeNotification);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->date_cache()->ResetDateCache();
if (!i_isolate->eternal_handles()->Exists(
i::EternalHandles::DATE_CACHE_VERSION)) {
@@ -6868,7 +7017,7 @@ v8::RegExp::Flags v8::RegExp::GetFlags() const {
Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Array, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
int real_length = length > 0 ? length : 0;
i::Handle<i::JSArray> obj = i_isolate->factory()->NewJSArray(real_length);
i::Handle<i::Object> length_obj =
@@ -6913,7 +7062,7 @@ Local<Object> Array::CloneElementAt(uint32_t index) { return Local<Object>(); }
Local<v8::Map> v8::Map::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Map, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSMap> obj = i_isolate->factory()->NewJSMap();
return Utils::ToLocal(obj);
}
@@ -7021,7 +7170,7 @@ Local<Array> Map::AsArray() const {
Local<v8::Set> v8::Set::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Set, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSSet> obj = i_isolate->factory()->NewJSSet();
return Utils::ToLocal(obj);
}
@@ -7113,7 +7262,7 @@ MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
i::Handle<i::Object> result;
has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_create(),
+ !i::Execution::Call(isolate, isolate->promise_internal_constructor(),
isolate->factory()->undefined_value(), 0, NULL)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
@@ -7158,9 +7307,12 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, Promise_Resolver, Reject, bool);
auto self = Utils::OpenHandle(this);
- i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
+
+ // We pass true to trigger the debugger's on-exception handler.
+ i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value),
+ isolate->factory()->ToBoolean(true)};
has_pending_exception =
- i::Execution::Call(isolate, isolate->promise_reject(),
+ i::Execution::Call(isolate, isolate->promise_internal_reject(),
isolate->factory()->undefined_value(), arraysize(argv),
argv)
.is_null();
@@ -7220,10 +7372,31 @@ bool Promise::HasHandler() {
i::Isolate* isolate = promise->GetIsolate();
LOG_API(isolate, Promise, HasRejectHandler);
ENTER_V8(isolate);
- i::Handle<i::Symbol> key = isolate->factory()->promise_has_handler_symbol();
- return i::JSReceiver::GetDataProperty(promise, key)->IsTrue(isolate);
+ if (promise->IsJSPromise()) {
+ i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
+ return js_promise->has_handler();
+ }
+ return false;
+}
+
+Local<Value> Promise::Result() {
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, Promise, Result);
+ i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
+ Utils::ApiCheck(js_promise->status() != kPending, "v8_Promise_Result",
+ "Promise is still pending");
+ i::Handle<i::Object> result(js_promise->result(), isolate);
+ return Utils::ToLocal(result);
}
+Promise::PromiseState Promise::State() {
+ i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
+ i::Isolate* isolate = promise->GetIsolate();
+ LOG_API(isolate, Promise, Status);
+ i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
+ return static_cast<PromiseState>(js_promise->status());
+}
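
The new accessors read the promise state directly off the JSPromise. Since Result() ApiChecks against a still-pending promise, guard it with State() first; a minimal sketch, assuming the kPending/kFulfilled/kRejected enumerators that the kPending comparison above implies:

void InspectPromise(v8::Local<v8::Promise> promise) {
  switch (promise->State()) {
    case v8::Promise::kPending:
      // Result() would fail the "Promise is still pending" check here.
      break;
    case v8::Promise::kFulfilled:
    case v8::Promise::kRejected: {
      v8::Local<v8::Value> result = promise->Result();
      // ... consume the resolution value or the rejection reason ...
      (void)result;
      break;
    }
  }
}
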
Local<Object> Proxy::GetTarget() {
i::Handle<i::JSProxy> self = Utils::OpenHandle(this);
@@ -7268,7 +7441,7 @@ Local<String> WasmCompiledModule::GetWasmWireBytes() {
i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
i::Handle<i::WasmCompiledModule> compiled_part =
i::handle(i::WasmCompiledModule::cast(obj->GetInternalField(0)));
- i::Handle<i::String> wire_bytes = compiled_part->module_bytes();
+ i::Handle<i::String> wire_bytes(compiled_part->module_bytes());
return Local<String>::Cast(Utils::ToLocal(wire_bytes));
}
@@ -7330,7 +7503,7 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
i::wasm::CreateModuleObjectFromBytes(
i_isolate, start, start + length, &thrower,
i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
- nullptr, nullptr);
+ i::Vector<const uint8_t>::empty());
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@@ -7382,7 +7555,7 @@ void v8::ArrayBuffer::Neuter() {
Utils::ApiCheck(obj->is_neuterable(), "v8::ArrayBuffer::Neuter",
"Only neuterable ArrayBuffers can be neutered");
LOG_API(isolate, ArrayBuffer, Neuter);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
obj->Neuter();
}
@@ -7396,7 +7569,7 @@ size_t v8::ArrayBuffer::ByteLength() const {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
@@ -7415,7 +7588,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
i::JSArrayBuffer::Setup(obj, i_isolate,
@@ -7491,7 +7664,7 @@ size_t v8::TypedArray::Length() {
size_t byte_offset, size_t length) { \
i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate(); \
LOG_API(isolate, Type##Array, New); \
- ENTER_V8(isolate); \
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
if (!Utils::ApiCheck(length <= static_cast<size_t>(i::Smi::kMaxValue), \
"v8::" #Type \
"Array::New(Local<ArrayBuffer>, size_t, size_t)", \
@@ -7510,7 +7683,7 @@ size_t v8::TypedArray::Length() {
i::Isolate* isolate = \
Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \
LOG_API(isolate, Type##Array, New); \
- ENTER_V8(isolate); \
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
if (!Utils::ApiCheck( \
length <= static_cast<size_t>(i::Smi::kMaxValue), \
"v8::" #Type \
@@ -7533,7 +7706,7 @@ Local<DataView> DataView::New(Local<ArrayBuffer> array_buffer,
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
LOG_API(isolate, DataView, New);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
return Utils::ToLocal(obj);
@@ -7546,7 +7719,7 @@ Local<DataView> DataView::New(Local<SharedArrayBuffer> shared_array_buffer,
i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*shared_array_buffer);
i::Isolate* isolate = buffer->GetIsolate();
LOG_API(isolate, DataView, New);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::JSDataView> obj =
isolate->factory()->NewJSDataView(buffer, byte_offset, byte_length);
return Utils::ToLocal(obj);
@@ -7590,7 +7763,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
CHECK(i::FLAG_harmony_sharedarraybuffer);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
@@ -7611,7 +7784,7 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
CHECK(byte_length == 0 || data != NULL);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
i::JSArrayBuffer::Setup(obj, i_isolate,
@@ -7624,49 +7797,26 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Symbol, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
if (!name.IsEmpty()) result->set_name(*Utils::OpenHandle(*name));
return Utils::ToLocal(result);
}
-static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
- i::Handle<i::String> name,
- i::Handle<i::String> part,
- bool private_symbol) {
- i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
- i::Handle<i::JSObject> symbols =
- i::Handle<i::JSObject>::cast(
- i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
- i::Handle<i::Object> symbol =
- i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
- if (!symbol->IsSymbol()) {
- DCHECK(symbol->IsUndefined(isolate));
- if (private_symbol)
- symbol = isolate->factory()->NewPrivateSymbol();
- else
- symbol = isolate->factory()->NewSymbol();
- i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
- i::Object::SetPropertyOrElement(symbols, name, symbol, i::STRICT).Assert();
- }
- return i::Handle<i::Symbol>::cast(symbol);
-}
-
-
Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::String> part = i_isolate->factory()->for_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
+ return Utils::ToLocal(i_isolate->SymbolFor(
+ i::Heap::kPublicSymbolTableRootIndex, i_name, false));
}
Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::String> part = i_isolate->factory()->for_api_string();
- return Utils::ToLocal(SymbolFor(i_isolate, i_name, part, false));
+ return Utils::ToLocal(
+ i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false));
}
@@ -7681,6 +7831,10 @@ Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
return Utils::ToLocal(i_isolate->factory()->unscopables_symbol());
}
+Local<Symbol> v8::Symbol::GetToPrimitive(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ return Utils::ToLocal(i_isolate->factory()->to_primitive_symbol());
+}
Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -7697,7 +7851,7 @@ Local<Symbol> v8::Symbol::GetIsConcatSpreadable(Isolate* isolate) {
Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, Private, New);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
if (!name.IsEmpty()) symbol->set_name(*Utils::OpenHandle(*name));
Local<Symbol> result = Utils::ToLocal(symbol);
@@ -7708,9 +7862,8 @@ Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
- i::Handle<i::String> part = i_isolate->factory()->private_api_string();
- Local<Symbol> result =
- Utils::ToLocal(SymbolFor(i_isolate, i_name, part, true));
+ Local<Symbol> result = Utils::ToLocal(i_isolate->SymbolFor(
+ i::Heap::kApiPrivateSymbolTableRootIndex, i_name, true));
return v8::Local<Private>(reinterpret_cast<Private*>(*result));
}
@@ -7721,7 +7874,7 @@ Local<Number> v8::Number::New(Isolate* isolate, double value) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
value = std::numeric_limits<double>::quiet_NaN();
}
- ENTER_V8(internal_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::NumberToLocal(result);
}
@@ -7733,7 +7886,7 @@ Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
internal_isolate));
}
- ENTER_V8(internal_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
@@ -7745,7 +7898,7 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
if (fits_into_int32_t) {
return Integer::New(isolate, static_cast<int32_t>(value));
}
- ENTER_V8(internal_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
return Utils::IntegerToLocal(result);
}
@@ -7969,8 +8122,6 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
}
isolate->set_api_external_references(params.external_references);
- isolate->set_deserialize_internal_fields_callback(
- params.deserialize_internal_fields_callback);
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we got rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
@@ -8227,6 +8378,10 @@ void Isolate::RemoveCallCompletedCallback(
reinterpret_cast<CallCompletedCallback>(callback));
}
+void Isolate::SetPromiseHook(PromiseHook hook) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->SetPromiseHook(hook);
+}
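A sketch of a hook an embedder might install, assuming the PromiseHook
signature declared alongside this method in v8.h (hook type, promise, parent):

    void TracePromises(v8::PromiseHookType type,
                       v8::Local<v8::Promise> promise,
                       v8::Local<v8::Value> parent) {
      if (type == v8::PromiseHookType::kInit) {
        // A promise was created; for chained promises, parent is the
        // promise being followed.
      }
    }
    // During isolate setup:
    // isolate->SetPromiseHook(TracePromises);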
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
if (callback == NULL) return;
@@ -8378,6 +8533,8 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
isolate->allocator()->MemoryPressureNotification(level);
+ isolate->compiler_dispatcher()->MemoryPressureNotification(
+ level, Locker::IsLocked(this));
}
void Isolate::SetRAILMode(RAILMode rail_mode) {
@@ -8385,6 +8542,21 @@ void Isolate::SetRAILMode(RAILMode rail_mode) {
return isolate->SetRAILMode(rail_mode);
}
+void Isolate::IncreaseHeapLimitForDebugging() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->IncreaseHeapLimitForDebugging();
+}
+
+void Isolate::RestoreOriginalHeapLimit() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->heap()->RestoreOriginalHeapLimit();
+}
+
+bool Isolate::IsHeapLimitIncreasedForDebugging() {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ return isolate->heap()->IsHeapLimitIncreasedForDebugging();
+}
+
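These three methods are intended to be paired around debugger-driven
evaluation. A sketch, assuming a live isolate:

    isolate->IncreaseHeapLimitForDebugging();
    // ... evaluate debugger expressions that may allocate past the limit ...
    if (isolate->IsHeapLimitIncreasedForDebugging()) {
      isolate->RestoreOriginalHeapLimit();
    }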
void Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@@ -8400,7 +8572,6 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
isolate->stack_guard()->SetStackLimit(stack_limit);
}
-
void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (isolate->heap()->memory_allocator()->code_range()->valid()) {
@@ -8430,24 +8601,40 @@ void Isolate::SetAllowCodeGenerationFromStringsCallback(
isolate->set_allow_code_gen_callback(callback);
}
+void Isolate::SetAllowWasmCompileCallback(AllowWasmCompileCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_allow_wasm_compile_callback(callback);
+}
+
+void Isolate::SetAllowWasmInstantiateCallback(
+ AllowWasmInstantiateCallback callback) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_allow_wasm_instantiate_callback(callback);
+}
bool Isolate::IsDead() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->IsDead();
}
-
bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
+ return AddMessageListenerWithErrorLevel(that, kMessageError, data);
+}
+
+bool Isolate::AddMessageListenerWithErrorLevel(MessageCallback that,
+ int message_levels,
+ Local<Value> data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::TemplateList> list = isolate->factory()->message_listeners();
- i::Handle<i::FixedArray> listener = isolate->factory()->NewFixedArray(2);
+ i::Handle<i::FixedArray> listener = isolate->factory()->NewFixedArray(3);
i::Handle<i::Foreign> foreign =
isolate->factory()->NewForeign(FUNCTION_ADDR(that));
listener->set(0, *foreign);
listener->set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
: *Utils::OpenHandle(*data));
+ listener->set(2, i::Smi::FromInt(message_levels));
list = i::TemplateList::Add(isolate, list, listener);
isolate->heap()->SetMessageListeners(*list);
return true;
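The listener tuple grows from two to three slots so a level bitmask can be
stored per listener. A sketch of registering for more than one level, assuming
the Isolate::kMessage* bit flags declared with the new method in v8.h:

    void OnMessage(v8::Local<v8::Message> message,
                   v8::Local<v8::Value> data) {
      // Only messages whose level is in the registered mask arrive here.
    }
    // During setup:
    // isolate->AddMessageListenerWithErrorLevel(
    //     OnMessage,
    //     v8::Isolate::kMessageError | v8::Isolate::kMessageWarning,
    //     v8::Local<v8::Value>());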
@@ -8456,7 +8643,7 @@ bool Isolate::AddMessageListener(MessageCallback that, Local<Value> data) {
void Isolate::RemoveMessageListeners(MessageCallback that) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::DisallowHeapAllocation no_gc;
i::TemplateList* listeners = isolate->heap()->message_listeners();
@@ -8638,7 +8825,7 @@ String::Value::~Value() {
Local<Value> Exception::NAME(v8::Local<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
LOG_API(isolate, NAME, New); \
- ENTER_V8(isolate); \
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); \
i::Object* error; \
{ \
i::HandleScope scope(isolate); \
@@ -8703,7 +8890,6 @@ bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
return true;
}
-
void Debug::DebugBreak(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->stack_guard()->RequestDebugBreak();
}
@@ -8778,16 +8964,13 @@ MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
RETURN_ESCAPED(result);
}
-
void Debug::ProcessDebugMessages(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
}
Local<Context> Debug::GetDebugContext(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
- return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
+ return debug::GetDebugContext(isolate);
}
@@ -8826,9 +9009,8 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
-bool DebugInterface::SetDebugEventListener(Isolate* isolate,
- DebugInterface::EventCallback that,
- Local<Value> data) {
+bool debug::SetDebugEventListener(Isolate* isolate, debug::EventCallback that,
+ Local<Value> data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
i::HandleScope scope(i_isolate);
@@ -8840,35 +9022,34 @@ bool DebugInterface::SetDebugEventListener(Isolate* isolate,
return true;
}
-Local<Context> DebugInterface::GetDebugContext(Isolate* isolate) {
- return Debug::GetDebugContext(isolate);
+Local<Context> debug::GetDebugContext(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
}
-MaybeLocal<Value> DebugInterface::Call(Local<Context> context,
- v8::Local<v8::Function> fun,
- v8::Local<v8::Value> data) {
+MaybeLocal<Value> debug::Call(Local<Context> context,
+ v8::Local<v8::Function> fun,
+ v8::Local<v8::Value> data) {
return Debug::Call(context, fun, data);
}
-void DebugInterface::SetLiveEditEnabled(Isolate* isolate, bool enable) {
+void debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
Debug::SetLiveEditEnabled(isolate, enable);
}
-void DebugInterface::DebugBreak(Isolate* isolate) {
- Debug::DebugBreak(isolate);
-}
+void debug::DebugBreak(Isolate* isolate) { Debug::DebugBreak(isolate); }
-void DebugInterface::CancelDebugBreak(Isolate* isolate) {
+void debug::CancelDebugBreak(Isolate* isolate) {
Debug::CancelDebugBreak(isolate);
}
-MaybeLocal<Array> DebugInterface::GetInternalProperties(Isolate* isolate,
- Local<Value> value) {
+MaybeLocal<Array> debug::GetInternalProperties(Isolate* isolate,
+ Local<Value> value) {
return Debug::GetInternalProperties(isolate, value);
}
-void DebugInterface::ChangeBreakOnException(Isolate* isolate,
- ExceptionBreakState type) {
+void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->ChangeBreakOnException(
i::BreakException, type == BreakOnAnyException);
@@ -8876,7 +9057,14 @@ void DebugInterface::ChangeBreakOnException(Isolate* isolate,
type != NoBreakOnException);
}
-void DebugInterface::PrepareStep(Isolate* v8_isolate, StepAction action) {
+void debug::SetOutOfMemoryCallback(Isolate* isolate,
+ OutOfMemoryCallback callback, void* data) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ i_isolate->heap()->SetOutOfMemoryCallback(callback, data);
+}
+
+void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
CHECK(isolate->debug()->CheckExecutionState());
@@ -8886,38 +9074,39 @@ void DebugInterface::PrepareStep(Isolate* v8_isolate, StepAction action) {
isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
}
-void DebugInterface::ClearStepping(Isolate* v8_isolate) {
+void debug::ClearStepping(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
// Clear all current stepping setup.
isolate->debug()->ClearStepping();
}
-v8::Isolate* DebugInterface::Script::GetIsolate() const {
+v8::Isolate* debug::Script::GetIsolate() const {
return reinterpret_cast<v8::Isolate*>(Utils::OpenHandle(this)->GetIsolate());
}
-ScriptOriginOptions DebugInterface::Script::OriginOptions() const {
+ScriptOriginOptions debug::Script::OriginOptions() const {
return Utils::OpenHandle(this)->origin_options();
}
-bool DebugInterface::Script::WasCompiled() const {
+bool debug::Script::WasCompiled() const {
return Utils::OpenHandle(this)->compilation_state() ==
i::Script::COMPILATION_STATE_COMPILED;
}
-int DebugInterface::Script::Id() const { return Utils::OpenHandle(this)->id(); }
+int debug::Script::Id() const { return Utils::OpenHandle(this)->id(); }
-int DebugInterface::Script::LineOffset() const {
+int debug::Script::LineOffset() const {
return Utils::OpenHandle(this)->line_offset();
}
-int DebugInterface::Script::ColumnOffset() const {
+int debug::Script::ColumnOffset() const {
return Utils::OpenHandle(this)->column_offset();
}
-std::vector<int> DebugInterface::Script::LineEnds() const {
+std::vector<int> debug::Script::LineEnds() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
i::Script::InitLineEnds(script);
@@ -8931,7 +9120,7 @@ std::vector<int> DebugInterface::Script::LineEnds() const {
return result;
}
-MaybeLocal<String> DebugInterface::Script::Name() const {
+MaybeLocal<String> debug::Script::Name() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8941,7 +9130,7 @@ MaybeLocal<String> DebugInterface::Script::Name() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
-MaybeLocal<String> DebugInterface::Script::SourceURL() const {
+MaybeLocal<String> debug::Script::SourceURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8951,7 +9140,7 @@ MaybeLocal<String> DebugInterface::Script::SourceURL() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
-MaybeLocal<String> DebugInterface::Script::SourceMappingURL() const {
+MaybeLocal<String> debug::Script::SourceMappingURL() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8961,17 +9150,15 @@ MaybeLocal<String> DebugInterface::Script::SourceMappingURL() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
-MaybeLocal<String> DebugInterface::Script::ContextData() const {
+MaybeLocal<Value> debug::Script::ContextData() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
i::Handle<i::Object> value(script->context_data(), isolate);
- if (!value->IsString()) return MaybeLocal<String>();
- return Utils::ToLocal(
- handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
+ return Utils::ToLocal(handle_scope.CloseAndEscape(value));
}
-MaybeLocal<String> DebugInterface::Script::Source() const {
+MaybeLocal<String> debug::Script::Source() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
@@ -8981,17 +9168,25 @@ MaybeLocal<String> DebugInterface::Script::Source() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
+bool debug::Script::IsWasm() const {
+ return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
+}
+
namespace {
int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
return i::Smi::cast(array->get(index))->value();
}
} // namespace
-bool DebugInterface::Script::GetPossibleBreakpoints(
- const Location& start, const Location& end,
- std::vector<Location>* locations) const {
+bool debug::Script::GetPossibleBreakpoints(
+ const debug::Location& start, const debug::Location& end,
+ std::vector<debug::Location>* locations) const {
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) {
+ // TODO(clemensh): Return the proper thing once we support wasm breakpoints.
+ return false;
+ }
i::Script::InitLineEnds(script);
CHECK(script->line_ends()->IsFixedArray());
@@ -9027,7 +9222,7 @@ bool DebugInterface::Script::GetPossibleBreakpoints(
if (current_line_end_index > 0) {
line_offset = GetSmiValue(line_ends, current_line_end_index - 1) + 1;
}
- locations->push_back(Location(
+ locations->push_back(debug::Location(
current_line_end_index + script->line_offset(),
offset - line_offset +
(current_line_end_index == 0 ? script->column_offset() : 0)));
@@ -9035,8 +9230,12 @@ bool DebugInterface::Script::GetPossibleBreakpoints(
return true;
}
-int DebugInterface::Script::GetSourcePosition(const Location& location) const {
+int debug::Script::GetSourcePosition(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
+ if (script->type() == i::Script::TYPE_WASM) {
+ // TODO(clemensh): Return the proper thing for wasm.
+ return 0;
+ }
int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
int column = location.GetColumnNumber();
@@ -9057,8 +9256,8 @@ int DebugInterface::Script::GetSourcePosition(const Location& location) const {
return std::min(prev_line_offset + column + 1, line_offset);
}
-MaybeLocal<DebugInterface::Script> DebugInterface::Script::Wrap(
- v8::Isolate* v8_isolate, v8::Local<v8::Object> script) {
+MaybeLocal<debug::Script> debug::Script::Wrap(v8::Isolate* v8_isolate,
+ v8::Local<v8::Object> script) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
i::HandleScope handle_scope(isolate);
@@ -9070,36 +9269,71 @@ MaybeLocal<DebugInterface::Script> DebugInterface::Script::Wrap(
return MaybeLocal<Script>();
}
i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
- if (script_obj->type() != i::Script::TYPE_NORMAL) return MaybeLocal<Script>();
- return ToApiHandle<DebugInterface::Script>(
- handle_scope.CloseAndEscape(script_obj));
+ if (script_obj->type() != i::Script::TYPE_NORMAL &&
+ script_obj->type() != i::Script::TYPE_WASM) {
+ return MaybeLocal<Script>();
+ }
+ return ToApiHandle<debug::Script>(handle_scope.CloseAndEscape(script_obj));
+}
+
+debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
+ CHECK(script->IsWasm());
+ return static_cast<WasmScript*>(script);
+}
+
+int debug::WasmScript::NumFunctions() const {
+ i::DisallowHeapAllocation no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::WasmCompiledModule* compiled_module =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ DCHECK_GE(i::kMaxInt, compiled_module->module()->functions.size());
+ return static_cast<int>(compiled_module->module()->functions.size());
}
-DebugInterface::Location::Location(int lineNumber, int columnNumber)
- : lineNumber_(lineNumber), columnNumber_(columnNumber) {
- CHECK(lineNumber >= 0);
- CHECK(columnNumber >= 0);
+int debug::WasmScript::NumImportedFunctions() const {
+ i::DisallowHeapAllocation no_gc;
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::WasmCompiledModule* compiled_module =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ DCHECK_GE(i::kMaxInt, compiled_module->module()->num_imported_functions);
+ return static_cast<int>(compiled_module->module()->num_imported_functions);
+}
+
+debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
+ int function_index) const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ DCHECK_EQ(i::Script::TYPE_WASM, script->type());
+ i::WasmCompiledModule* compiled_module =
+ i::WasmCompiledModule::cast(script->wasm_compiled_module());
+ return compiled_module->DisassembleFunction(function_index);
+}
+
+debug::Location::Location(int line_number, int column_number)
+ : line_number_(line_number), column_number_(column_number) {
+ CHECK(line_number >= 0);
+ CHECK(column_number >= 0);
}
-DebugInterface::Location::Location() : lineNumber_(-1), columnNumber_(-1) {}
+debug::Location::Location() : line_number_(-1), column_number_(-1) {}
-int DebugInterface::Location::GetLineNumber() const {
- CHECK(lineNumber_ >= 0);
- return lineNumber_;
+int debug::Location::GetLineNumber() const {
+ CHECK(line_number_ >= 0);
+ return line_number_;
}
-int DebugInterface::Location::GetColumnNumber() const {
- CHECK(columnNumber_ >= 0);
- return columnNumber_;
+int debug::Location::GetColumnNumber() const {
+ CHECK(column_number_ >= 0);
+ return column_number_;
}
-bool DebugInterface::Location::IsEmpty() const {
- return lineNumber_ == -1 && columnNumber_ == -1;
+bool debug::Location::IsEmpty() const {
+ return line_number_ == -1 && column_number_ == -1;
}
-void DebugInterface::GetLoadedScripts(
- v8::Isolate* v8_isolate,
- PersistentValueVector<DebugInterface::Script>& scripts) {
+void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
+ PersistentValueVector<debug::Script>& scripts) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
// TODO(kozyatinskiy): remove this GC once tests are dealt with.
@@ -9120,6 +9354,42 @@ void DebugInterface::GetLoadedScripts(
}
}
+MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
+ Local<String> source) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, UnboundScript);
+ i::ScriptData* script_data = NULL;
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Handle<i::SharedFunctionInfo> result;
+ {
+ ScriptOriginOptions origin_options;
+ result = i::Compiler::GetSharedFunctionInfoForScript(
+ str, i::Handle<i::Object>(), 0, 0, origin_options,
+ i::Handle<i::Object>(), isolate->native_context(), NULL, &script_data,
+ ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE, false);
+ has_pending_exception = result.is_null();
+ RETURN_ON_FAILED_EXECUTION(UnboundScript);
+ }
+ RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
+}
+
+void debug::SetAsyncTaskListener(Isolate* v8_isolate,
+ debug::AsyncTaskListener listener,
+ void* data) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ isolate->debug()->SetAsyncTaskListener(listener, data);
+}
+
+int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8(isolate);
+ i::Handle<i::Object> object = Utils::OpenHandle(*value);
+ if (object->IsSmi()) return i::kPointerSize;
+ CHECK(object->IsHeapObject());
+ return i::Handle<i::HeapObject>::cast(object)->Size();
+}
+
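A sketch of inspector-side usage, assuming a live isolate and a
v8::Local<v8::Value> value in scope:

    // Smis are immediate values and report the tagged-pointer size; heap
    // objects report their actual allocated size.
    int approx_bytes = v8::debug::EstimatedValueSize(isolate, value);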
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@@ -9392,13 +9662,12 @@ size_t HeapGraphNode::GetShallowSize() const {
int HeapGraphNode::GetChildrenCount() const {
- return ToInternal(this)->children().length();
+ return ToInternal(this)->children_count();
}
const HeapGraphEdge* HeapGraphNode::GetChild(int index) const {
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->children()[index]);
+ return reinterpret_cast<const HeapGraphEdge*>(ToInternal(this)->child(index));
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index 6fcaa90a5e..ce9a6aae8a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -11,7 +11,6 @@
#include "src/factory.h"
#include "src/isolate.h"
#include "src/list.h"
-#include "src/objects-inl.h"
namespace v8 {
@@ -110,7 +109,7 @@ class RegisteredExtension {
V(StackFrame, JSObject) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
- V(DebugInterface::Script, Script)
+ V(debug::Script, Script)
class Utils {
public:
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index d5d2c02bf0..965232438a 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -41,7 +41,8 @@ class Arguments BASE_EMBEDDED {
index * kPointerSize));
}
- template <class S> Handle<S> at(int index) {
+ template <class S = Object>
+ Handle<S> at(int index) {
Object** value = &((*this)[index]);
// This cast checks that the object we're accessing does indeed have the
// expected type.
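With the new default template argument, runtime functions that only need an
untyped handle can drop the explicit parameter; typed access is unchanged.
Sketch (inside namespace v8::internal):

    Handle<Object> receiver = args.at(0);      // previously args.at<Object>(0)
    Handle<String> name = args.at<String>(1);  // explicit form still works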
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index bc501b1099..4e97f95ed3 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -48,7 +48,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsSimd128() { return true; }
int DoubleRegister::NumRegisters() {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index d90dc76252..1350dc41a3 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -351,13 +351,18 @@ Address RelocInfo::wasm_global_reference() {
return Assembler::target_address_at(pc_, host_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -483,30 +488,6 @@ void NeonMemOperand::SetAlignment(int align) {
}
}
-
-NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
- base_ = base;
- switch (registers_count) {
- case 1:
- type_ = nlt_1;
- break;
- case 2:
- type_ = nlt_2;
- break;
- case 3:
- type_ = nlt_3;
- break;
- case 4:
- type_ = nlt_4;
- break;
- default:
- UNREACHABLE();
- type_ = nlt_1;
- break;
- }
-}
-
-
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -2873,7 +2854,6 @@ void Assembler::vmov(const DwVfpRegister dst,
vm);
}
-
void Assembler::vmov(const DwVfpRegister dst,
const VmovIndex index,
const Register src,
@@ -2969,7 +2949,6 @@ void Assembler::vmov(const Register dst,
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
-
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
enum VFPType { S32, U32, F32, F64 };
@@ -3903,28 +3882,743 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
(dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
-void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
- DCHECK(VfpRegisterIsAvailable(srcdst0));
- DCHECK(VfpRegisterIsAvailable(srcdst1));
- DCHECK(!srcdst0.is(kScratchDoubleReg));
- DCHECK(!srcdst1.is(kScratchDoubleReg));
+static int EncodeScalar(NeonDataType dt, int index) {
+ int opc1_opc2 = 0;
+ DCHECK_LE(0, index);
+ switch (dt) {
+ case NeonS8:
+ case NeonU8:
+ DCHECK_GT(8, index);
+ opc1_opc2 = 0x8 | index;
+ break;
+ case NeonS16:
+ case NeonU16:
+ DCHECK_GT(4, index);
+ opc1_opc2 = 0x1 | (index << 1);
+ break;
+ case NeonS32:
+ case NeonU32:
+ DCHECK_GT(2, index);
+ opc1_opc2 = index << 2;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
+}
+
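A worked instance of the encoding above, for a halfword lane:

    // EncodeScalar(NeonS16, 2):
    //   opc1_opc2 = 0x1 | (2 << 1) = 0x5
    //   result    = (0x5 >> 2) * B21 | (0x5 & 0x3) * B5 = B21 | B5
    // i.e. opc1 = 0b01 and opc2 = 0b01, selecting 16-bit lane 2.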
+void Assembler::vmov(NeonDataType dt, DwVfpRegister dst, int index,
+ Register src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.940.
+ // vmov ARM core register to scalar.
+ DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int opc1_opc2 = EncodeScalar(dt, index);
+ emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
+ opc1_opc2);
+}
+
+void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
+ int index) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.942.
+ // vmov Arm scalar to core register.
+ DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
+ int vn, n;
+ src.split_code(&vn, &n);
+ int opc1_opc2 = EncodeScalar(dt, index);
+ int u = (dt & NeonDataTypeUMask) != 0 ? 1 : 0;
+ emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
+ n * B7 | B4 | opc1_opc2);
+}
- if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
+void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8-938.
+ // vmov is encoded as vorr.
+ vorr(dst, src, src);
+}
- if (CpuFeatures::IsSupported(NEON)) {
- // Instruction details available in ARM DDI 0406C.b, A8.8.418.
- // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
- // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
- int vd, d;
- srcdst0.split_code(&vd, &d);
- int vm, m;
- srcdst1.split_code(&vm, &m);
- emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
+void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // Instruction details available in ARM DDI 0406C.b, A8-966.
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
+}
+
+void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+ // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+ // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
+}
+
+void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
+ // Instruction details available in ARM DDI 0406C.b, A8.8.418.
+ // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
+ // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
+ DCHECK(IsEnabled(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
+ vm);
+}
+
+void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
+ const Register src) {
+ DCHECK(IsEnabled(NEON));
+ // Instruction details available in ARM DDI 0406C.b, A8-886.
+ int B = 0, E = 0;
+ switch (size) {
+ case Neon8:
+ B = 1;
+ break;
+ case Neon16:
+ E = 1;
+ break;
+ case Neon32:
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ int vd, d;
+ dst.split_code(&vd, &d);
+
+ emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
+ 0xB * B8 | d * B7 | E * B5 | B4);
+}
+
+void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // Instruction details available in ARM DDI 0406C.b, A8-884.
+ int index = src.code() & 1;
+ int d_reg = src.code() / 2;
+ int imm4 = 4 | index << 3; // esize = 32, index in bit 3.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ DwVfpRegister::from_code(d_reg).split_code(&vm, &m);
+
+ emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 | 0x18 * B7 |
+ B6 | m * B5 | vm);
+}
+
+// Encode NEON vcvt.dst_type.src_type instruction.
+static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
+ const VFPType src_type, const QwNeonRegister src) {
+ DCHECK(src_type != dst_type);
+ DCHECK(src_type == F32 || dst_type == F32);
+ // Instruction details available in ARM DDI 0406C.b, A8.8.868.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+
+ int op = 0;
+ if (src_type == F32) {
+ DCHECK(dst_type == S32 || dst_type == U32);
+ op = dst_type == U32 ? 3 : 2;
} else {
- vmov(kScratchDoubleReg, srcdst0);
- vmov(srcdst0, srcdst1);
- vmov(srcdst1, kScratchDoubleReg);
+ DCHECK(src_type == S32 || src_type == U32);
+ op = src_type == U32 ? 1 : 0;
+ }
+
+ return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
+ B6 | m * B5 | vm;
+}
+
+void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
+ emit(EncodeNeonVCVT(F32, dst, S32, src));
+}
+
+void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
+ emit(EncodeNeonVCVT(F32, dst, U32, src));
+}
+
+void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
+ emit(EncodeNeonVCVT(S32, dst, F32, src));
+}
+
+void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ DCHECK(VfpRegisterIsAvailable(dst));
+ DCHECK(VfpRegisterIsAvailable(src));
+ emit(EncodeNeonVCVT(U32, dst, F32, src));
+}
+
+// op is instr->Bits(11, 7).
+static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
+ const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK_IMPLIES(is_float, size == Neon32);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ int F = is_float ? 1 : 0;
+ return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
+ F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
+}
+
+void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
+  // Qd = vabs.f32(Qm) SIMD floating point absolute value.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.824.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
+}
+
+void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+  // Qd = vabs.s<size>(Qm) SIMD integer absolute value.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.824.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
+}
+
+void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
+  // Qd = vneg.f32(Qm) SIMD floating point negate.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.968.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
+}
+
+void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+  // Qd = vneg.s<size>(Qm) SIMD integer negate.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.968.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
+}
+
+void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
+ DwVfpRegister src2) {
+ // Dd = veor(Dn, Dm) 64 bit integer exclusive OR.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.888.
+ DCHECK(IsEnabled(NEON));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
+ B4 | vm);
+}
+
+enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
+
+static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
+ const QwNeonRegister dst,
+ const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ int op_encoding = 0;
+ switch (op) {
+ case VBIC:
+ op_encoding = 0x1 * B20;
+ break;
+ case VBIF:
+ op_encoding = B24 | 0x3 * B20;
+ break;
+ case VBIT:
+ op_encoding = B24 | 0x2 * B20;
+ break;
+ case VBSL:
+ op_encoding = B24 | 0x1 * B20;
+ break;
+ case VEOR:
+ op_encoding = B24;
+ break;
+ case VORR:
+ op_encoding = 0x2 * B20;
+ break;
+ case VORN:
+ op_encoding = 0x3 * B20;
+ break;
+ case VAND:
+ // op_encoding is 0.
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
+ n * B7 | B6 | m * B5 | B4 | vm;
+}
+
+void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ // Qd = vand(Qn, Qm) SIMD AND.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.836.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
+}
+
+void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vbsl(Qn, Qm) SIMD bitwise select.
+ // Instruction details available in ARM DDI 0406C.b, A8-844.
+ emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
+}
+
+void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ // Qd = veor(Qn, Qm) SIMD exclusive OR.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.888.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
+}
+
+void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ // Qd = vorr(Qn, Qm) SIMD OR.
+ // Instruction details available in ARM DDI 0406C.b, A8.8.976.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
+}
+
+void Assembler::vadd(QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vadd(Qn, Qm) SIMD floating point addition.
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
+ m * B5 | vm);
+}
+
+void Assembler::vadd(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vadd(Qn, Qm) SIMD integer addition.
+ // Instruction details available in ARM DDI 0406C.b, A8-828.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
+ n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vsub(QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vsub(Qn, Qm) SIMD floating point subtraction.
+ // Instruction details available in ARM DDI 0406C.b, A8-1086.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(0x1E4U * B23 | d * B22 | B21 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
+ B6 | m * B5 | vm);
+}
+
+void Assembler::vsub(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vsub(Qn, Qm) SIMD integer subtraction.
+ // Instruction details available in ARM DDI 0406C.b, A8-1084.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
+ n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vmul(QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+  // Qd = vmul(Qn, Qm) SIMD floating point multiply.
+ // Instruction details available in ARM DDI 0406C.b, A8-958.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
+ m * B5 | B4 | vm);
+}
+
+void Assembler::vmul(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+  // Qd = vmul(Qn, Qm) SIMD integer multiply.
+ // Instruction details available in ARM DDI 0406C.b, A8-960.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x9 * B8 |
+ n * B7 | B6 | m * B5 | B4 | vm);
+}
+
+static Instr EncodeNeonMinMax(bool is_min, QwNeonRegister dst,
+ QwNeonRegister src1, QwNeonRegister src2) {
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int min = is_min ? 1 : 0;
+ return 0x1E4U * B23 | d * B22 | min * B21 | vn * B16 | vd * B12 | 0xF * B8 |
+ n * B7 | B6 | m * B5 | vm;
+}
+
+static Instr EncodeNeonMinMax(bool is_min, NeonDataType dt, QwNeonRegister dst,
+ QwNeonRegister src1, QwNeonRegister src2) {
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int min = is_min ? 1 : 0;
+ int size = (dt & NeonDataTypeSizeMask) / 2;
+ int U = dt & NeonDataTypeUMask;
+ return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
+ 0x6 * B8 | B6 | m * B5 | min * B4 | vm;
+}
+
+void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vmin(Qn, Qm) SIMD floating point MIN.
+ // Instruction details available in ARM DDI 0406C.b, A8-928.
+ emit(EncodeNeonMinMax(true, dst, src1, src2));
+}
+
+void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vmin(Qn, Qm) SIMD integer MIN.
+ // Instruction details available in ARM DDI 0406C.b, A8-926.
+ emit(EncodeNeonMinMax(true, dt, dst, src1, src2));
+}
+
+void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vmax(Qn, Qm) SIMD floating point MAX.
+ // Instruction details available in ARM DDI 0406C.b, A8-928.
+ emit(EncodeNeonMinMax(false, dst, src1, src2));
+}
+
+void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
+ QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vmax(Qn, Qm) SIMD integer MAX.
+ // Instruction details available in ARM DDI 0406C.b, A8-926.
+ emit(EncodeNeonMinMax(false, dt, dst, src1, src2));
+}
+
+static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
+ QwNeonRegister src) {
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ int rsqrt = is_rsqrt ? 1 : 0;
+ return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x5 * B8 |
+ rsqrt * B7 | B6 | m * B5 | vm;
+}
+
+void Assembler::vrecpe(const QwNeonRegister dst, const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vrecpe(Qm) SIMD reciprocal estimate.
+ // Instruction details available in ARM DDI 0406C.b, A8-1024.
+ emit(EncodeNeonEstimateOp(false, dst, src));
+}
+
+void Assembler::vrsqrte(const QwNeonRegister dst, const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
+ // Instruction details available in ARM DDI 0406C.b, A8-1038.
+ emit(EncodeNeonEstimateOp(true, dst, src));
+}
+
+static Instr EncodeNeonRefinementOp(bool is_rsqrt, QwNeonRegister dst,
+ QwNeonRegister src1, QwNeonRegister src2) {
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int rsqrt = is_rsqrt ? 1 : 0;
+ return 0x1E4U * B23 | d * B22 | rsqrt * B21 | vn * B16 | vd * B12 | 0xF * B8 |
+ n * B7 | B6 | m * B5 | B4 | vm;
+}
+
+void Assembler::vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
+ // Instruction details available in ARM DDI 0406C.b, A8-1026.
+ emit(EncodeNeonRefinementOp(false, dst, src1, src2));
+}
+
+void Assembler::vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
+ // Instruction details available in ARM DDI 0406C.b, A8-1040.
+ emit(EncodeNeonRefinementOp(true, dst, src1, src2));
+}
+
+void Assembler::vtst(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vtst(Qn, Qm) SIMD test integer operands.
+ // Instruction details available in ARM DDI 0406C.b, A8-1098.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
+ n * B7 | B6 | m * B5 | B4 | vm);
+}
+
+void Assembler::vceq(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vceq(Qn, Qm) SIMD floating point compare equal.
+ // Instruction details available in ARM DDI 0406C.b, A8-844.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xe * B8 | n * B7 | B6 |
+ m * B5 | vm);
+}
+
+void Assembler::vceq(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vceq(Qn, Qm) SIMD integer compare equal.
+ // Instruction details available in ARM DDI 0406C.b, A8-844.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
+ n * B7 | B6 | m * B5 | B4 | vm);
+}
+
+static Instr EncodeNeonCompareOp(const QwNeonRegister dst,
+ const QwNeonRegister src1,
+ const QwNeonRegister src2, Condition cond) {
+ DCHECK(cond == ge || cond == gt);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int is_gt = (cond == gt) ? 1 : 0;
+ return 0x1E6U * B23 | d * B22 | is_gt * B21 | vn * B16 | vd * B12 | 0xe * B8 |
+ n * B7 | B6 | m * B5 | vm;
+}
+
+static Instr EncodeNeonCompareOp(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src1,
+ const QwNeonRegister src2, Condition cond) {
+ DCHECK(cond == ge || cond == gt);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ int size = (dt & NeonDataTypeSizeMask) / 2;
+ int U = dt & NeonDataTypeUMask;
+ int is_ge = (cond == ge) ? 1 : 0;
+ return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
+ 0x3 * B8 | n * B7 | B6 | m * B5 | is_ge * B4 | vm;
+}
+
+void Assembler::vcge(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
+ // Instruction details available in ARM DDI 0406C.b, A8-848.
+ emit(EncodeNeonCompareOp(dst, src1, src2, ge));
+}
+
+void Assembler::vcge(NeonDataType dt, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
+ // Instruction details available in ARM DDI 0406C.b, A8-848.
+ emit(EncodeNeonCompareOp(dt, dst, src1, src2, ge));
+}
+
+void Assembler::vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
+ // Instruction details available in ARM DDI 0406C.b, A8-852.
+ emit(EncodeNeonCompareOp(dst, src1, src2, gt));
+}
+
+void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
+ // Instruction details available in ARM DDI 0406C.b, A8-852.
+ emit(EncodeNeonCompareOp(dt, dst, src1, src2, gt));
+}
+
+void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2, int bytes) {
+ DCHECK(IsEnabled(NEON));
+ // Qd = vext(Qn, Qm) SIMD byte extract.
+ // Instruction details available in ARM DDI 0406C.b, A8-890.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ DCHECK_GT(16, bytes);
+ emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
+ n * B7 | B6 | m * B5 | vm);
+}
+
+void Assembler::vzip(NeonSize size, QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+  // Qd = vzip.<size>(Qm) SIMD zip (interleave).
+ // Instruction details available in ARM DDI 0406C.b, A8-1102.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
+ 0x3 * B7 | B6 | m * B5 | vm);
+}
+
+static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
+ const QwNeonRegister dst,
+ const QwNeonRegister src) {
+  // Qd = vrev<op_size>.<size>(Qm) SIMD element reverse.
+ // Instruction details available in ARM DDI 0406C.b, A8-1028.
+ DCHECK_GT(op_size, static_cast<int>(size));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ int sz = static_cast<int>(size);
+ int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
+ return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
+ B6 | m * B5 | vm;
+}
+
+void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonVREV(Neon16, size, dst, src));
+}
+
+void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonVREV(Neon32, size, dst, src));
+}
+
+void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src) {
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonVREV(Neon64, size, dst, src));
+}
+
+// Encode NEON vtbl / vtbx instruction.
+static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
+ const DwVfpRegister index, bool vtbx) {
+ // Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
+ // Instruction details available in ARM DDI 0406C.b, A8-1094.
+ // Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
+ // Instruction details available in ARM DDI 0406C.b, A8-1094.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ list.base().split_code(&vn, &n);
+ int vm, m;
+ index.split_code(&vm, &m);
+ int op = vtbx ? 1 : 0; // vtbl = 0, vtbx = 1.
+ return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
+ list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
+}
+
+void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
+ const DwVfpRegister index) {
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonVTB(dst, list, index, false));
+}
+
+void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
+ const DwVfpRegister index) {
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonVTB(dst, list, index, true));
}
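Taken together, the new Q-register forms compose like the existing D-register
assembler API. A sketch of a per-lane f32 select sequence a code generator
might emit (macro-assembler context assumed):

    __ vdup(Neon32, q0, r0);  // broadcast a core register to all four lanes
    __ vmul(q1, q1, q0);      // f32 multiply
    __ vcge(q2, q1, q0);      // f32 compare; q2 becomes a per-lane mask
    __ vbsl(q2, q1, q0);      // per-lane select: q2 = mask ? q1 : q0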
// Pseudo instructions.
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 1283c3984a..e73c5a170f 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -302,6 +302,20 @@ struct QwNeonRegister {
*m = (encoded_code & 0x10) >> 4;
*vm = encoded_code & 0x0F;
}
+  DwVfpRegister low() const {
+    DwVfpRegister reg;
+    reg.reg_code = reg_code * 2;
+    DCHECK(reg.is_valid());
+    return reg;
+  }
+  DwVfpRegister high() const {
+    DwVfpRegister reg;
+    reg.reg_code = reg_code * 2 + 1;
+    DCHECK(reg.is_valid());
+    return reg;
+  }
int reg_code;
};
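Each Q register aliases a pair of D registers, which low() and high() expose
directly. For example:

    QwNeonRegister q = q1;
    DwVfpRegister lo = q.low();   // d2 (reg_code 1 * 2)
    DwVfpRegister hi = q.high();  // d3 (reg_code 1 * 2 + 1)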
@@ -403,9 +417,11 @@ const QwNeonRegister q15 = { 15 };
// compilation unit that includes this header doesn't use the variables.
#define kFirstCalleeSavedDoubleReg d8
#define kLastCalleeSavedDoubleReg d15
+// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
#define kDoubleRegZero d14
#define kScratchDoubleReg d15
-
+// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
+#define kScratchQuadReg q7
// Coprocessor register
struct CRegister {
@@ -624,12 +640,26 @@ class NeonMemOperand BASE_EMBEDDED {
// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
public:
- explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
+ explicit NeonListOperand(DoubleRegister base, int register_count = 1)
+ : base_(base), register_count_(register_count) {}
+ explicit NeonListOperand(QwNeonRegister q_reg)
+ : base_(q_reg.low()), register_count_(2) {}
DoubleRegister base() const { return base_; }
- NeonListType type() const { return type_; }
+  int register_count() const { return register_count_; }
+ int length() const { return register_count_ - 1; }
+ NeonListType type() const {
+ switch (register_count_) {
+ default: UNREACHABLE();
+ // Fall through.
+ case 1: return nlt_1;
+ case 2: return nlt_2;
+ case 3: return nlt_3;
+ case 4: return nlt_4;
+ }
+ }
private:
DoubleRegister base_;
- NeonListType type_;
+ int register_count_;
};
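The reworked operand derives its list type from the register count, and the
new constructor expands a Q register into its aliased D-register pair. For
example:

    NeonListOperand pair(d0, 2);  // d0-d1; type() == nlt_2, length() == 1
    NeonListOperand from_q(q0);   // equivalent: base q0.low() == d0, count 2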
@@ -1133,6 +1163,8 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
+  // TODO(bbudge): Replace uses of these with the more general core register
+  // to scalar register vmov variants.
void vmov(const DwVfpRegister dst,
const VmovIndex index,
const Register src,
@@ -1313,8 +1345,86 @@ class Assembler : public AssemblerBase {
const NeonMemOperand& dst);
void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
- // Currently, vswp supports only D0 to D31.
- void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+ // Only unconditional core <-> scalar moves are currently supported.
+ void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
+ void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
+
+ void vmov(const QwNeonRegister dst, const QwNeonRegister src);
+ void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
+ void vswp(DwVfpRegister dst, DwVfpRegister src);
+ void vswp(QwNeonRegister dst, QwNeonRegister src);
+  // Conditional execution is not supported for vdup.
+ void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
+ void vdup(const QwNeonRegister dst, const SwVfpRegister src);
+
+ void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
+ void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
+ void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
+ void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
+
+ void vabs(const QwNeonRegister dst, const QwNeonRegister src);
+ void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+ void vneg(const QwNeonRegister dst, const QwNeonRegister src);
+ void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+ void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
+ void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
+ void vadd(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vadd(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vsub(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vsub(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vmul(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vmul(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vmin(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vmin(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2);
+ void vmax(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vmax(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2);
+ // vrecpe and vrsqrte only support floating point lanes.
+ void vrecpe(const QwNeonRegister dst, const QwNeonRegister src);
+ void vrsqrte(const QwNeonRegister dst, const QwNeonRegister src);
+ void vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vtst(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vceq(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vceq(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vcge(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vcge(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2);
+ void vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2);
+ void vcgt(NeonDataType dt, const QwNeonRegister dst,
+ const QwNeonRegister src1, const QwNeonRegister src2);
+ void vext(const QwNeonRegister dst, const QwNeonRegister src1,
+ const QwNeonRegister src2, int bytes);
+ void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
+ void vrev16(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrev32(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vrev64(NeonSize size, const QwNeonRegister dst,
+ const QwNeonRegister src);
+ void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
+ const DwVfpRegister index);
+ void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
+ const DwVfpRegister index);
// Pseudo instructions
@@ -1395,9 +1505,6 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1611,6 +1718,12 @@ class Assembler : public AssemblerBase {
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
}
+ bool VfpRegisterIsAvailable(QwNeonRegister reg) {
+ DCHECK(reg.is_valid());
+ return IsEnabled(VFP32DREGS) ||
+ (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
+ }
+
private:
int next_buffer_check_; // pc offset of next buffer check
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 59f304d51d..60a8322d36 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -635,8 +624,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
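+      // The builtin may clobber the context register, so preserve cp
+      // across the call.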
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -805,7 +797,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2075,46 +2066,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ tst(dest, Operand(kPointerAlignmentMask));
- __ Check(eq, kDestinationOfCopyNotAligned);
- }
-
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ add(count, count, Operand(count), SetCC);
- }
-
- Register limit = count; // Read until dest equals this.
- __ add(limit, dest, Operand(count));
-
- Label loop_entry, loop;
- // Copy bytes from src to dest until dest hits limit.
- __ b(&loop_entry);
- __ bind(&loop);
- __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
- __ strb(scratch, MemOperand(dest, 1, PostIndex));
- __ bind(&loop_entry);
- __ cmp(dest, Operand(limit));
- __ b(lt, &loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2690,84 +2641,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ b(ne, miss);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1);
- __ sub(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ add(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- __ add(scratch2, elements, Operand(scratch2, LSL, 2));
- __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, Operand(ip));
- __ b(eq, done);
- }
-
- const int spill_mask =
- (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
- r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ stm(db_w, sp, spill_mask);
- if (name.is(r0)) {
- DCHECK(!elements.is(r1));
- __ Move(r1, name);
- __ Move(r0, elements);
- } else {
- __ Move(r0, elements);
- __ Move(r1, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmp(r0, Operand::Zero());
- __ mov(scratch2, Operand(r2));
- __ ldm(ia_w, sp, spill_mask);
-
- __ b(ne, done);
- __ b(eq, miss);
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3057,238 +2930,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ ldr(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &start_polymorphic);
- // found, now call handler.
- Register handler = feedback;
- __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ cmp(length, Operand(Smi::FromInt(2)));
- __ b(eq, miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
- __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ldr(cached_map, MemOperand(pointer_reg));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &prepare_next);
- __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&prepare_next);
- __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ cmp(pointer_reg, too_far);
- __ b(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ cmp(cached_map, receiver_map);
- __ b(ne, try_array);
- Register handler = feedback;
- __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
- __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ldr(cached_map, MemOperand(pointer_reg));
- __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ b(ne, &prepare_next);
- // Is it a transitioning store?
- __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ b(ne, &transition_call);
- __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&transition_call);
- __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
-
- __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- __ bind(&prepare_next);
- __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ cmp(pointer_reg, too_far);
- __ b(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
- Register key = StoreWithVectorDescriptor::NameRegister(); // r2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
- Register feedback = r5;
- Register receiver_map = r6;
- Register scratch1 = r9;
-
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- // We are using register r8, which is used for the embedded constant pool
- // when FLAG_enable_embedded_constant_pool is true.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- Register scratch2 = r8;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ b(ne, &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ b(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
- __ ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3648,123 +3289,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r1 : target
- // -- r3 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r1);
- __ AssertReceiver(r3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r2, &new_object);
- __ CompareObjectType(r2, r0, r0, MAP_TYPE);
- __ b(ne, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
- __ cmp(r0, r1);
- __ b(ne, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- r0 : result (tagged)
- // -- r1 : result fields (untagged)
- // -- r5 : result end (untagged)
- // -- r2 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
- __ tst(r3, Operand(Map::ConstructionCounter::kMask));
- __ b(ne, &slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(r1, r5, r6);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
- __ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
- __ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
- __ InitializeFieldsWithFiller(r1, r4, r6);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r1, r5, r6);
-
- // Check if we can finalize the instance size.
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ tst(r3, Operand(Map::ConstructionCounter::kMask));
- __ Ret(ne);
-
- // Finalize the instance size.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
- __ Push(r2, r4);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(r2);
- }
- __ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- __ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ sub(r5, r5, Operand(kHeapObjectTag));
- __ b(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(r1, r3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 30ae358eb0..6ec86ab292 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -280,14 +269,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index e63da5c766..06e92168b6 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -317,337 +317,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = r4;
- DCHECK(!AreAliased(receiver, key, value, target_map,
- scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch_elements, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- r9,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register lr contains the return address.
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = r4;
- Register length = r5;
- Register array = r6;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = r9;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, length, array, scratch2));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- // Use lr as a temporary register.
- __ mov(lr, Operand(length, LSL, 2));
- __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- __ sub(array, array, Operand(kHeapObjectTag));
- // array: destination FixedDoubleArray, not tagged as heap object.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r4: source FixedArray.
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
- __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ add(scratch1, array, Operand(kHeapObjectTag));
- __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- scratch1,
- scratch2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
- __ add(array_end, scratch2, Operand(length, LSL, 2));
-
- // Repurpose registers no longer in use.
- Register hole_lower = elements;
- Register hole_upper = length;
-
- __ mov(hole_lower, Operand(kHoleNanLower32));
- __ mov(hole_upper, Operand(kHoleNanUpper32));
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch2: begin of FixedDoubleArray element fields, not tagged
-
- __ b(&entry);
-
- __ bind(&only_change_map);
- __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(lr);
- __ b(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
- // lr: current element
- __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ vmov(s0, lr);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, scratch2, 0);
- __ add(scratch2, scratch2, Operand(8));
- __ b(&entry);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(lr);
- __ orr(lr, lr, Operand(1));
- __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray);
- }
- __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
-
- __ bind(&entry);
- __ cmp(scratch2, array_end);
- __ b(lt, &loop);
-
- __ pop(lr);
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register lr contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
- Register elements = r4;
- Register array = r6;
- Register length = r5;
- Register scratch = r9;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, array, length, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ b(eq, &only_change_map);
-
- __ push(lr);
- __ Push(target_map, receiver, key, value);
- __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedDoubleArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
- __ add(array_size, array_size, Operand(length, LSL, 1));
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- __ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
-
- __ sub(array, array, Operand(kHeapObjectTag));
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ add(src_elements, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
- __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ add(dst_end, dst_elements, Operand(length, LSL, 1));
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ b(&initialization_loop_entry);
- __ bind(&initialization_loop);
- __ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
- __ bind(&initialization_loop_entry);
- __ cmp(dst_elements, dst_end);
- __ b(lt, &initialization_loop);
-
- __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ add(array, array, Operand(kHeapObjectTag));
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses in src_elements to fully take advantage of
- // post-indexing.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields,
- // not tagged, +4
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // heap_number_map: heap number map
- __ b(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(target_map, receiver, key, value);
- __ pop(lr);
- __ b(fail);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ cmp(upper_bits, Operand(kHoleNanUpper32));
- __ b(eq, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
- &gc_required);
- // heap_number: new heap number
- __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
- __ Strd(scratch2, upper_bits,
- FieldMemOperand(heap_number, HeapNumber::kValueOffset));
- __ mov(scratch2, dst_elements);
- __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
- __ RecordWrite(array,
- scratch2,
- heap_number,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
-
- __ bind(&entry);
- __ cmp(dst_elements, dst_end);
- __ b(lt, &loop);
-
- __ Pop(target_map, receiver, key, value);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- array,
- scratch,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(lr);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@@ -771,31 +440,23 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
-                               MarkingParity* parity) {
-  if (IsYoungSequence(isolate, sequence)) {
-    *age = kNoAgeCodeAge;
-    *parity = NO_MARKING_PARITY;
-  } else {
-    Address target_address = Memory::Address_at(
-        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
-    Code* stub = GetCodeFromTargetAddress(target_address);
-    GetCodeAgeAndParity(stub, age, parity);
-  }
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
+  Address target_address = Memory::Address_at(
+      sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+  Code* stub = GetCodeFromTargetAddress(target_address);
+  return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
patcher.masm()->add(r0, pc, Operand(-8));
@@ -804,7 +465,6 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
}
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 2bade20fed..e0c91fd4bf 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -190,6 +190,7 @@ enum {
B7 = 1 << 7,
B8 = 1 << 8,
B9 = 1 << 9,
+ B10 = 1 << 10,
B12 = 1 << 12,
B16 = 1 << 16,
B17 = 1 << 17,
@@ -218,7 +219,6 @@ enum {
kOff8Mask = (1 << 8) - 1
};
-
enum BarrierOption {
OSHLD = 0x1,
OSHST = 0x2,
@@ -327,12 +327,12 @@ enum LFlag {
// NEON data type
enum NeonDataType {
- NeonS8 = 0x1, // U = 0, imm3 = 0b001
- NeonS16 = 0x2, // U = 0, imm3 = 0b010
- NeonS32 = 0x4, // U = 0, imm3 = 0b100
+ NeonS8 = 0x1, // U = 0, imm3 = 0b001
+ NeonS16 = 0x2, // U = 0, imm3 = 0b010
+ NeonS32 = 0x4, // U = 0, imm3 = 0b100
NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
- NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
+ NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
NeonDataTypeSizeMask = 0x7,
NeonDataTypeUMask = 1 << 24
};
@@ -374,10 +374,10 @@ const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
enum VFPRegPrecision {
kSinglePrecision = 0,
- kDoublePrecision = 1
+ kDoublePrecision = 1,
+ kSimd128Precision = 2
};
-
// VFP FPSCR constants.
enum VFPConversionMode {
kFPSCRRounding = 0,
@@ -667,15 +667,22 @@ class Instruction {
private:
- // Join split register codes, depending on single or double precision.
+ // Join split register codes, depending on register precision.
// four_bit is the position of the least-significant bit of the four
// bit specifier. one_bit is the position of the additional single bit
// specifier.
inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
if (pre == kSinglePrecision) {
return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
+ } else {
+ int reg_num = (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
+ if (pre == kDoublePrecision) {
+ return reg_num;
+ }
+ DCHECK_EQ(kSimd128Precision, pre);
+ DCHECK_EQ(reg_num & 1, 0);
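+      // A Q register is encoded as its even-numbered D alias (q<n> spans
+      // d<2n> and d<2n+1>), so halving recovers the Q register index.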
+ return reg_num / 2;
}
- return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
}
// We need to prevent the creation of instances of class Instruction.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index e408e85da3..db32fc98ce 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1419,6 +1419,9 @@ int Decoder::DecodeType7(Instruction* instr) {
// Sd = vsqrt(Sm)
// vmrs
// vmsr
+// Qd = vdup.size(Qd, Rt)
+// vmov.size: Dd[i] = Rt
+// vmov.sign.size: Rt = Dn[i]
void Decoder::DecodeTypeVFP(Instruction* instr) {
VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
VERIFY(instr->Bits(11, 9) == 0x5);
@@ -1531,21 +1534,71 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- if (instr->Bit(21) == 0x0) {
- Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
+ if (instr->Bit(23) == 0) {
+ int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
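+        // opc1:opc2 selects the element size: 1xxx -> 8-bit (lane in the
+        // low three bits), xxx1 -> 16-bit (lane in bits 2:1), 0x00 -> 32-bit.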
+ if ((opc1_opc2 & 0xb) == 0) {
+ // NeonS32/NeonU32
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
+ } else {
+ int vd = instr->VFPNRegValue(kDoublePrecision);
+ int rt = instr->RtValue();
+ if ((opc1_opc2 & 0x8) != 0) {
+ // NeonS8 / NeonU8
+ int i = opc1_opc2 & 0x7;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmov.8 d%d[%d], r%d", vd, i, rt);
+ } else if ((opc1_opc2 & 0x1) != 0) {
+ // NeonS16 / NeonU16
+ int i = (opc1_opc2 >> 1) & 0x3;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmov.16 d%d[%d], r%d", vd, i, rt);
+ } else {
+ Unknown(instr);
+ }
+ }
} else {
- Format(instr, "vmov'cond.32 'Dd[1], 'rt");
- }
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- if (instr->Bit(21) == 0x0) {
- Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
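+      // Bit 5 set selects 16-bit lanes; otherwise bit 22 set selects 8-bit,
+      // else 32-bit.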
+ int size = 32;
+ if (instr->Bit(5) != 0)
+ size = 16;
+ else if (instr->Bit(22) != 0)
+ size = 8;
+ int Vd = instr->VFPNRegValue(kSimd128Precision);
+ int Rt = instr->RtValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vdup.%i q%d, r%d", size, Vd, Rt);
+ }
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
+ int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+ if ((opc1_opc2 & 0xb) == 0) {
+ // NeonS32 / NeonU32
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
+ } else {
+ Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ }
} else {
- Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
+ const char* sign = instr->Bit(23) != 0 ? "u" : "s";
+ int rt = instr->RtValue();
+ int vn = instr->VFPNRegValue(kDoublePrecision);
+ if ((opc1_opc2 & 0x8) != 0) {
+ // NeonS8 / NeonU8
+ int i = opc1_opc2 & 0x7;
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
+ } else if ((opc1_opc2 & 0x1) != 0) {
+ // NeonS16 / NeonU16
+ int i = (opc1_opc2 >> 1) & 0x3;
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
+ sign, rt, vn, i);
+ } else {
+ Unknown(instr);
+ }
}
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@@ -1563,6 +1616,8 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
Format(instr, "vmrs'cond 'rt, FPSCR");
}
}
+ } else {
+ Unknown(instr); // Not used by V8.
}
}
}
@@ -1801,6 +1856,104 @@ static const char* const barrier_option_names[] = {
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
+ case 4:
+ if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
+ instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ if (Vm == Vn) {
+ // vmov Qd, Qm
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmov q%d, q%d", Vd, Vm);
+ } else {
+ // vorr Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vorr q%d, q%d, q%d", Vd, Vn, Vm);
+ }
+ } else if (instr->Bits(11, 8) == 8) {
+ const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
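+      // Bits(21, 20) holds log2 of the element size in bytes, giving 8-,
+      // 16-, or 32-bit integer lanes.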
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vadd/vtst.i<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d", op,
+ size, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0xd && instr->Bit(4) == 0) {
+ const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vadd/vsub.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vmul.i<size> Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
+ instr->Bit(4) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vceq.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
+ instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vand Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vand q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0x3) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
+ size, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
+ instr->Bit(6) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ if (instr->Bit(4) == 1) {
+ // vrecps/vrsqrts.f32 Qd, Qm, Qn.
+ const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else {
+ // vmin/max.f32 Qd, Qm, Qn.
+ const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ }
+ } else if (instr->Bits(11, 8) == 0x6) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
+ size, Vd, Vn, Vm);
+ } else {
+ Unknown(instr);
+ }
+ break;
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -1811,6 +1964,96 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
+ } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
+ // vext.8 Qd, Qm, Qn, imm4
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
+ Vd, Vn, Vm, imm4);
+ } else {
+ Unknown(instr);
+ }
+ break;
+ case 6:
+ if (instr->Bits(11, 8) == 8) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ if (instr->Bit(4) == 0) {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vsub.i%d q%d, q%d, q%d",
+ size, Vd, Vn, Vm);
+ } else {
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vceq.i%d q%d, q%d, q%d",
+ size, Vd, Vn, Vm);
+ }
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
+ instr->Bit(4) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vbsl q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
+ instr->Bit(4) == 1) {
+ if (instr->Bit(6) == 0) {
+ // veor Dd, Dn, Dm
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "veor d%d, d%d, d%d", Vd, Vn, Vm);
+
+ } else {
+ // veor Qd, Qn, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "veor q%d, q%d, q%d", Vd, Vn, Vm);
+ }
+ } else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
+ instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ // vmul.f32 Qd, Qn, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
+ instr->Bit(4) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
+ // vcge/vcgt.f32 Qd, Qm, Qn.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0x3) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
+ size, Vd, Vn, Vm);
+ } else if (instr->Bits(11, 8) == 0x6) {
+ int size = kBitsPerByte * (1 << instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
+ size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
@@ -1825,13 +2068,109 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
- } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
- (instr->Bit(4) == 0)) {
- int Vd = instr->VFPDRegValue(kDoublePrecision);
- int Vm = instr->VFPMRegValue(kDoublePrecision);
- char rtype = (instr->Bit(6) == 0) ? 'd' : 'q';
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vswp %c%d, %c%d", rtype, Vd, rtype, Vm);
+ } else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
+ instr->Bit(4) == 0) {
+ if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
+ if (instr->Bit(6) == 0) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vswp d%d, d%d", Vd, Vm);
+ } else {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vswp q%d, q%d", Vd, Vm);
+ }
+ } else if (instr->Bits(11, 7) == 0x18) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int index = instr->Bit(19);
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vdup q%d, d%d[%d]", Vd, Vm, index);
+ } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
+ } else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
+ instr->Bit(6) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ const char* suffix = nullptr;
+ int op = instr->Bits(8, 7);
+ switch (op) {
+ case 0:
+ suffix = "f32.s32";
+ break;
+ case 1:
+ suffix = "f32.u32";
+ break;
+ case 2:
+ suffix = "s32.f32";
+ break;
+ case 3:
+ suffix = "u32.f32";
+ break;
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vcvt.%s q%d, q%d", suffix, Vd, Vm);
+ } else if (instr->Bits(11, 10) == 0x2) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ int len = instr->Bits(9, 8);
+ NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
+ instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
+ FormatNeonList(Vn, list.type());
+ Print(", ");
+ PrintDRegister(Vm);
+ } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+ // vzip.<size> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vzip.%d q%d, q%d", size, Vd, Vm);
+ } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int size = kBitsPerByte * (1 << instr->Bits(19, 18));
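+        // Bits(8, 7) selects vrev64, vrev32, or vrev16 (0, 1, 2
+        // respectively).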
+ int op = kBitsPerByte
+ << (static_cast<int>(Neon64) - instr->Bits(8, 7));
+ // vrev<op>.<size> Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vrev%d.%d q%d, q%d", op, size, Vd, Vm);
+ } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int size = kBitsPerByte * (1 << instr->Bits(19, 18));
+ const char* type = instr->Bit(10) != 0 ? "f" : "s";
+ if (instr->Bits(9, 6) == 0xd) {
+ // vabs<type>.<size> Qd, Qm.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%s%d q%d, q%d",
+ type, size, Vd, Vm);
+ } else if (instr->Bits(9, 6) == 0xf) {
+ // vneg<type>.<size> Qd, Qm.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%s%d q%d, q%d",
+ type, size, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
+ } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
+ // vrecpe/vrsqrte.f32 Qd, Qm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s.f32 q%d, q%d", op, Vd, Vm);
+ } else {
+ Unknown(instr);
+ }
} else {
Unknown(instr);
}
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 75161afcd7..506c891038 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -66,13 +66,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r1, r3};
+ Register registers[] = {r1, r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index c67fad8e1d..c3d825b87b 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -264,6 +264,35 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
}
}
+void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
+ if (!dst.is(src)) {
+ vmov(dst, src);
+ }
+}
+
+void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
+ if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
+
+ DCHECK(VfpRegisterIsAvailable(srcdst0));
+ DCHECK(VfpRegisterIsAvailable(srcdst1));
+
+ if (CpuFeatures::IsSupported(NEON)) {
+ vswp(srcdst0, srcdst1);
+ } else {
+ DCHECK(!srcdst0.is(kScratchDoubleReg));
+ DCHECK(!srcdst1.is(kScratchDoubleReg));
+ vmov(kScratchDoubleReg, srcdst0);
+ vmov(srcdst0, srcdst1);
+ vmov(srcdst1, kScratchDoubleReg);
+ }
+}
+
+void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
+ if (!srcdst0.is(srcdst1)) {
+ vswp(srcdst0, srcdst1);
+ }
+}
+
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
if (CpuFeatures::IsSupported(ARMv7)) {
@@ -1052,8 +1081,8 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::VmovExtended(Register dst, int src_code) {
- DCHECK_LE(32, src_code);
- DCHECK_GT(64, src_code);
+ DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
+ DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
if (src_code & 0x1) {
VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
} else {
@@ -1062,8 +1091,8 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) {
}
void MacroAssembler::VmovExtended(int dst_code, Register src) {
- DCHECK_LE(32, dst_code);
- DCHECK_GT(64, dst_code);
+ DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
+ DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
} else {
@@ -1073,22 +1102,23 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
void MacroAssembler::VmovExtended(int dst_code, int src_code,
Register scratch) {
- if (src_code < 32 && dst_code < 32) {
+ if (src_code < SwVfpRegister::kMaxNumRegisters &&
+ dst_code < SwVfpRegister::kMaxNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
- } else if (src_code < 32) {
+ } else if (src_code < SwVfpRegister::kMaxNumRegisters) {
// src is an s-register.
vmov(scratch, SwVfpRegister::from_code(src_code));
VmovExtended(dst_code, scratch);
- } else if (dst_code < 32) {
+ } else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
// dst is an s-register.
VmovExtended(scratch, src_code);
vmov(SwVfpRegister::from_code(dst_code), scratch);
} else {
// Neither src or dst are s-registers.
- DCHECK_GT(64, src_code);
- DCHECK_GT(64, dst_code);
+ DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
+ DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
VmovExtended(scratch, src_code);
VmovExtended(dst_code, scratch);
}
@@ -1096,7 +1126,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code,
void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
Register scratch) {
- if (dst_code >= 32) {
+ if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
ldr(scratch, src);
VmovExtended(dst_code, scratch);
} else {
@@ -1106,7 +1136,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
Register scratch) {
- if (src_code >= 32) {
+ if (src_code >= SwVfpRegister::kMaxNumRegisters) {
VmovExtended(scratch, src_code);
str(scratch, dst);
} else {
@@ -1114,6 +1144,105 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
}
}
+void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
+ NeonDataType dt, int lane) {
+ int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
+ int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
+ int byte = lane << log2_bytes_per_lane;
+ int double_word = byte >> kDoubleSizeLog2;
+ int double_byte = byte & (kDoubleSize - 1);
+ int double_lane = double_byte >> log2_bytes_per_lane;
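+  // Worked example: dt = NeonS16, lane = 5 gives byte = 10, i.e. 16-bit
+  // lane 1 (double_byte = 2) of the odd D register (double_word = 1).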
+ DwVfpRegister double_source =
+ DwVfpRegister::from_code(src.code() * 2 + double_word);
+ vmov(dt, dst, double_source, double_lane);
+}
+
+void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
+ Register scratch, int lane) {
+ int s_code = src.code() * 4 + lane;
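+  // Lane i of q<n> maps to S register code 4 * n + i; codes >= 32 (q8-q15)
+  // have no actual S register and are handled by VmovExtended.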
+ VmovExtended(dst.code(), s_code, scratch);
+}
+
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+ Register src_lane, NeonDataType dt, int lane) {
+ Move(dst, src);
+ int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
+ int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
+ int byte = lane << log2_bytes_per_lane;
+ int double_word = byte >> kDoubleSizeLog2;
+ int double_byte = byte & (kDoubleSize - 1);
+ int double_lane = double_byte >> log2_bytes_per_lane;
+ DwVfpRegister double_dst =
+ DwVfpRegister::from_code(dst.code() * 2 + double_word);
+ vmov(dt, double_dst, double_lane, src_lane);
+}
+
+void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+ SwVfpRegister src_lane, Register scratch,
+ int lane) {
+ Move(dst, src);
+ int s_code = dst.code() * 4 + lane;
+ VmovExtended(s_code, src_lane.code(), scratch);
+}
+
+void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
+ Register scratch, NeonSize size, uint32_t lanes) {
+ // TODO(bbudge) Handle Int16x8, Int8x16 vectors.
+ DCHECK_EQ(Neon32, size);
+ DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
+ if (size == Neon32) {
+ switch (lanes) {
+ // TODO(bbudge) Handle more special cases.
+ case 0x3210: // Identity.
+ Move(dst, src);
+ return;
+ case 0x1032: // Swap top and bottom.
+ vext(dst, src, src, 8);
+ return;
+ case 0x2103: // Rotation.
+ vext(dst, src, src, 12);
+ return;
+ case 0x0321: // Rotation.
+ vext(dst, src, src, 4);
+ return;
+ case 0x0000: // Equivalent to vdup.
+ case 0x1111:
+ case 0x2222:
+ case 0x3333: {
+ int lane_code = src.code() * 4 + (lanes & 0xF);
+ if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
+ // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
+ int temp_code = kScratchDoubleReg.code() * 2;
+ VmovExtended(temp_code, lane_code, scratch);
+ lane_code = temp_code;
+ }
+ vdup(dst, SwVfpRegister::from_code(lane_code));
+ return;
+ }
+ case 0x2301: // Swap lanes 0, 1 and lanes 2, 3.
+ vrev64(Neon32, dst, src);
+ return;
+ default: // Handle all other cases with vmovs.
+ int src_code = src.code() * 4;
+ int dst_code = dst.code() * 4;
+ bool in_place = src.is(dst);
+ if (in_place) {
+ vmov(kScratchQuadReg, src);
+ src_code = kScratchQuadReg.code() * 4;
+ }
+ for (int i = 0; i < 4; i++) {
+ int lane = (lanes >> (i * 4) & 0xF);
+ VmovExtended(dst_code + i, src_code + lane, scratch);
+ }
+ if (in_place) {
+ // Restore zero reg.
+ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ }
+ return;
+ }
+ }
+}
+
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
@@ -1629,18 +1758,16 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- mov(r4, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_avtive =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ mov(r4, Operand(debug_hook_avtive));
ldrsb(r4, MemOperand(r4));
- cmp(r4, Operand(StepIn));
- b(lt, &skip_flooding);
+ cmp(r4, Operand(0));
+ b(eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1657,7 +1784,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -1671,7 +1798,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
@@ -1685,8 +1812,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -2177,112 +2304,6 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
add(result, result, Operand(kHeapObjectTag));
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
- add(scratch1, scratch1,
- Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
@@ -2314,68 +2335,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, ip);
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(ls, fail);
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- b(hi, fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- LowDwVfpRegister double_scratch,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label smi_value, store;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- VFPCanonicalizeNaN(double_scratch);
- b(&store);
-
- bind(&smi_value);
- SmiToDouble(double_scratch, value_reg);
-
- bind(&store);
- add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
- vstr(double_scratch,
- FieldMemOperand(scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -2878,28 +2837,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- ldr(scratch, NativeContextMemOperand());
- ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- cmp(map_in_out, ip);
- b(ne, no_map_match);
-
- // Use the transitioned cached map.
- ldr(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, NativeContextMemOperand());
ldr(dst, ContextMemOperand(dst, index));
@@ -2962,15 +2899,6 @@ void MacroAssembler::UntagAndJumpIfSmi(
b(cc, smi_case); // Shifter carry is not set for a smi.
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(
- Register dst, Register src, Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- SmiUntag(dst, src, SetCC);
- b(cs, non_smi_case); // Shifter carry is set for a non-smi.
-}
-
-
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
@@ -3411,19 +3339,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
b(ne, failure);
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- and_(scratch, type, Operand(kFlatOneByteStringMask));
- cmp(scratch, Operand(kFlatOneByteStringTag));
- b(ne, failure);
-}
-
static const int kRegisterPassedArguments = 4;
@@ -3861,45 +3776,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Register current = scratch0;
- Label loop_again, end;
-
- // scratch contained elements pointer.
- mov(current, object);
- ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
- ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- b(eq, &end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- cmp(scratch1, Operand(JS_OBJECT_TYPE));
- b(lo, found);
-
- ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
- b(eq, found);
- ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- b(ne, &loop_again);
-
- bind(&end);
-}
-
-
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 4f0ee82c00..1f537b0157 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -184,6 +184,10 @@ class MacroAssembler: public Assembler {
}
void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
+ void Move(QwNeonRegister dst, QwNeonRegister src);
+ // Register swap.
+ void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
+ void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@@ -557,6 +561,16 @@ class MacroAssembler: public Assembler {
void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
+ void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
+ void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
+ int lane);
+ void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
+ NeonDataType dt, int lane);
+ void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
+ SwVfpRegister src_lane, Register scratch, int lane);
+ void Swizzle(QwNeonRegister dst, QwNeonRegister src, Register scratch,
+ NeonSize size, uint32_t lanes);
+
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
@@ -635,17 +649,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -678,9 +681,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -794,32 +798,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -884,29 +862,6 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- LowDwVfpRegister double_scratch,
- Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@@ -1287,10 +1242,6 @@ class MacroAssembler: public Assembler {
// Souce and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
// Test if the register contains a smi (Z == 0 (eq) if true).
inline void SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
@@ -1380,11 +1331,6 @@ class MacroAssembler: public Assembler {
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@@ -1464,20 +1410,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- b(eq, memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 331a7e9dfd..39e7a8e837 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -895,28 +895,16 @@ void Simulator::set_d_register(int dreg, const uint32_t* value) {
memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
}
-
-void Simulator::get_q_register(int qreg, uint64_t* value) {
- DCHECK((qreg >= 0) && (qreg < num_q_registers));
- memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
-}
-
-
-void Simulator::set_q_register(int qreg, const uint64_t* value) {
+template <typename T>
+void Simulator::get_q_register(int qreg, T* value) {
DCHECK((qreg >= 0) && (qreg < num_q_registers));
- memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
+ memcpy(value, vfp_registers_ + qreg * 4, kSimd128Size);
}
-
-void Simulator::get_q_register(int qreg, uint32_t* value) {
- DCHECK((qreg >= 0) && (qreg < num_q_registers));
- memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
-}
-
-
-void Simulator::set_q_register(int qreg, const uint32_t* value) {
+template <typename T>
+void Simulator::set_q_register(int qreg, const T* value) {
DCHECK((qreg >= 0) && (qreg < num_q_registers));
- memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
+ memcpy(vfp_registers_ + qreg * 4, value, kSimd128Size);
}
@@ -3067,6 +3055,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// Dd = vsqrt(Dm)
// Sd = vsqrt(Sm)
// vmrs
+// vdup.size Qd, Rt.
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
@@ -3277,24 +3266,117 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
- } else if ((instr->VLValue() == 0x0) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
- // vmov (ARM core register to scalar)
- int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- uint32_t data[2];
- get_d_register(vd, data);
- data[instr->Bit(21)] = get_register(instr->RtValue());
- set_d_register(vd, data);
- } else if ((instr->VLValue() == 0x1) &&
- (instr->VCValue() == 0x1) &&
- (instr->Bit(23) == 0x0)) {
+ } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
+ if (instr->Bit(23) == 0) {
+ // vmov (ARM core register to scalar)
+ int vd = instr->VFPNRegValue(kDoublePrecision);
+ int rt = instr->RtValue();
+ int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+ if ((opc1_opc2 & 0xb) == 0) {
+ // NeonS32/NeonU32
+ uint32_t data[2];
+ get_d_register(vd, data);
+ data[instr->Bit(21)] = get_register(rt);
+ set_d_register(vd, data);
+ } else {
+ uint64_t data;
+ get_d_register(vd, &data);
+ uint64_t rt_value = get_register(rt);
+ if ((opc1_opc2 & 0x8) != 0) {
+ // NeonS8 / NeonU8
+ int i = opc1_opc2 & 0x7;
+ int shift = i * kBitsPerByte;
+ const uint64_t mask = 0xFF;
+ data &= ~(mask << shift);
+ data |= (rt_value & mask) << shift;
+ set_d_register(vd, &data);
+ } else if ((opc1_opc2 & 0x1) != 0) {
+ // NeonS16 / NeonU16
+ int i = (opc1_opc2 >> 1) & 0x3;
+ int shift = i * kBitsPerByte * kShortSize;
+ const uint64_t mask = 0xFFFF;
+ data &= ~(mask << shift);
+ data |= (rt_value & mask) << shift;
+ set_d_register(vd, &data);
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
+ }
+ } else {
+ // vdup.size Qd, Rt.
+ NeonSize size = Neon32;
+ if (instr->Bit(5) != 0)
+ size = Neon16;
+ else if (instr->Bit(22) != 0)
+ size = Neon8;
+ int vd = instr->VFPNRegValue(kSimd128Precision);
+ int rt = instr->RtValue();
+ uint32_t rt_value = get_register(rt);
+ uint32_t q_data[4];
+ switch (size) {
+ case Neon8: {
+ rt_value &= 0xFF;
+ uint8_t* dst = reinterpret_cast<uint8_t*>(q_data);
+ for (int i = 0; i < 16; i++) {
+ dst[i] = rt_value;
+ }
+ break;
+ }
+ case Neon16: {
+ // Perform pairwise op.
+ rt_value &= 0xFFFFu;
+ uint32_t rt_rt = (rt_value << 16) | (rt_value & 0xFFFFu);
+ for (int i = 0; i < 4; i++) {
+ q_data[i] = rt_rt;
+ }
+ break;
+ }
+ case Neon32: {
+ for (int i = 0; i < 4; i++) {
+ q_data[i] = rt_value;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ set_q_register(vd, q_data);
+ }
+ } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
// vmov (scalar to ARM core register)
- int vn = instr->Bits(19, 16) | (instr->Bit(7) << 4);
- double dn_value = get_double_from_d_register(vn);
- int32_t data[2];
- memcpy(data, &dn_value, 8);
- set_register(instr->RtValue(), data[instr->Bit(21)]);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
+ int rt = instr->RtValue();
+ int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
+ uint64_t data;
+ get_d_register(vn, &data);
+ if ((opc1_opc2 & 0xb) == 0) {
+ // NeonS32 / NeonU32
+ int32_t int_data[2];
+ memcpy(int_data, &data, sizeof(int_data));
+ set_register(rt, int_data[instr->Bit(21)]);
+ } else {
+ uint64_t data;
+ get_d_register(vn, &data);
+ bool u = instr->Bit(23) != 0;
+ if ((opc1_opc2 & 0x8) != 0) {
+ // NeonS8 / NeonU8
+ int i = opc1_opc2 & 0x7;
+ int shift = i * kBitsPerByte;
+ uint32_t scalar = (data >> shift) & 0xFFu;
+ if (!u && (scalar & 0x80) != 0) scalar |= 0xffffff00;
+ set_register(rt, scalar);
+ } else if ((opc1_opc2 & 0x1) != 0) {
+ // NeonS16 / NeonU16
+ int i = (opc1_opc2 >> 1) & 0x3;
+ int shift = i * kBitsPerByte * kShortSize;
+ uint32_t scalar = (data >> shift) & 0xFFFFu;
+ if (!u && (scalar & 0x8000) != 0) scalar |= 0xffff0000;
+ set_register(rt, scalar);
+ } else {
+ UNREACHABLE(); // Not used by V8.
+ }
+ }
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
@@ -3520,6 +3602,48 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
}
}
+int32_t Simulator::ConvertDoubleToInt(double val, bool unsigned_integer,
+ VFPRoundingMode mode) {
+ int32_t result =
+ unsigned_integer ? static_cast<uint32_t>(val) : static_cast<int32_t>(val);
+
+ inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
+
+ double abs_diff = unsigned_integer
+ ? std::fabs(val - static_cast<uint32_t>(result))
+ : std::fabs(val - result);
+
+ inexact_vfp_flag_ = (abs_diff != 0);
+
+ if (inv_op_vfp_flag_) {
+ result = VFPConversionSaturate(val, unsigned_integer);
+ } else {
+ switch (mode) {
+ case RN: {
+ int val_sign = (val > 0) ? 1 : -1;
+ if (abs_diff > 0.5) {
+ result += val_sign;
+ } else if (abs_diff == 0.5) {
+ // Round to even if exactly halfway.
+ result = ((result % 2) == 0) ? result : result + val_sign;
+ }
+ break;
+ }
+
+ case RM:
+ result = result > val ? result - 1 : result;
+ break;
+
+ case RZ:
+ // Nothing to do.
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+ return result;
+}
void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
@@ -3556,44 +3680,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
double val = double_precision ? get_double_from_d_register(src)
: get_float_from_s_register(src);
- int temp = unsigned_integer ? static_cast<uint32_t>(val)
- : static_cast<int32_t>(val);
-
- inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
-
- double abs_diff =
- unsigned_integer ? std::fabs(val - static_cast<uint32_t>(temp))
- : std::fabs(val - temp);
-
- inexact_vfp_flag_ = (abs_diff != 0);
-
- if (inv_op_vfp_flag_) {
- temp = VFPConversionSaturate(val, unsigned_integer);
- } else {
- switch (mode) {
- case RN: {
- int val_sign = (val > 0) ? 1 : -1;
- if (abs_diff > 0.5) {
- temp += val_sign;
- } else if (abs_diff == 0.5) {
- // Round to even if exactly halfway.
- temp = ((temp % 2) == 0) ? temp : temp + val_sign;
- }
- break;
- }
-
- case RM:
- temp = temp > val ? temp - 1 : temp;
- break;
-
- case RZ:
- // Nothing to do.
- break;
-
- default:
- UNREACHABLE();
- }
- }
+ int32_t temp = ConvertDoubleToInt(val, unsigned_integer, mode);
// Update the destination register.
set_s_register_from_sinteger(dst, temp);
@@ -3740,9 +3827,334 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
}
-
void Simulator::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
+ case 4:
+ if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
+ instr->Bit(4) == 1) {
+ // vmov Qd, Qm.
+ // vorr, Qd, Qm, Qn.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ uint32_t src1[4];
+ get_q_register(Vm, src1);
+ if (Vm != Vn) {
+ uint32_t src2[4];
+ get_q_register(Vn, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] | src2[i];
+ }
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(11, 8) == 8) {
+ // vadd/vtst
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ if (instr->Bit(4) == 0) {
+ // vadd.i<size> Qd, Qm, Qn.
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] += src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ // vtst.i<size> Qd, Qm, Qn.
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (src1[i] & src2[i]) != 0 ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else if (instr->Bits(11, 8) == 0xd && instr->Bit(20) == 0 &&
+ instr->Bit(4) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ float src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (instr->Bit(21) == 0) {
+ // vadd.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] + src2[i];
+ } else {
+ // vsub.f32 Qd, Qm, Qn.
+ src1[i] = src1[i] - src2[i];
+ }
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ // vmul.i<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] *= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
+ instr->Bit(4) == 0) {
+ // vceq.f32.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ float src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (src1[i] == src2[i]) ? 0xFFFFFFFF : 0;
+ }
+ set_q_register(Vd, dst);
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
+ instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ // vand Qd, Qm, Qn.
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] & src2[i];
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(11, 8) == 0x3) {
+ // vcge/vcgt.s<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ int8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFF : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ int16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFF : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ int32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFF : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFFFFF : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
+ instr->Bit(6) == 1) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ float src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ if (instr->Bit(4) == 1) {
+ if (instr->Bit(21) == 0) {
+ // vrecps.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = 2.0f - src1[i] * src2[i];
+ }
+ } else {
+ // vrsqrts.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (3.0f - src1[i] * src2[i]) * 0.5f;
+ }
+ }
+ } else {
+ if (instr->Bit(21) == 1) {
+ // vmin.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = std::min(src1[i], src2[i]);
+ }
+ } else {
+ // vmax.f32 Qd, Qm, Qn.
+ for (int i = 0; i < 4; i++) {
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ }
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(11, 8) == 0x6) {
+ // vmin/vmax.s<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8: {
+ int8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ int16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ int32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -3763,6 +4175,283 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
e++;
}
set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
+ } else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
+ // vext.
+ int imm4 = instr->Bits(11, 8);
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ uint8_t src1[16], src2[16], dst[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ int boundary = kSimd128Size - imm4;
+ int i = 0;
+ for (; i < boundary; i++) {
+ dst[i] = src1[i + imm4];
+ }
+ for (; i < 16; i++) {
+ dst[i] = src2[i - boundary];
+ }
+ set_q_register(Vd, dst);
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ case 6:
+ if (instr->Bits(11, 8) == 8 && instr->Bit(4) == 0) {
+ // vsub.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] -= src2[i];
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 8 && instr->Bit(4) == 1) {
+ // vceq.size Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = (src1[i] == src2[i]) ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
+ instr->Bit(4) == 1) {
+ // vbsl.size Qd, Qm, Qn.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ uint32_t dst[4], src1[4], src2[4];
+ get_q_register(Vd, dst);
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ dst[i] = (dst[i] & src1[i]) | (~dst[i] & src2[i]);
+ }
+ set_q_register(Vd, dst);
+ } else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
+ instr->Bit(4) == 1) {
+ if (instr->Bit(6) == 0) {
+ // veor Dd, Dn, Dm
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vn = instr->VFPNRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ uint64_t src1, src2;
+ get_d_register(Vn, &src1);
+ get_d_register(Vm, &src2);
+ src1 ^= src2;
+ set_d_register(Vd, &src1);
+
+ } else {
+ // veor Qd, Qn, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) src1[i] ^= src2[i];
+ set_q_register(Vd, src1);
+ }
+ } else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
+ instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ // vmul.f32 Qd, Qn, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ float src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ src1[i] = src1[i] * src2[i];
+ }
+ set_q_register(Vd, src1);
+ } else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
+ instr->Bit(4) == 0) {
+ // vcge/vcgt.f32 Qd, Qm, Qn
+ bool ge = instr->Bit(21) == 0;
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ float src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ uint32_t dst[4];
+ for (int i = 0; i < 4; i++) {
+ if (ge) {
+ dst[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ } else {
+ dst[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ }
+ }
+ set_q_register(Vd, dst);
+ } else if (instr->Bits(11, 8) == 0x3) {
+ // vcge/vcgt.u<size> Qd, Qm, Qn.
+ bool ge = instr->Bit(4) == 1;
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (ge)
+ src1[i] = src1[i] >= src2[i] ? 0xFFFFFFFFu : 0;
+ else
+ src1[i] = src1[i] > src2[i] ? 0xFFFFFFFFu : 0;
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 0x6) {
+ // vmin/vmax.u<size> Qd, Qm, Qn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ int Vn = instr->VFPNRegValue(kSimd128Precision);
+ bool min = instr->Bit(4) != 0;
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 16; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4];
+ get_q_register(Vn, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 4; i++) {
+ if (min)
+ src1[i] = std::min(src1[i], src2[i]);
+ else
+ src1[i] = std::max(src1[i], src2[i]);
+ }
+ set_q_register(Vd, src1);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
} else {
UNIMPLEMENTED();
}
@@ -3787,19 +4476,359 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
e++;
}
set_q_register(Vd, reinterpret_cast<uint64_t*>(to));
- } else if ((instr->Bits(21, 16) == 0x32) && (instr->Bits(11, 7) == 0) &&
- (instr->Bit(4) == 0)) {
- int vd = instr->VFPDRegValue(kDoublePrecision);
- int vm = instr->VFPMRegValue(kDoublePrecision);
- if (instr->Bit(6) == 0) {
- // vswp Dd, Dm.
- uint64_t dval, mval;
- get_d_register(vd, &dval);
- get_d_register(vm, &mval);
- set_d_register(vm, &dval);
- set_d_register(vd, &mval);
+ } else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
+ if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
+ instr->Bit(6) == 1) {
+ // vcvt.<Td>.<Tm> Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_q_register(Vm, q_data);
+ int op = instr->Bits(8, 7);
+ for (int i = 0; i < 4; i++) {
+ switch (op) {
+ case 0:
+ // f32 <- s32, round towards nearest.
+ q_data[i] = bit_cast<uint32_t>(std::round(
+ static_cast<float>(bit_cast<int32_t>(q_data[i]))));
+ break;
+ case 1:
+ // f32 <- u32, round towards nearest.
+ q_data[i] = bit_cast<uint32_t>(
+ std::round(static_cast<float>(q_data[i])));
+ break;
+ case 2:
+ // s32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), false, RZ));
+ break;
+ case 3:
+ // u32 <- f32, round to zero.
+ q_data[i] = static_cast<uint32_t>(
+ ConvertDoubleToInt(bit_cast<float>(q_data[i]), true, RZ));
+ break;
+ }
+ }
+ set_q_register(Vd, q_data);
+ } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
+ if (instr->Bit(6) == 0) {
+ // vswp Dd, Dm.
+ uint64_t dval, mval;
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ get_d_register(vd, &dval);
+ get_d_register(vm, &mval);
+ set_d_register(vm, &dval);
+ set_d_register(vd, &mval);
+ } else {
+ // vswp Qd, Qm.
+ uint32_t dval[4], mval[4];
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ get_q_register(vd, dval);
+ get_q_register(vm, mval);
+ set_q_register(vm, dval);
+ set_q_register(vd, mval);
+ }
+ } else if (instr->Bits(11, 7) == 0x18) {
+ // vdup.32 Qd, Sm.
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int index = instr->Bit(19);
+ uint32_t s_data = get_s_register(vm * 2 + index);
+ uint32_t q_data[4];
+ for (int i = 0; i < 4; i++) q_data[i] = s_data;
+ set_q_register(vd, q_data);
+ } else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
+ // vmvn Qd, Qm.
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ int vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t q_data[4];
+ get_q_register(vm, q_data);
+ for (int i = 0; i < 4; i++) q_data[i] = ~q_data[i];
+ set_q_register(vd, q_data);
+ } else if (instr->Bits(11, 10) == 0x2) {
+ // vtb[l,x] Dd, <list>, Dm.
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ int vn = instr->VFPNRegValue(kDoublePrecision);
+ int vm = instr->VFPMRegValue(kDoublePrecision);
+ int table_len = (instr->Bits(9, 8) + 1) * kDoubleSize;
+ bool vtbx = instr->Bit(6) != 0; // vtbl / vtbx
+ uint64_t destination = 0, indices = 0, result = 0;
+ get_d_register(vd, &destination);
+ get_d_register(vm, &indices);
+ for (int i = 0; i < kDoubleSize; i++) {
+ int shift = i * kBitsPerByte;
+ int index = (indices >> shift) & 0xFF;
+ if (index < table_len) {
+ uint64_t table;
+ get_d_register(vn + index / kDoubleSize, &table);
+ result |=
+ ((table >> ((index % kDoubleSize) * kBitsPerByte)) & 0xFF)
+ << shift;
+ } else if (vtbx) {
+ result |= destination & (0xFFull << shift);
+ }
+ }
+ set_d_register(vd, &result);
+ } else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
+ // vzip.<size> Qd, Qm.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ switch (size) {
+ case Neon8: {
+ uint8_t src1[16], src2[16], dst1[16], dst2[16];
+ get_q_register(Vd, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i++) {
+ dst1[i * 2] = src1[i];
+ dst1[i * 2 + 1] = src2[i];
+ dst2[i * 2] = src1[i + 8];
+ dst2[i * 2 + 1] = src2[i + 8];
+ }
+ set_q_register(Vd, dst1);
+ set_q_register(Vm, dst2);
+ break;
+ }
+ case Neon16: {
+ uint16_t src1[8], src2[8], dst1[8], dst2[8];
+ get_q_register(Vd, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 8; i += 2) {
+ dst1[i] = src1[i / 2];
+ dst1[i + 1] = src2[i / 2];
+ dst2[i] = src1[i / 2 + 4];
+ dst2[i + 1] = src2[i / 2 + 4];
+ }
+ set_q_register(Vd, dst1);
+ set_q_register(Vm, dst2);
+ break;
+ }
+ case Neon32: {
+ uint32_t src1[4], src2[4], dst1[4], dst2[4];
+ get_q_register(Vd, src1);
+ get_q_register(Vm, src2);
+ for (int i = 0; i < 2; i++) {
+ dst1[i * 2] = src1[i];
+ dst1[i * 2 + 1] = src2[i];
+ dst2[i * 2] = src1[i + 2];
+ dst2[i * 2 + 1] = src2[i + 2];
+ }
+ set_q_register(Vd, dst1);
+ set_q_register(Vm, dst2);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
+ // vrev<op>.size Qd, Qm
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ NeonSize op = static_cast<NeonSize>(static_cast<int>(Neon64) -
+ instr->Bits(8, 7));
+ switch (op) {
+ case Neon16: {
+ DCHECK_EQ(Neon8, size);
+ uint8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 16; i += 2) {
+ std::swap(src[i], src[i + 1]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon32: {
+ switch (size) {
+ case Neon16: {
+ uint16_t src[8];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 8; i += 2) {
+ std::swap(src[i], src[i + 1]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon8: {
+ uint8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ case Neon64: {
+ switch (size) {
+ case Neon32: {
+ uint32_t src[4];
+ get_q_register(Vm, src);
+ std::swap(src[0], src[1]);
+ std::swap(src[2], src[3]);
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon16: {
+ uint16_t src[8];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i * 4], src[i * 4 + 3]);
+ std::swap(src[i * 4 + 1], src[i * 4 + 2]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon8: {
+ uint8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ std::swap(src[i], src[7 - i]);
+ std::swap(src[i + 8], src[15 - i]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
+ if (instr->Bits(9, 6) == 0xd) {
+ // vabs<type>.<size> Qd, Qm
+ if (instr->Bit(10) != 0) {
+ // floating point (clear sign bits)
+ uint32_t src[4];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] &= ~0x80000000;
+ }
+ set_q_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8: {
+ int8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 16; i++) {
+ src[i] = std::abs(src[i]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon16: {
+ int16_t src[8];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 8; i++) {
+ src[i] = std::abs(src[i]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon32: {
+ int32_t src[4];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] = std::abs(src[i]);
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else if (instr->Bits(9, 6) == 0xf) {
+ // vneg<type>.<size> Qd, Qm (signed integer)
+ if (instr->Bit(10) != 0) {
+ // floating point (toggle sign bits)
+ uint32_t src[4];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] ^= 0x80000000;
+ }
+ set_q_register(Vd, src);
+ } else {
+ // signed integer
+ switch (size) {
+ case Neon8: {
+ int8_t src[16];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 16; i++) {
+ src[i] = -src[i];
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ case Neon16:
+ int16_t src[8];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 8; i++) {
+ src[i] = -src[i];
+ }
+ set_q_register(Vd, src);
+ break;
+ case Neon32: {
+ int32_t src[4];
+ get_q_register(Vm, src);
+ for (int i = 0; i < 4; i++) {
+ src[i] = -src[i];
+ }
+ set_q_register(Vd, src);
+ break;
+ }
+ default:
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ } else {
+ UNIMPLEMENTED();
+ }
+ } else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
+ // vrecpe/vrsqrte.f32 Qd, Qm.
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ int Vm = instr->VFPMRegValue(kSimd128Precision);
+ uint32_t src[4];
+ get_q_register(Vm, src);
+ if (instr->Bit(7) == 0) {
+ for (int i = 0; i < 4; i++) {
+ float denom = bit_cast<float>(src[i]);
+ div_zero_vfp_flag_ = (denom == 0);
+ float result = 1.0f / denom;
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
+ }
+ } else {
+ lazily_initialize_fast_sqrt(isolate_);
+ for (int i = 0; i < 4; i++) {
+ float radicand = bit_cast<float>(src[i]);
+ float result = 1.0f / fast_sqrt(radicand, isolate_);
+ result = canonicalizeNaN(result);
+ src[i] = bit_cast<uint32_t>(result);
+ }
+ }
+ set_q_register(Vd, src);
} else {
- // Q register vswp unimplemented.
UNIMPLEMENTED();
}
} else {
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 7435b77255..48c2d0f44a 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -151,10 +151,11 @@ class Simulator {
void set_d_register(int dreg, const uint64_t* value);
void get_d_register(int dreg, uint32_t* value);
void set_d_register(int dreg, const uint32_t* value);
- void get_q_register(int qreg, uint64_t* value);
- void set_q_register(int qreg, const uint64_t* value);
- void get_q_register(int qreg, uint32_t* value);
- void set_q_register(int qreg, const uint32_t* value);
+ // Support for NEON.
+ template <typename T>
+ void get_q_register(int qreg, T* value);
+ template <typename T>
+ void set_q_register(int qreg, const T* value);
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
@@ -339,6 +340,8 @@ class Simulator {
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
+ int32_t ConvertDoubleToInt(double val, bool unsigned_integer,
+ VFPRoundingMode mode);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 37fdb2618f..3002d7c250 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -194,13 +194,18 @@ Address RelocInfo::wasm_global_reference() {
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
}
@@ -2950,15 +2955,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
- (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID) ||
- (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
+ (rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
- RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
- RelocInfo::IsGeneratorContinuation(rmode));
+ RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index d5c2936dc2..a55f8138f2 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -938,9 +938,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index c0d700ce0d..9a712c925d 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -590,8 +579,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -2980,234 +2972,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ Ldr(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &start_polymorphic);
- // found, now call handler.
- Register handler = feedback;
- __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(feedback);
-
- Register length = scratch2;
- __ Bind(&start_polymorphic);
- __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- __ Cmp(length, Operand(Smi::FromInt(2)));
- __ B(eq, miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Add(too_far, feedback,
- Operand::UntagSmiAndScale(length, kPointerSizeLog2));
- __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(pointer_reg, feedback,
- FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
-
- __ Bind(&next_loop);
- __ Ldr(cached_map, MemOperand(pointer_reg));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &prepare_next);
- __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(handler);
-
- __ Bind(&prepare_next);
- __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
- __ Cmp(pointer_reg, too_far);
- __ B(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ Cmp(cached_map, receiver_map);
- __ B(ne, try_array);
-
- Register handler = feedback;
- __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(handler);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
-
- __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Add(too_far, feedback,
- Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
- __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(pointer_reg, feedback,
- FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
-
- __ Bind(&next_loop);
- __ Ldr(cached_map, MemOperand(pointer_reg));
- __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Cmp(receiver_map, cached_map);
- __ B(ne, &prepare_next);
- // Is it a transitioning store?
- __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ B(ne, &transition_call);
-
- __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(pointer_reg);
-
- __ Bind(&transition_call);
- __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
- __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(receiver_map);
-
- __ Bind(&prepare_next);
- __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
- __ Cmp(pointer_reg, too_far);
- __ B(lt, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
- Register key = StoreWithVectorDescriptor::NameRegister(); // x2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
- Register feedback = x5;
- Register receiver_map = x6;
- Register scratch1 = x7;
-
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ Bind(&try_array);
- // Is it a fixed array?
- __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
-
- // We have a polymorphic element handler.
- Label try_poly_name;
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
-
- __ Bind(&not_array);
- // Is it generic?
- __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
- &try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ Bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Cmp(key, feedback);
- __ B(ne, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
- __ Ldr(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
-
- __ Bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ Bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -3309,91 +3073,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
-
-// Probe the name dictionary in the 'elements' register.
-// Jump to the 'done' label if a property with the given name is found.
-// Jump to the 'miss' label otherwise.
-//
-// If the lookup was successful, 'scratch2' will be equal to elements + 4 * index.
-// 'elements' and 'name' registers are preserved on miss.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, scratch1, scratch2));
-
- // Assert that name contains a string.
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
- __ Sub(scratch1, scratch1, 1);
-
- // Generate an unrolled loop that performs a few probes before giving up.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Add(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
-
- // Scale the index by multiplying by the element size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
-
- // Check if the key is identical to the name.
- UseScratchRegisterScope temps(masm);
- Register scratch3 = temps.AcquireX();
- __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
- __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Cmp(name, scratch3);
- __ B(eq, done);
- }
-
- // The inlined probes didn't find the entry.
- // Call the complete stub to scan the whole dictionary.
-
- CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
- spill_list.Combine(lr);
- spill_list.Remove(scratch1);
- spill_list.Remove(scratch2);
-
- __ PushCPURegList(spill_list);
-
- if (name.is(x0)) {
- DCHECK(!elements.is(x1));
- __ Mov(x1, name);
- __ Mov(x0, elements);
- } else {
- __ Mov(x0, elements);
- __ Mov(x1, name);
- }
-
- Label not_found;
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ Cbz(x0, &not_found);
- __ Mov(scratch2, x2); // Move entry index into scratch2.
- __ PopCPURegList(spill_list);
- __ B(done);
-
- __ Bind(&not_found);
- __ PopCPURegList(spill_list);
- __ B(miss);
-}
-
-
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
@@ -3875,127 +3554,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- x1 : target
- // -- x3 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(x1);
- __ AssertReceiver(x3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(x2, &new_object);
- __ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
- __ CompareAndBranch(x0, x1, ne, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
- __ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
- __ Bind(&done_allocate);
-
- // Initialize the JSObject fields.
- STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
- __ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
- __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
- STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
- STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
- __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
- __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- x0 : result (tagged)
- // -- x1 : result fields (untagged)
- // -- x5 : result end (untagged)
- // -- x2 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
- __ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
- __ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
- &slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(x1, x5, x6);
- __ Ret();
- }
- __ Bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
- __ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
- __ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
- __ InitializeFieldsWithFiller(x1, x4, x6);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(x1, x5, x6);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
- __ Ret();
-
- // Finalize the instance size.
- __ Bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(x0, x2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(x0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ Bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ Mov(x4,
- Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
- __ Push(x2, x4);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(x2);
- }
- __ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
- __ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
- __ B(&done_allocate);
-
- // Fall back to %NewObject.
- __ Bind(&new_object);
- __ Push(x1, x3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : function
diff --git a/deps/v8/src/arm64/code-stubs-arm64.h b/deps/v8/src/arm64/code-stubs-arm64.h
index 4b56b5468f..13e1b9d234 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.h
+++ b/deps/v8/src/arm64/code-stubs-arm64.h
@@ -355,14 +355,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index edd289900e..e6ddcfadb8 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -40,272 +40,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- ASM_LOCATION(
- "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
- DCHECK(!AreAliased(receiver, key, value, target_map));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
- allocation_memento_found);
- }
-
- // Set transitioned map.
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- x10,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
- Label gc_required, only_change_map;
- Register elements = x4;
- Register length = x5;
- Register array_size = x6;
- Register array = x7;
-
- Register scratch = x6;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, length, array_size, array));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
- __ Push(lr);
- __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
- FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- __ Lsl(array_size, length, kDoubleSizeLog2);
- __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
- __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
- // Register array is non-tagged heap object.
-
- // Set the destination FixedDoubleArray's length and map.
- Register map_root = array_size;
- __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
- __ SmiTag(x11, length);
- __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
-
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Move(x10, array);
- __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- Register src_elements = x10;
- Register dst_elements = x11;
- Register dst_end = x12;
- __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
-
- FPRegister nan_d = d1;
- __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
-
- Label entry, done;
- __ B(&entry);
-
- __ Bind(&only_change_map);
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ B(&done);
-
- // Call into runtime if GC is required.
- __ Bind(&gc_required);
- __ Pop(lr);
- __ B(fail);
-
-  // Iterate over the array, copying and converting smis to doubles. If an
-  // element is non-smi, write a hole to the destination.
- {
- Label loop;
- __ Bind(&loop);
- __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
- __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
- __ Tst(x13, kSmiTagMask);
- __ Fcsel(d0, d0, nan_d, eq);
- __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
-
- __ Bind(&entry);
- __ Cmp(dst_elements, dst_end);
- __ B(lt, &loop);
- }
-
- __ Pop(lr);
- __ Bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
- Register elements = x4;
- Register array_size = x6;
- Register array = x7;
- Register length = x5;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, array_size, array, length));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- Label only_change_map;
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
-
- __ Push(lr);
- // TODO(all): These registers may not need to be pushed. Examine
- // RecordWriteStub and check whether it's needed.
- __ Push(target_map, receiver, key, value);
- __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
- FixedArray::kLengthOffset));
- // Allocate new FixedArray.
- Label gc_required;
- __ Mov(array_size, FixedDoubleArray::kHeaderSize);
- __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
- __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
-
- // Set destination FixedDoubleArray's length and map.
- Register map_root = array_size;
- __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
- __ SmiTag(x11, length);
- __ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- __ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- Register src_elements = x10;
- Register dst_elements = x11;
- Register dst_end = x12;
- Register the_hole = x14;
- __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
- __ Add(src_elements, elements,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ B(&initialization_loop_entry);
- __ bind(&initialization_loop);
- __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
- __ bind(&initialization_loop_entry);
- __ Cmp(dst_elements, dst_end);
- __ B(lt, &initialization_loop);
-
- __ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
-
- Register heap_num_map = x15;
- __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
-
- Label entry;
- __ B(&entry);
-
- // Call into runtime if GC is required.
- __ Bind(&gc_required);
- __ Pop(value, key, receiver, target_map);
- __ Pop(lr);
- __ B(fail);
-
- {
- Label loop, convert_hole;
- __ Bind(&loop);
- __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
- __ Cmp(x13, kHoleNanInt64);
- __ B(eq, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- Register heap_num = length;
- Register scratch = array_size;
- Register scratch2 = elements;
- __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
- x13, heap_num_map);
- __ Mov(x13, dst_elements);
- __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
- __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- __ B(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ Bind(&convert_hole);
- __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
-
- __ Bind(&entry);
- __ Cmp(dst_elements, dst_end);
- __ B(lt, &loop);
- }
-
- __ Pop(value, key, receiver, target_map);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Pop(lr);
-
- __ Bind(&only_change_map);
- __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
@@ -338,30 +72,22 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return MacroAssembler::IsYoungSequence(isolate, sequence);
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- byte* target = sequence + kCodeAgeStubEntryOffset;
- Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
- GetCodeAgeAndParity(stub, age, parity);
- }
+ byte* target = sequence + kCodeAgeStubEntryOffset;
+ Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
- Code * stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}
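
With MarkingParity gone, the age query reduces to a single early return plus a
stub lookup. A self-contained sketch of that shape; the pattern match and the
stub decoding are trivial stand-ins, not the real sequence checks:

    #include <cstdint>

    enum CodeAgeSketch { kNoAgeSketch = 0, kAgedSketch = 1 };

    struct CodeAgeOracleSketch {
      bool IsYoungSequence(const uint8_t* sequence) const {
        return sequence[0] == 0;  // stand-in for the real prologue match
      }
      CodeAgeSketch AgeOfStubAt(const uint8_t* sequence, int offset) const {
        return static_cast<CodeAgeSketch>(sequence[offset]);
      }
      CodeAgeSketch GetCodeAge(const uint8_t* sequence, int offset) const {
        if (IsYoungSequence(sequence)) return kNoAgeSketch;
        return AgeOfStubAt(sequence, offset);
      }
    };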
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 13ecc2b982..b0a80c636f 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -64,14 +64,10 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- // x2: function info
- Register registers[] = {x2};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {x1, x3};
+ // x1: function info
+ // x2: feedback vector
+ // x3: slot
+ Register registers[] = {x1, x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index bc7a2817fa..896b4f9a44 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -2203,65 +2203,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Bind(&done);
}
-
-void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure, SmiCheckType smi_check) {
- if (smi_check == DO_SMI_CHECK) {
- JumpIfEitherSmi(first, second, failure);
- } else if (emit_debug_code()) {
- DCHECK(smi_check == DONT_DO_SMI_CHECK);
- Label not_smi;
- JumpIfEitherSmi(first, second, NULL, &not_smi);
-
- // At least one input is a smi, but the flags indicated a smi check wasn't
- // needed.
- Abort(kUnexpectedSmi);
-
- Bind(&not_smi);
- }
-
- // Test that both first and second are sequential one-byte strings.
- Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
-
- JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
- scratch2, failure);
-}
-
-
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure) {
- DCHECK(!AreAliased(scratch1, second));
- DCHECK(!AreAliased(scratch1, scratch2));
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatOneByteStringMask);
- And(scratch2, second, kFlatOneByteStringMask);
- Cmp(scratch1, kFlatOneByteStringTag);
- Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
- B(ne, failure);
-}
-
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, kFlatOneByteStringMask);
- Cmp(scratch, kFlatOneByteStringTag);
- B(ne, failure);
-}
-
-
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -2425,17 +2366,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- Mov(x4, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
- CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
+ CompareAndBranch(x4, Operand(0), eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2452,7 +2391,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2466,7 +2405,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
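
The rewritten check gates debugger entry on a single byte flag instead of
comparing step actions. In plain C++ the control flow is roughly as below; the
flag and the runtime call are stand-ins for the isolate-owned state:

    #include <cstdint>

    namespace debug_sketch {

    uint8_t hook_on_function_call = 0;  // assumed to be set by the debugger

    void RuntimeDebugOnFunctionCall() { /* enter the debugger */ }

    inline void CheckDebugHook() {
      // Old: branch if last_step_action < StepIn.
      // New: branch past the hook whenever the byte is zero.
      if (hook_on_function_call != 0) {
        RuntimeDebugOnFunctionCall();
      }
    }

    }  // namespace debug_sketch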
@@ -2480,7 +2419,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(x1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
+ }
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@@ -2709,12 +2650,12 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
+ DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@@ -2725,7 +2666,18 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
// jssp[3] : fp
// jssp[1] : type
// jssp[0] : [code object]
+ } else if (type == StackFrame::WASM_COMPILED) {
+ DCHECK(csp.Is(StackPointer()));
+ Mov(type_reg, Smi::FromInt(type));
+ Push(xzr, lr);
+ Push(fp, type_reg);
+ Add(fp, csp, TypedFrameConstants::kFixedFrameSizeFromFp);
+ // csp[3] for alignment
+ // csp[2] : lr
+ // csp[1] : fp
+ // csp[0] : type
} else {
+ DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@@ -3208,114 +3160,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
ObjectTag(result, result);
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- Add(scratch1, length, length); // Length in bytes, not chars.
- Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- Bic(scratch1, scratch1, kObjectAlignmentMask);
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- STATIC_ASSERT(kCharSize == 1);
- Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- Bic(scratch1, scratch1, kObjectAlignmentMask);
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- DCHECK(!AreAliased(result, length, scratch1, scratch2));
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- DCHECK(!AreAliased(result, length, scratch1, scratch2));
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@@ -3664,59 +3508,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- // If cond==ls, set cond=hi, otherwise compare.
- Ccmp(scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
- B(hi, fail);
-}
-
-
-// Note: The ARM version of this clobbers elements_reg, but this version does
-// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- FPRegister fpscratch1,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label store_num;
-
- // Speculatively convert the smi to a double - all smis can be exactly
- // represented as a double.
- SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
-
- // If value_reg is a smi, we're done.
- JumpIfSmi(value_reg, &store_num);
-
- // Ensure that the object is a heap number.
- JumpIfNotHeapNumber(value_reg, fail);
-
- Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
- // Canonicalize NaNs.
- CanonicalizeNaN(fpscratch1);
-
- // Store the result.
- Bind(&store_num);
- Add(scratch1, elements_reg,
- Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
- Str(fpscratch1,
- FieldMemOperand(scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@@ -4276,39 +4067,6 @@ void MacroAssembler::JumpIfBlack(Register object,
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!AreAliased(object, scratch0, scratch1));
- Register current = scratch0;
- Label loop_again, end;
-
- // Scratch contains elements pointer.
- Mov(current, object);
- Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
- Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
-
- // Loop based on the map going up the prototype chain.
- Bind(&loop_again);
- Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
- B(lo, found);
- Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
- Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
-
- Bind(&end);
-}
-
-
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register shift_scratch, Register load_scratch,
Register length_scratch,
@@ -4471,30 +4229,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
TmpList()->set_list(old_tmp_list);
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch1,
- Register scratch2,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- Ldr(scratch1, NativeContextMemOperand());
- Ldr(scratch2,
- ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
- Cmp(map_in_out, scratch2);
- B(ne, no_map_match);
-
- // Use the transitioned cached map.
- Ldr(map_in_out,
- ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ldr(dst, NativeContextMemOperand());
Ldr(dst, ContextMemOperand(dst, index));
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index a89c106b63..0bd5c64769 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -1101,24 +1101,6 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ----
-
- // Jump to label if either object is not a sequential one-byte string.
- // Optionally perform a smi check on the objects first.
- void JumpIfEitherIsNotSequentialOneByteStrings(
- Register first, Register second, Register scratch1, Register scratch2,
- Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
-
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
- // Checks if both instance types are sequential one-byte strings and jumps to
- // label if either is not.
- void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
- Register first_object_instance_type, Register second_object_instance_type,
- Register scratch1, Register scratch2, Label* failure);
-
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1227,9 +1209,11 @@ class MacroAssembler : public Assembler {
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@@ -1360,32 +1344,6 @@ class MacroAssembler : public Assembler {
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
@@ -1566,21 +1524,6 @@ class MacroAssembler : public Assembler {
Label* if_any_set,
Label* fall_through);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
- // Check to see if number can be stored as a double in FastDoubleElements.
- // If it can, store it at the index specified by key_reg in the array,
- // otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- FPRegister fpscratch1,
- Label* fail,
- int elements_offset = 0);
-
// ---------------------------------------------------------------------------
// Inline caching support.
@@ -1624,17 +1567,6 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver,
- Register scratch1,
- Register scratch2,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
- &no_memento_found);
- B(eq, memento_found);
- Bind(&no_memento_found);
- }
-
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
@@ -1902,18 +1834,6 @@ class MacroAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(BailoutReason reason);
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch1,
- Register scratch2,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
@@ -2002,10 +1922,6 @@ class MacroAssembler : public Assembler {
// sequence is a code age sequence (emitted by EmitCodeAgeSequence).
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
// Perform necessary maintenance operations before a push or after a pop.
//
// Note that size is specified in bytes.
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index 78e688d86e..b994be3e17 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -4,6 +4,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
+clemensh@chromium.org
jpp@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index 13f936d0b5..b4026b0b19 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -9,6 +9,7 @@
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
+#include "src/compilation-info.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
@@ -31,6 +32,15 @@ namespace v8 {
namespace internal {
namespace {
+enum WasmDataEntries {
+ kWasmDataCompiledModule,
+ kWasmDataForeignGlobals,
+ kWasmDataUsesArray,
+ kWasmDataScript,
+ kWasmDataScriptPosition,
+ kWasmDataEntryCount,
+};
+
Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
Handle<JSReceiver> stdlib,
Handle<Name> name) {
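
The enum replaces the old literal indices 0-2 with named slots. A sketch of
the packing the later hunk performs, with std::array standing in for V8's
FixedArray:

    #include <array>

    enum WasmDataEntriesSketch {
      kCompiledModuleSketch,
      kForeignGlobalsSketch,
      kUsesArraySketch,
      kScriptSketch,
      kScriptPositionSketch,
      kEntryCountSketch,
    };

    using WasmDataSketch = std::array<const void*, kEntryCountSketch>;

    WasmDataSketch PackWasmData(const void* compiled, const void* globals,
                                const void* uses, const void* script,
                                const void* position_smi) {
      WasmDataSketch result = {};
      result[kCompiledModuleSketch] = compiled;
      result[kForeignGlobalsSketch] = globals;
      result[kUsesArraySketch] = uses;
      result[kScriptSketch] = script;
      result[kScriptPositionSketch] = position_smi;
      return result;
    }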
@@ -151,29 +161,38 @@ bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
} // namespace
-MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
+MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
- wasm::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
- info->literal());
- if (!typer.Validate()) {
+ base::ElapsedTimer asm_wasm_timer;
+ asm_wasm_timer.Start();
+ wasm::AsmWasmBuilder builder(info);
+ Handle<FixedArray> foreign_globals;
+ auto asm_wasm_result = builder.Run(&foreign_globals);
+ if (!asm_wasm_result.success) {
DCHECK(!info->isolate()->has_pending_exception());
- PrintF("Validation of asm.js module failed: %s", typer.error_message());
+ if (!FLAG_suppress_asm_messages) {
+ MessageHandler::ReportMessage(info->isolate(),
+ builder.typer()->message_location(),
+ builder.typer()->error_message());
+ }
return MaybeHandle<FixedArray>();
}
- v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
- info->literal(), &typer);
- i::Handle<i::FixedArray> foreign_globals;
- auto asm_wasm_result = builder.Run(&foreign_globals);
+ double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
+
wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
+ Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
+ static_cast<int>(asm_offsets->size()));
- i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
+ base::ElapsedTimer compile_timer;
+ compile_timer.Start();
+ MaybeHandle<JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
- internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
- asm_offsets->end());
+ internal::wasm::kAsmJsOrigin, info->script(), asm_offsets_vec);
DCHECK(!compiled.is_null());
+ double compile_time = compile_timer.Elapsed().InMillisecondsF();
- wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
+ wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
Handle<FixedArray> uses_array =
info->isolate()->factory()->NewFixedArray(static_cast<int>(uses.size()));
int count = 0;
@@ -181,16 +200,45 @@ MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
uses_array->set(count++, Smi::FromInt(i));
}
- Handle<FixedArray> result = info->isolate()->factory()->NewFixedArray(3);
- result->set(0, *compiled.ToHandleChecked());
- result->set(1, *foreign_globals);
- result->set(2, *uses_array);
+ Handle<FixedArray> result =
+ info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
+ result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
+ result->set(kWasmDataForeignGlobals, *foreign_globals);
+ result->set(kWasmDataUsesArray, *uses_array);
+ result->set(kWasmDataScript, *info->script());
+ result->set(kWasmDataScriptPosition,
+ Smi::FromInt(info->literal()->position()));
+
+ MessageLocation location(info->script(), info->literal()->position(),
+ info->literal()->position());
+ char text[100];
+ int length;
+ if (FLAG_predictable) {
+ length = base::OS::SNPrintF(text, arraysize(text), "success");
+ } else {
+ length =
+ base::OS::SNPrintF(text, arraysize(text),
+ "success, asm->wasm: %0.3f ms, compile: %0.3f ms",
+ asm_wasm_time, compile_time);
+ }
+ DCHECK_NE(-1, length);
+ USE(length);
+ Handle<String> stext(info->isolate()->factory()->InternalizeUtf8String(text));
+ Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+ info->isolate(), MessageTemplate::kAsmJsCompiled, &location, stext,
+ Handle<JSArray>::null());
+ message->set_error_level(v8::Isolate::kMessageInfo);
+ if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
+ MessageHandler::ReportMessage(info->isolate(), &location, message);
+ }
+
return result;
}
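
Note how the status text intentionally drops timings under --predictable so
the output stays byte-for-byte reproducible across runs. A minimal sketch of
that formatting decision:

    #include <cstddef>
    #include <cstdio>

    void FormatAsmStatusSketch(char* buf, std::size_t size, bool predictable,
                               double asm_wasm_ms, double compile_ms) {
      if (predictable) {
        std::snprintf(buf, size, "success");
      } else {
        std::snprintf(buf, size,
                      "success, asm->wasm: %0.3f ms, compile: %0.3f ms",
                      asm_wasm_ms, compile_ms);
      }
    }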
bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib) {
- i::Handle<i::FixedArray> uses(i::FixedArray::cast(wasm_data->get(2)));
+ i::Handle<i::FixedArray> uses(
+ i::FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
for (int i = 0; i < uses->length(); ++i) {
if (!IsStdlibMemberValid(isolate, stdlib,
uses->GetValueChecked<i::Object>(isolate, i))) {
@@ -204,14 +252,27 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
Handle<FixedArray> wasm_data,
Handle<JSArrayBuffer> memory,
Handle<JSReceiver> foreign) {
- i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
+ base::ElapsedTimer instantiate_timer;
+ instantiate_timer.Start();
+ i::Handle<i::WasmModuleObject> module(
+ i::WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
i::Handle<i::FixedArray> foreign_globals(
- i::FixedArray::cast(wasm_data->get(1)));
+ i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
+ // Create the ffi object for foreign functions {"": foreign}.
+ Handle<JSObject> ffi_object;
+ if (!foreign.is_null()) {
+ Handle<JSFunction> object_function = Handle<JSFunction>(
+ isolate->native_context()->object_function(), isolate);
+ ffi_object = isolate->factory()->NewJSObject(object_function);
+ JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
+ foreign, NONE);
+ }
+
i::MaybeHandle<i::JSObject> maybe_module_object =
- i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
+ i::wasm::WasmModule::Instantiate(isolate, &thrower, module, ffi_object,
memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
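
Wrapping foreign under the empty-string module name gives the wasm
instantiation path the import shape it expects, i.e. {"": foreign}. An
illustrative stand-in using a plain map (V8 builds a real JSObject instead):

    #include <map>
    #include <string>

    struct ForeignSketch {};  // stand-in for the JS foreign object handle

    std::map<std::string, const ForeignSketch*> MakeFfiObjectSketch(
        const ForeignSketch* foreign) {
      std::map<std::string, const ForeignSketch*> ffi;
      if (foreign != nullptr) {
        ffi[""] = foreign;  // foreign functions live under import module ""
      }
      return ffi;
    }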
@@ -258,6 +319,32 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
}
+
+ i::Handle<i::Script> script(i::Script::cast(wasm_data->get(kWasmDataScript)));
+ int32_t position = 0;
+ if (!wasm_data->get(kWasmDataScriptPosition)->ToInt32(&position)) {
+ UNREACHABLE();
+ }
+ MessageLocation location(script, position, position);
+ char text[50];
+ int length;
+ if (FLAG_predictable) {
+ length = base::OS::SNPrintF(text, arraysize(text), "success");
+ } else {
+ length = base::OS::SNPrintF(text, arraysize(text), "success, %0.3f ms",
+ instantiate_timer.Elapsed().InMillisecondsF());
+ }
+ DCHECK_NE(-1, length);
+ USE(length);
+ Handle<String> stext(isolate->factory()->InternalizeUtf8String(text));
+ Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+ isolate, MessageTemplate::kAsmJsInstantiated, &location, stext,
+ Handle<JSArray>::null());
+ message->set_error_level(v8::Isolate::kMessageInfo);
+ if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
+ MessageHandler::ReportMessage(isolate, &location, message);
+ }
+
return module_object;
}
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index a2c5cec280..a7795dc541 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -10,13 +10,13 @@
namespace v8 {
namespace internal {
+class CompilationInfo;
class JSArrayBuffer;
-class ParseInfo;
// Interface to compile and instantiate for asmjs.
class AsmJs {
public:
- static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
+ static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
index 55b5fc70d8..2389551872 100644
--- a/deps/v8/src/asmjs/asm-typer.cc
+++ b/deps/v8/src/asmjs/asm-typer.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <string>
+#include "include/v8.h"
#include "src/v8.h"
#include "src/asmjs/asm-types.h"
@@ -17,18 +18,33 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/globals.h"
+#include "src/messages.h"
#include "src/utils.h"
+#include "src/vector.h"
+
+#define FAIL_LOCATION_RAW(location, msg) \
+ do { \
+ Handle<String> message( \
+ isolate_->factory()->InternalizeOneByteString(msg)); \
+ error_message_ = MessageHandler::MakeMessageObject( \
+ isolate_, MessageTemplate::kAsmJsInvalid, (location), message, \
+ Handle<JSArray>::null()); \
+ error_message_->set_error_level(v8::Isolate::kMessageWarning); \
+ message_location_ = *(location); \
+ return AsmType::None(); \
+ } while (false)
-#define FAIL(node, msg) \
- do { \
- int line = node->position() == kNoSourcePosition \
- ? -1 \
- : script_->GetLineNumber(node->position()); \
- base::OS::SNPrintF(error_message_, sizeof(error_message_), \
- "asm: line %d: %s\n", line + 1, msg); \
- return AsmType::None(); \
+#define FAIL_RAW(node, msg) \
+ do { \
+ MessageLocation location(script_, node->position(), node->position()); \
+ FAIL_LOCATION_RAW(&location, msg); \
} while (false)
+#define FAIL_LOCATION(location, msg) \
+ FAIL_LOCATION_RAW(location, STATIC_CHAR_VECTOR(msg))
+
+#define FAIL(node, msg) FAIL_RAW(node, STATIC_CHAR_VECTOR(msg))
+
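
The FAIL_LOCATION_RAW/FAIL_RAW/FAIL macros above rely on the do { ... } while (false) idiom: the multi-statement failure path expands to a single statement, so it stays safe under an unbraced if, and the embedded return exits the enclosing validator method. A minimal self-contained illustration of the idiom; MY_FAIL and Check are hypothetical, not V8 API:

    #include <cstdio>

    // Hypothetical early-return failure macro using the same idiom as
    // FAIL_RAW above: one statement that records a message and returns a
    // sentinel from the enclosing function.
    #define MY_FAIL(msg)                 \
      do {                               \
        std::printf("error: %s\n", msg); \
        return false;                    \
      } while (false)

    bool Check(int x) {
      if (x < 0) MY_FAIL("negative input");  // safe even without braces
      return true;
    }

    int main() { return Check(-1) ? 0 : 1; }
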
#define RECURSE(call) \
do { \
if (GetCurrentStackPosition() < stack_limit_) { \
@@ -91,6 +107,53 @@ Statement* AsmTyper::FlattenedStatements::Next() {
}
// ----------------------------------------------------------------------------
+// Implementation of AsmTyper::SourceLayoutTracker
+
+bool AsmTyper::SourceLayoutTracker::IsValid() const {
+ const Section* kAllSections[] = {&use_asm_, &globals_, &functions_, &tables_,
+ &exports_};
+ for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
+ const auto& curr_section = *kAllSections[ii];
+ for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
+ if (curr_section.IsPrecededBy(*kAllSections[jj])) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
+ const AstNode& node) {
+ const int node_pos = node.position();
+ if (start_ == kNoSourcePosition) {
+ start_ = node_pos;
+ } else {
+ start_ = std::min(start_, node_pos);
+ }
+ if (end_ == kNoSourcePosition) {
+ end_ = node_pos;
+ } else {
+ end_ = std::max(end_, node_pos);
+ }
+}
+
+bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
+ const Section& other) const {
+ if (start_ == kNoSourcePosition) {
+ DCHECK_EQ(end_, kNoSourcePosition);
+ return false;
+ }
+ if (other.start_ == kNoSourcePosition) {
+ DCHECK_EQ(other.end_, kNoSourcePosition);
+ return false;
+ }
+ DCHECK_LE(start_, end_);
+ DCHECK_LE(other.start_, other.end_);
+ return other.start_ <= end_;
+}
+
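
Promoting SourceLayoutTracker out of the anonymous namespace (the removed copy appears further down in this file) also fixes two subtle bugs visible in this diff: AddNewElement previously grew start_ with std::max instead of std::min, and IsPrecededBy tightens the old OverlapsWith comparison from < to <= while dropping a redundant disjunct. A standalone analogue of the corrected check, assuming the same closed [start, end] ranges with kNoSourcePosition modeled as -1:

    #include <algorithm>
    #include <cassert>

    // Standalone analogue of SourceLayoutTracker::Section; -1 plays the
    // role of kNoSourcePosition for an empty section.
    struct Section {
      int start = -1, end = -1;
      void Add(int pos) {
        start = (start == -1) ? pos : std::min(start, pos);  // was std::max
        end = (end == -1) ? pos : std::max(end, pos);
      }
      // True if any element of other appears at or before our last element.
      bool IsPrecededBy(const Section& other) const {
        if (start == -1 || other.start == -1) return false;  // empty section
        return other.start <= end;
      }
    };

    int main() {
      Section globals, functions;
      globals.Add(10);
      globals.Add(20);
      functions.Add(15);  // a function declared inside the globals section
      assert(globals.IsPrecededBy(functions));  // layout violation detected
    }
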
+// ----------------------------------------------------------------------------
// Implementation of AsmTyper::VariableInfo
AsmTyper::VariableInfo* AsmTyper::VariableInfo::ForSpecialSymbol(
@@ -112,16 +175,16 @@ AsmTyper::VariableInfo* AsmTyper::VariableInfo::Clone(Zone* zone) const {
return new_var_info;
}
-void AsmTyper::VariableInfo::FirstForwardUseIs(VariableProxy* var) {
- DCHECK(first_forward_use_ == nullptr);
+void AsmTyper::VariableInfo::SetFirstForwardUse(
+ const MessageLocation& source_location) {
missing_definition_ = true;
- first_forward_use_ = var;
+ source_location_ = source_location;
}
// ----------------------------------------------------------------------------
// Implementation of AsmTyper
-AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
+AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
FunctionLiteral* root)
: isolate_(isolate),
zone_(zone),
@@ -137,9 +200,9 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
stack_limit_(isolate->stack_guard()->real_climit()),
- node_types_(zone_),
fround_type_(AsmType::FroundType(zone_)),
- ffi_type_(AsmType::FFIType(zone_)) {
+ ffi_type_(AsmType::FFIType(zone_)),
+ function_pointer_tables_(zone_) {
InitializeStdlib();
}
@@ -283,6 +346,9 @@ void AsmTyper::InitializeStdlib() {
AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
auto* obj = import->obj();
auto* key = import->key()->AsLiteral();
+ if (key == nullptr) {
+ return nullptr;
+ }
ObjectTypeMap* stdlib = &stdlib_types_;
if (auto* obj_as_property = obj->AsProperty()) {
@@ -345,7 +411,8 @@ AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
}
void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
- info->FirstForwardUseIs(proxy);
+ MessageLocation location(script_, proxy->position(), proxy->position());
+ info->SetFirstForwardUse(location);
forward_definitions_.push_back(info);
}
@@ -390,22 +457,58 @@ bool AsmTyper::AddLocal(Variable* variable, VariableInfo* info) {
void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
DCHECK_NE(type, AsmType::None());
- DCHECK(node_types_.find(node) == node_types_.end());
- node_types_.insert(std::make_pair(node, type));
+ if (in_function_) {
+ DCHECK(function_node_types_.find(node) == function_node_types_.end());
+ function_node_types_.insert(std::make_pair(node, type));
+ } else {
+ DCHECK(module_node_types_.find(node) == module_node_types_.end());
+ module_node_types_.insert(std::make_pair(node, type));
+ }
+}
+
+namespace {
+bool IsLiteralDouble(Literal* literal) {
+ return literal->raw_value()->IsNumber() &&
+ literal->raw_value()->ContainsDot();
+}
+
+bool IsLiteralInt(Literal* literal) {
+ return literal->raw_value()->IsNumber() &&
+ !literal->raw_value()->ContainsDot();
+}
+
+bool IsLiteralMinus1(Literal* literal) {
+ return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == -1.0;
+}
+
+bool IsLiteral1Dot0(Literal* literal) {
+ return IsLiteralDouble(literal) && literal->raw_value()->AsNumber() == 1.0;
}
+bool IsLiteral0(Literal* literal) {
+ return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
+}
+} // namespace
+
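
The helpers above encode the asm.js convention that a numeric literal's kind is determined purely by whether its source text contains a dot: 1.0 annotates a double, 1 an int, and -1/1.0/0 are the specific literals the annotation matchers below look for. An illustrative classification over plain strings; this stands in for V8's ContainsDot() machinery, not its parser:

    #include <cassert>
    #include <string>

    // Illustrative stand-in for the ContainsDot()-based checks above: a
    // numeric literal with a dot is a double annotation, without is int.
    bool IsDoubleLiteral(const std::string& text) {
      return text.find('.') != std::string::npos;
    }

    int main() {
      assert(IsDoubleLiteral("1.0"));  // matched by IsLiteral1Dot0
      assert(!IsDoubleLiteral("0"));   // matched by IsLiteral0
      assert(!IsDoubleLiteral("-1"));  // matched by IsLiteralMinus1
    }
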
AsmType* AsmTyper::TypeOf(AstNode* node) const {
- auto node_type_iter = node_types_.find(node);
- if (node_type_iter != node_types_.end()) {
+ auto node_type_iter = function_node_types_.find(node);
+ if (node_type_iter != function_node_types_.end()) {
+ return node_type_iter->second;
+ }
+ node_type_iter = module_node_types_.find(node);
+ if (node_type_iter != module_node_types_.end()) {
return node_type_iter->second;
}
 // Sometimes literal nodes are not added to the node type maps simply because
 // they are not visited with ValidateExpression().
if (auto* literal = node->AsLiteral()) {
- if (literal->raw_value()->ContainsDot()) {
+ if (IsLiteralDouble(literal)) {
return AsmType::Double();
}
+ if (!IsLiteralInt(literal)) {
+ return AsmType::None();
+ }
uint32_t u;
if (literal->value()->ToUint32(&u)) {
if (u > LargestFixNum) {
@@ -433,13 +536,39 @@ AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
return member;
}
+AsmType* AsmTyper::FailWithMessage(const char* text) {
+ FAIL_RAW(root_, OneByteVector(text));
+}
+
bool AsmTyper::Validate() {
- if (!AsmType::None()->IsExactly(ValidateModule(root_))) {
+ return ValidateBeforeFunctionsPhase() &&
+ !AsmType::None()->IsExactly(ValidateModuleFunctions(root_)) &&
+ ValidateAfterFunctionsPhase();
+}
+
+bool AsmTyper::ValidateBeforeFunctionsPhase() {
+ if (!AsmType::None()->IsExactly(ValidateModuleBeforeFunctionsPhase(root_))) {
return true;
}
return false;
}
+bool AsmTyper::ValidateInnerFunction(FunctionDeclaration* fun_decl) {
+ if (!AsmType::None()->IsExactly(ValidateModuleFunction(fun_decl))) {
+ return true;
+ }
+ return false;
+}
+
+bool AsmTyper::ValidateAfterFunctionsPhase() {
+ if (!AsmType::None()->IsExactly(ValidateModuleAfterFunctionsPhase(root_))) {
+ return true;
+ }
+ return false;
+}
+
+void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
+
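
Validate() above now decomposes into three phases so the asm-wasm builder can interleave per-function validation with code generation, releasing each function's node types as soon as it has been translated. A hedged sketch of the calling pattern; Typer and functions stand in for AsmTyper and the module's FunctionDeclaration list:

    // Hypothetical driver showing the intended phase order.
    template <typename Typer, typename FunctionList>
    bool ValidatePhased(Typer& typer, const FunctionList& functions) {
      if (!typer.ValidateBeforeFunctionsPhase()) return false;  // module header
      for (auto* fun_decl : functions) {
        if (!typer.ValidateInnerFunction(fun_decl)) return false;
        typer.ClearFunctionNodeTypes();  // bound per-function map memory
      }
      // Tables, exports, forward definitions, and source layout.
      return typer.ValidateAfterFunctionsPhase();
    }
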
namespace {
bool IsUseAsmDirective(Statement* first_statement) {
ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@@ -477,91 +606,12 @@ Assignment* ExtractInitializerExpression(Statement* statement) {
} // namespace
// 6.1 ValidateModule
-namespace {
-// SourceLayoutTracker keeps track of the start and end positions of each
-// section in the asm.js source. The sections should not overlap, otherwise the
-// asm.js source is invalid.
-class SourceLayoutTracker {
- public:
- SourceLayoutTracker() = default;
-
- bool IsValid() const {
- const Section* kAllSections[] = {&use_asm_, &globals_, &functions_,
- &tables_, &exports_};
- for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
- const auto& curr_section = *kAllSections[ii];
- for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
- if (curr_section.OverlapsWith(*kAllSections[jj])) {
- return false;
- }
- }
- }
- return true;
- }
-
- void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
-
- void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
-
- void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
-
- void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
-
- void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
-
- private:
- class Section {
- public:
- Section() = default;
- Section(const Section&) = default;
- Section& operator=(const Section&) = default;
-
- void AddNewElement(const AstNode& node) {
- const int node_pos = node.position();
- if (start_ == kNoSourcePosition) {
- start_ = node_pos;
- } else {
- start_ = std::max(start_, node_pos);
- }
- if (end_ == kNoSourcePosition) {
- end_ = node_pos;
- } else {
- end_ = std::max(end_, node_pos);
- }
- }
-
- bool OverlapsWith(const Section& other) const {
- if (start_ == kNoSourcePosition) {
- DCHECK_EQ(end_, kNoSourcePosition);
- return false;
- }
- if (other.start_ == kNoSourcePosition) {
- DCHECK_EQ(other.end_, kNoSourcePosition);
- return false;
- }
- return other.start_ < end_ || other.end_ < start_;
- }
-
- private:
- int start_ = kNoSourcePosition;
- int end_ = kNoSourcePosition;
- };
-
- Section use_asm_;
- Section globals_;
- Section functions_;
- Section tables_;
- Section exports_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
-};
-} // namespace
-
-AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
- SourceLayoutTracker source_layout;
-
+AsmType* AsmTyper::ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun) {
DeclarationScope* scope = fun->scope();
if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
+ if (scope->inner_scope_calls_eval()) {
+ FAIL(fun, "Invalid asm.js module using eval.");
+ }
if (!ValidAsmIdentifier(fun->name()))
FAIL(fun, "Invalid asm.js identifier in module name.");
module_name_ = fun->name();
@@ -594,7 +644,6 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
}
}
- ZoneVector<Assignment*> function_pointer_tables(zone_);
FlattenedStatements iter(zone_, fun->body());
auto* use_asm_directive = iter.Next();
if (use_asm_directive == nullptr) {
@@ -616,8 +665,8 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (!IsUseAsmDirective(use_asm_directive)) {
FAIL(fun, "Missing \"use asm\".");
}
- source_layout.AddUseAsm(*use_asm_directive);
- ReturnStatement* module_return = nullptr;
+ source_layout_.AddUseAsm(*use_asm_directive);
+ module_return_ = nullptr;
// *VIOLATION* The spec states that globals should be followed by function
// declarations, which should be followed by function pointer tables, followed
@@ -627,40 +676,57 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (auto* assign = ExtractInitializerExpression(current)) {
if (assign->value()->IsArrayLiteral()) {
// Save function tables for later validation.
- function_pointer_tables.push_back(assign);
+ function_pointer_tables_.push_back(assign);
} else {
RECURSE(ValidateGlobalDeclaration(assign));
- source_layout.AddGlobal(*assign);
+ source_layout_.AddGlobal(*assign);
}
continue;
}
if (auto* current_as_return = current->AsReturnStatement()) {
- if (module_return != nullptr) {
+ if (module_return_ != nullptr) {
FAIL(fun, "Multiple export statements.");
}
- module_return = current_as_return;
- source_layout.AddExport(*module_return);
+ module_return_ = current_as_return;
+ source_layout_.AddExport(*module_return_);
continue;
}
FAIL(current, "Invalid top-level statement in asm.js module.");
}
+ return AsmType::Int(); // Any type that is not AsmType::None().
+}
+
+AsmType* AsmTyper::ValidateModuleFunction(FunctionDeclaration* fun_decl) {
+ RECURSE(ValidateFunction(fun_decl));
+ source_layout_.AddFunction(*fun_decl);
+
+ return AsmType::Int(); // Any type that is not AsmType::None().
+}
+
+AsmType* AsmTyper::ValidateModuleFunctions(FunctionLiteral* fun) {
+ DeclarationScope* scope = fun->scope();
Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
- RECURSE(ValidateFunction(fun_decl));
- source_layout.AddFunction(*fun_decl);
+ RECURSE(ValidateModuleFunction(fun_decl));
continue;
}
}
- for (auto* function_table : function_pointer_tables) {
+ return AsmType::Int(); // Any type that is not AsmType::None().
+}
+
+AsmType* AsmTyper::ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun) {
+ for (auto* function_table : function_pointer_tables_) {
RECURSE(ValidateFunctionTable(function_table));
- source_layout.AddTable(*function_table);
+ source_layout_.AddTable(*function_table);
}
+ DeclarationScope* scope = fun->scope();
+ Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (decl->IsFunctionDeclaration()) {
continue;
@@ -682,20 +748,20 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
}
// 6.2 ValidateExport
- if (module_return == nullptr) {
+ if (module_return_ == nullptr) {
FAIL(fun, "Missing asm.js module export.");
}
for (auto* forward_def : forward_definitions_) {
if (forward_def->missing_definition()) {
- FAIL(forward_def->first_forward_use(),
- "Missing definition for forward declared identifier.");
+ FAIL_LOCATION(forward_def->source_location(),
+ "Missing definition for forward declared identifier.");
}
}
- RECURSE(ValidateExport(module_return));
+ RECURSE(ValidateExport(module_return_));
- if (!source_layout.IsValid()) {
+ if (!source_layout_.IsValid()) {
FAIL(fun, "Invalid asm.js source code layout.");
}
@@ -714,8 +780,7 @@ bool IsDoubleAnnotation(BinaryOperation* binop) {
return false;
}
- return right_as_literal->raw_value()->ContainsDot() &&
- right_as_literal->raw_value()->AsNumber() == 1.0;
+ return IsLiteral1Dot0(right_as_literal);
}
bool IsIntAnnotation(BinaryOperation* binop) {
@@ -728,8 +793,7 @@ bool IsIntAnnotation(BinaryOperation* binop) {
return false;
}
- return !right_as_literal->raw_value()->ContainsDot() &&
- right_as_literal->raw_value()->AsNumber() == 0.0;
+ return IsLiteral0(right_as_literal);
}
} // namespace
@@ -894,6 +958,10 @@ AsmType* AsmTyper::ExportType(VariableProxy* fun_export) {
FAIL(fun_export, "Module export is not an asm.js function.");
}
+ if (!fun_export->var()->is_function()) {
+ FAIL(fun_export, "Module exports must be function declarations.");
+ }
+
return type;
}
@@ -915,6 +983,10 @@ AsmType* AsmTyper::ValidateExport(ReturnStatement* exports) {
"Only normal object properties may be used in the export object "
"literal.");
}
+ if (!prop->key()->AsLiteral()->IsPropertyName()) {
+ FAIL(prop->key(),
+ "Exported functions must have valid identifier names.");
+ }
auto* export_obj = prop->value()->AsVariableProxy();
if (export_obj == nullptr) {
@@ -1091,6 +1163,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
parameter_types.push_back(type);
SetTypeOf(proxy, type);
SetTypeOf(expr, type);
+ SetTypeOf(expr->value(), type);
}
if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
@@ -1442,7 +1515,7 @@ bool ExtractInt32CaseLabel(CaseClause* clause, int32_t* lbl) {
return false;
}
- if (lbl_expr->raw_value()->ContainsDot()) {
+ if (!IsLiteralInt(lbl_expr)) {
return false;
}
@@ -1539,8 +1612,7 @@ bool IsInvert(BinaryOperation* binop) {
return false;
}
- return !right_as_literal->raw_value()->ContainsDot() &&
- right_as_literal->raw_value()->AsNumber() == -1.0;
+ return IsLiteralMinus1(right_as_literal);
}
bool IsUnaryMinus(BinaryOperation* binop) {
@@ -1554,8 +1626,7 @@ bool IsUnaryMinus(BinaryOperation* binop) {
return false;
}
- return !right_as_literal->raw_value()->ContainsDot() &&
- right_as_literal->raw_value()->AsNumber() == -1.0;
+ return IsLiteralMinus1(right_as_literal);
}
} // namespace
@@ -1684,7 +1755,7 @@ AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
return AsmType::Void();
}
- if (literal->raw_value()->ContainsDot()) {
+ if (IsLiteralDouble(literal)) {
return AsmType::Double();
}
@@ -1864,7 +1935,7 @@ bool IsIntishLiteralFactor(Expression* expr, int32_t* factor) {
return false;
}
- if (literal->raw_value()->ContainsDot()) {
+ if (!IsLiteralInt(literal)) {
return false;
}
@@ -2204,12 +2275,12 @@ AsmType* AsmTyper::ValidateBitwiseORExpression(BinaryOperation* binop) {
RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
return type;
}
-
- // TODO(jpp): at this point we know that binop is expr|0. We could sinply
- //
- // RECURSE(t = ValidateExpression(left));
- // FAIL_IF(t->IsNotA(Intish));
- // return Signed;
+ AsmType* left_type;
+ RECURSE(left_type = ValidateExpression(left));
+ if (!left_type->IsA(AsmType::Intish())) {
+ FAIL(left, "Left side of |0 annotation must be intish.");
+ }
+ return AsmType::Signed();
}
auto* right = binop->right();
@@ -2273,7 +2344,7 @@ bool ExtractIndirectCallMask(Expression* expr, uint32_t* value) {
return false;
}
- if (as_literal->raw_value()->ContainsDot()) {
+ if (!IsLiteralInt(as_literal)) {
return false;
}
@@ -2329,6 +2400,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false);
FAIL(call, "Redeclared global identifier.");
}
+ if (call->GetCallType() != Call::OTHER_CALL) {
+ FAIL(call, "Invalid call of existing global function.");
+ }
SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type);
return return_type;
@@ -2359,6 +2433,10 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
FAIL(call, "Function invocation does not match function type.");
}
+ if (call->GetCallType() != Call::OTHER_CALL) {
+ FAIL(call, "Invalid forward call of global function.");
+ }
+
SetTypeOf(call_var_proxy, call_var_info->type());
SetTypeOf(call, return_type);
return return_type;
@@ -2417,6 +2495,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false);
FAIL(call, "Redeclared global identifier.");
}
+ if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
+ FAIL(call, "Invalid call of existing function table.");
+ }
SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type);
return return_type;
@@ -2441,6 +2522,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
"signature.");
}
+ if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
+ FAIL(call, "Invalid forward call of function table.");
+ }
SetTypeOf(call_property, previous_type->signature());
SetTypeOf(call, return_type);
return return_type;
@@ -2457,7 +2541,7 @@ bool ExtractHeapAccessShift(Expression* expr, uint32_t* value) {
return false;
}
- if (as_literal->raw_value()->ContainsDot()) {
+ if (!IsLiteralInt(as_literal)) {
return false;
}
@@ -2501,7 +2585,7 @@ AsmType* AsmTyper::ValidateHeapAccess(Property* heap,
SetTypeOf(obj, obj_type);
if (auto* key_as_literal = heap->key()->AsLiteral()) {
- if (key_as_literal->raw_value()->ContainsDot()) {
+ if (!IsLiteralInt(key_as_literal)) {
FAIL(key_as_literal, "Heap access index must be int.");
}
@@ -2685,9 +2769,9 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
if (auto* literal = ret_expr->AsLiteral()) {
int32_t _;
- if (literal->raw_value()->ContainsDot()) {
+ if (IsLiteralDouble(literal)) {
return AsmType::Double();
- } else if (literal->value()->ToInt32(&_)) {
+ } else if (IsLiteralInt(literal) && literal->value()->ToInt32(&_)) {
return AsmType::Signed();
} else if (literal->IsUndefinedLiteral()) {
// *VIOLATION* The parser changes
@@ -2728,13 +2812,15 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
AsmType* AsmTyper::VariableTypeAnnotations(
Expression* initializer, VariableInfo::Mutability mutability_type) {
if (auto* literal = initializer->AsLiteral()) {
- if (literal->raw_value()->ContainsDot()) {
+ if (IsLiteralDouble(literal)) {
SetTypeOf(initializer, AsmType::Double());
return AsmType::Double();
}
+ if (!IsLiteralInt(literal)) {
+ FAIL(initializer, "Invalid type annotation - forbidden literal.");
+ }
int32_t i32;
uint32_t u32;
-
AsmType* initializer_type = nullptr;
if (literal->value()->ToUint32(&u32)) {
if (u32 > LargestFixNum) {
@@ -2793,13 +2879,17 @@ AsmType* AsmTyper::VariableTypeAnnotations(
"to fround.");
}
- // Float constants must contain dots in local, but not in globals.
- if (mutability_type == VariableInfo::kLocal) {
- if (!src_expr->raw_value()->ContainsDot()) {
- FAIL(initializer,
- "Invalid float type annotation - expected literal argument to be a "
- "floating point literal.");
- }
+ // ERRATA: 5.4
+ // According to the spec, float constants must contain dots in locals,
+ // but not in globals.
+ // However, the errata doc (and actual programs) use integer values
+ // with fround(..), so the check that would enforce this is skipped here.
+ // Instead, check that the literal expression is at least a number.
+ if (!src_expr->raw_value()->IsNumber()) {
+ FAIL(initializer,
+ "Invalid float type annotation - expected numeric literal for call "
+ "to fround.");
}
return AsmType::Float();
@@ -2848,19 +2938,6 @@ AsmType* AsmTyper::NewHeapView(CallNew* new_heap_view) {
return heap_view_info->type();
}
-bool IsValidAsm(Isolate* isolate, Zone* zone, Script* script,
- FunctionLiteral* root, std::string* error_message) {
- error_message->clear();
-
- AsmTyper typer(isolate, zone, script, root);
- if (typer.Validate()) {
- return true;
- }
-
- *error_message = typer.error_message();
- return false;
-}
-
} // namespace wasm
} // namespace internal
} // namespace v8
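
The single node_types_ ZoneMap is replaced above by module_node_types_ and function_node_types_, with TypeOf consulting the function-level map first; together with ClearFunctionNodeTypes() this keeps per-function entries from accumulating across the module. A minimal analogue of the two-level lookup, using int keys in place of AstNode pointers:

    #include <cassert>
    #include <unordered_map>

    // Analogue of the two-map TypeOf above: per-function entries shadow
    // module-level entries and are discarded after each function.
    struct TwoLevelTypes {
      std::unordered_map<int, int> module_types;    // node id -> type id
      std::unordered_map<int, int> function_types;  // cleared per function

      int TypeOf(int node) const {
        auto it = function_types.find(node);
        if (it != function_types.end()) return it->second;
        it = module_types.find(node);
        return it != module_types.end() ? it->second : -1;  // -1 ~ None
      }
    };

    int main() {
      TwoLevelTypes t;
      t.module_types[1] = 7;
      t.function_types[1] = 9;   // shadows while validating a function
      assert(t.TypeOf(1) == 9);
      t.function_types.clear();  // ClearFunctionNodeTypes analogue
      assert(t.TypeOf(1) == 7);
    }
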
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
index 2c66948d56..8ddcb34b0f 100644
--- a/deps/v8/src/asmjs/asm-typer.h
+++ b/deps/v8/src/asmjs/asm-typer.h
@@ -7,6 +7,7 @@
#include <cstdint>
#include <string>
+#include <unordered_map>
#include <unordered_set>
#include "src/allocation.h"
@@ -15,6 +16,7 @@
#include "src/ast/ast-types.h"
#include "src/ast/ast.h"
#include "src/effects.h"
+#include "src/messages.h"
#include "src/type-info.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone.h"
@@ -25,6 +27,7 @@ namespace wasm {
class AsmType;
class AsmTyperHarnessBuilder;
+class SourceLayoutTracker;
class AsmTyper final {
public:
@@ -66,16 +69,27 @@ class AsmTyper final {
};
~AsmTyper() = default;
- AsmTyper(Isolate* isolate, Zone* zone, Script* script, FunctionLiteral* root);
+ AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
+ FunctionLiteral* root);
bool Validate();
+ // Do asm.js validation in phases (to interleave with conversion to wasm).
+ bool ValidateBeforeFunctionsPhase();
+ bool ValidateInnerFunction(FunctionDeclaration* decl);
+ bool ValidateAfterFunctionsPhase();
+ void ClearFunctionNodeTypes();
- const char* error_message() const { return error_message_; }
+ Handle<JSMessageObject> error_message() const { return error_message_; }
+ const MessageLocation* message_location() const { return &message_location_; }
AsmType* TypeOf(AstNode* node) const;
AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var);
+ // Allow the asm-wasm-builder to trigger failures (for interleaved
+ // validation).
+ AsmType* FailWithMessage(const char* text);
+
typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
StdlibSet StdlibUses() const { return stdlib_uses_; }
@@ -130,7 +144,7 @@ class AsmTyper final {
bool IsHeap() const { return standard_member_ == kHeap; }
void MarkDefined() { missing_definition_ = false; }
- void FirstForwardUseIs(VariableProxy* var);
+ void SetFirstForwardUse(const MessageLocation& source_location);
StandardMember standard_member() const { return standard_member_; }
void set_standard_member(StandardMember standard_member) {
@@ -145,7 +159,7 @@ class AsmTyper final {
bool missing_definition() const { return missing_definition_; }
- VariableProxy* first_forward_use() const { return first_forward_use_; }
+ const MessageLocation* source_location() { return &source_location_; }
static VariableInfo* ForSpecialSymbol(Zone* zone,
StandardMember standard_member);
@@ -157,9 +171,8 @@ class AsmTyper final {
// missing_definition_ is set to true for forward definition - i.e., use
// before definition.
bool missing_definition_ = false;
- // first_forward_use_ holds the AST node that first referenced this
- // VariableInfo. Used for error messages.
- VariableProxy* first_forward_use_ = nullptr;
+ // Used for error messages.
+ MessageLocation source_location_;
};
// RAII-style manager for the in_function_ member variable.
@@ -199,6 +212,40 @@ class AsmTyper final {
DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
};
+ class SourceLayoutTracker {
+ public:
+ SourceLayoutTracker() = default;
+ bool IsValid() const;
+ void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
+ void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
+ void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
+ void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
+ void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
+
+ private:
+ class Section {
+ public:
+ Section() = default;
+ Section(const Section&) = default;
+ Section& operator=(const Section&) = default;
+
+ void AddNewElement(const AstNode& node);
+ bool IsPrecededBy(const Section& other) const;
+
+ private:
+ int start_ = kNoSourcePosition;
+ int end_ = kNoSourcePosition;
+ };
+
+ Section use_asm_;
+ Section globals_;
+ Section functions_;
+ Section tables_;
+ Section exports_;
+
+ DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
+ };
+
using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
void InitializeStdlib();
void SetTypeOf(AstNode* node, AsmType* type);
@@ -220,7 +267,10 @@ class AsmTyper final {
// validation failure.
// 6.1 ValidateModule
- AsmType* ValidateModule(FunctionLiteral* fun);
+ AsmType* ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun);
+ AsmType* ValidateModuleFunction(FunctionDeclaration* fun_decl);
+ AsmType* ValidateModuleFunctions(FunctionLiteral* fun);
+ AsmType* ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun);
AsmType* ValidateGlobalDeclaration(Assignment* assign);
// 6.2 ValidateExport
AsmType* ExportType(VariableProxy* fun_export);
@@ -323,7 +373,7 @@ class AsmTyper final {
Isolate* isolate_;
Zone* zone_;
- Script* script_;
+ Handle<Script> script_;
FunctionLiteral* root_;
bool in_function_ = false;
@@ -345,13 +395,19 @@ class AsmTyper final {
std::uintptr_t stack_limit_;
bool stack_overflow_ = false;
- ZoneMap<AstNode*, AsmType*> node_types_;
- static const int kErrorMessageLimit = 100;
+ std::unordered_map<AstNode*, AsmType*> module_node_types_;
+ std::unordered_map<AstNode*, AsmType*> function_node_types_;
+ static const int kErrorMessageLimit = 128;
AsmType* fround_type_;
AsmType* ffi_type_;
- char error_message_[kErrorMessageLimit];
+ Handle<JSMessageObject> error_message_;
+ MessageLocation message_location_;
StdlibSet stdlib_uses_;
+ SourceLayoutTracker source_layout_;
+ ReturnStatement* module_return_;
+ ZoneVector<Assignment*> function_pointer_tables_;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
};
diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc
index 8f3c9a51e6..79c43a370b 100644
--- a/deps/v8/src/asmjs/asm-types.cc
+++ b/deps/v8/src/asmjs/asm-types.cc
@@ -6,6 +6,7 @@
#include <cinttypes>
+#include "src/utils.h"
#include "src/v8.h"
namespace v8 {
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
index cac6fbd8b3..907e80fe4b 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.cc
+++ b/deps/v8/src/asmjs/asm-wasm-builder.cc
@@ -19,6 +19,11 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/codegen.h"
+#include "src/compilation-info.h"
+#include "src/compiler.h"
+#include "src/isolate.h"
+#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
@@ -37,13 +42,14 @@ enum ValueFate { kDrop, kLeaveOnStack };
struct ForeignVariable {
Handle<Name> name;
Variable* var;
- LocalType type;
+ ValueType type;
};
class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
- AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal,
- AsmTyper* typer)
+ AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
+ AstValueFactory* ast_value_factory, Handle<Script> script,
+ FunctionLiteral* literal, AsmTyper* typer)
: local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
functions_(ZoneHashMap::kDefaultHashMapCapacity,
@@ -56,15 +62,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
literal_(literal),
isolate_(isolate),
zone_(zone),
+ info_(info),
+ ast_value_factory_(ast_value_factory),
+ script_(script),
typer_(typer),
+ typer_failed_(false),
+ typer_finished_(false),
breakable_blocks_(zone),
foreign_variables_(zone),
init_function_(nullptr),
foreign_init_function_(nullptr),
- next_table_index_(0),
function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
- imported_function_table_(this) {
+ imported_function_table_(this),
+ parent_binop_(nullptr) {
InitializeAstVisitor(isolate);
}
@@ -90,10 +101,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
}
+ foreign_init_function_->Emit(kExprEnd);
}
- i::Handle<i::FixedArray> GetForeignArgs() {
- i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
+ Handle<FixedArray> GetForeignArgs() {
+ Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
static_cast<int>(foreign_variables_.size()));
for (size_t i = 0; i < foreign_variables_.size(); ++i) {
ForeignVariable* fv = &foreign_variables_[i];
@@ -102,10 +114,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return ret;
}
- void Build() {
+ bool Build() {
InitializeInitFunction();
- RECURSE(VisitFunctionLiteral(literal_));
+ if (!typer_->ValidateBeforeFunctionsPhase()) {
+ return false;
+ }
+ DCHECK(!HasStackOverflow());
+ VisitFunctionLiteral(literal_);
+ if (HasStackOverflow()) {
+ return false;
+ }
+ if (!typer_finished_ && !typer_failed_) {
+ typer_->FailWithMessage("Module missing export section.");
+ typer_failed_ = true;
+ }
+ if (typer_failed_) {
+ return false;
+ }
BuildForeignInitFunction();
+ init_function_->Emit(kExprEnd); // finish init function.
+ return true;
}
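
Build() now terminates the init and foreign-init functions with an explicit kExprEnd, and VisitFunctionLiteral below does the same for ordinary bodies, since in the wasm binary format every function body must close with the end opcode (0x0b). For reference, the smallest valid body under that rule:

    #include <cstdint>
    #include <vector>

    // Smallest valid wasm function body: an empty local-declaration vector
    // followed by the mandatory end opcode.
    std::vector<uint8_t> EmptyFunctionBody() {
      return {0x00,   // local declaration count = 0
              0x0b};  // end opcode closing the body
    }

    int main() { return EmptyFunctionBody().size() == 2 ? 0 : 1; }
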
void VisitVariableDeclaration(VariableDeclaration* decl) {}
@@ -113,12 +141,65 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
+ FunctionLiteral* old_func = decl->fun();
+ Zone zone(isolate_->allocator(), ZONE_NAME);
+ DeclarationScope* new_func_scope = nullptr;
+ if (decl->fun()->body() == nullptr) {
+ // TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
+ // compiling the wasm module.
+ Handle<SharedFunctionInfo> shared =
+ Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
+ shared->set_is_toplevel(false);
+ ParseInfo info(&zone, script_);
+ info.set_shared_info(shared);
+ info.set_toplevel(false);
+ info.set_language_mode(decl->fun()->scope()->language_mode());
+ info.set_allow_lazy_parsing(false);
+ info.set_function_literal_id(shared->function_literal_id());
+ info.set_ast_value_factory(ast_value_factory_);
+ info.set_ast_value_factory_owned(false);
+ // Create a fresh function scope to parse the function in.
+ new_func_scope = new (info.zone()) DeclarationScope(
+ info.zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
+ info.set_asm_function_scope(new_func_scope);
+ if (!Compiler::ParseAndAnalyze(&info)) {
+ typer_failed_ = true;
+ return;
+ }
+ FunctionLiteral* func = info.literal();
+ DCHECK_NOT_NULL(func);
+ decl->set_fun(func);
+ }
+ if (!typer_->ValidateInnerFunction(decl)) {
+ typer_failed_ = true;
+ decl->set_fun(old_func);
+ if (new_func_scope != nullptr) {
+ DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
+ if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
+ UNREACHABLE();
+ }
+ }
+ return;
+ }
current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
scope_ = kFuncScope;
+
+ // Record start of the function, used as position for the stack check.
+ current_function_builder_->SetAsmFunctionStartPosition(
+ decl->fun()->start_position());
+
RECURSE(Visit(decl->fun()));
+ decl->set_fun(old_func);
+ if (new_func_scope != nullptr) {
+ DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
+ if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
+ UNREACHABLE();
+ }
+ }
scope_ = kModuleScope;
current_function_builder_ = nullptr;
local_variables_.Clear();
+ typer_->ClearFunctionNodeTypes();
}
void VisitStatements(ZoneList<Statement*>* stmts) {
@@ -129,7 +210,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
continue;
}
RECURSE(Visit(stmt));
- if (stmt->IsJump()) break;
+ if (typer_failed_) break;
}
}
@@ -204,6 +285,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
+ void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
+
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
@@ -245,6 +328,16 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitReturnStatement(ReturnStatement* stmt) {
if (scope_ == kModuleScope) {
+ if (typer_finished_) {
+ typer_->FailWithMessage("Module has multiple returns.");
+ typer_failed_ = true;
+ return;
+ }
+ if (!typer_->ValidateAfterFunctionsPhase()) {
+ typer_failed_ = true;
+ return;
+ }
+ typer_finished_ = true;
scope_ = kExportScope;
RECURSE(Visit(stmt->expression()));
scope_ = kModuleScope;
@@ -440,16 +533,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// Add the parameters for the function.
const auto& arguments = func_type->Arguments();
for (int i = 0; i < expr->parameter_count(); ++i) {
- LocalType type = TypeFrom(arguments[i]);
- DCHECK_NE(kAstStmt, type);
+ ValueType type = TypeFrom(arguments[i]);
+ DCHECK_NE(kWasmStmt, type);
InsertParameter(scope->parameter(i), type, i);
}
} else {
UNREACHABLE();
}
}
- RECURSE(VisitStatements(expr->body()));
RECURSE(VisitDeclarations(scope->declarations()));
+ if (typer_failed_) return;
+ RECURSE(VisitStatements(expr->body()));
+ if (scope_ == kFuncScope) {
+ // Finish the function-body scope block.
+ current_function_builder_->Emit(kExprEnd);
+ }
}
void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@@ -461,18 +559,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->condition()));
// WASM ifs come with implicit blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
- LocalTypeCode type;
+ ValueTypeCode type;
switch (TypeOf(expr)) {
- case kAstI32:
+ case kWasmI32:
type = kLocalI32;
break;
- case kAstI64:
+ case kWasmI64:
type = kLocalI64;
break;
- case kAstF32:
+ case kWasmF32:
type = kLocalF32;
break;
- case kAstF64:
+ case kWasmF64:
type = kLocalF64;
break;
default:
@@ -544,8 +642,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (VisitStdlibConstant(var)) {
return;
}
- LocalType var_type = TypeOf(expr);
- DCHECK_NE(kAstStmt, var_type);
+ ValueType var_type = TypeOf(expr);
+ DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
current_function_builder_->EmitWithVarInt(
kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
@@ -638,12 +736,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* name = prop->key()->AsLiteral();
DCHECK_NOT_NULL(name);
DCHECK(name->IsPropertyName());
- const AstRawString* raw_name = name->AsRawPropertyName();
+ Handle<String> function_name = name->AsPropertyName();
+ int length;
+ std::unique_ptr<char[]> utf8 = function_name->ToCString(
+ DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
if (var->is_function()) {
WasmFunctionBuilder* function = LookupOrInsertFunction(var);
- function->Export();
- function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
- raw_name->length()});
+ function->ExportAs({utf8.get(), length});
}
}
}
@@ -660,53 +759,67 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_ = nullptr;
}
- void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
- auto* func_tbl_type = typer_->TypeOf(funcs)->AsFunctionTableType();
- DCHECK_NOT_NULL(func_tbl_type);
- auto* func_type = func_tbl_type->signature()->AsFunctionType();
+ struct FunctionTableIndices : public ZoneObject {
+ uint32_t start_index;
+ uint32_t signature_index;
+ };
+
+ FunctionTableIndices* LookupOrAddFunctionTable(VariableProxy* table,
+ Property* p) {
+ FunctionTableIndices* indices = LookupFunctionTable(table->var());
+ if (indices != nullptr) {
+ // Already set up.
+ return indices;
+ }
+ indices = new (zone()) FunctionTableIndices();
+ auto* func_type = typer_->TypeOf(p)->AsFunctionType();
+ auto* func_table_type = typer_->TypeOf(p->obj()->AsVariableProxy()->var())
+ ->AsFunctionTableType();
const auto& arguments = func_type->Arguments();
- LocalType return_type = TypeFrom(func_type->ReturnType());
- FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+ ValueType return_type = TypeFrom(func_type->ReturnType());
+ FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
arguments.size());
- if (return_type != kAstStmt) {
+ if (return_type != kWasmStmt) {
sig.AddReturn(return_type);
}
for (auto* arg : arguments) {
sig.AddParam(TypeFrom(arg));
}
uint32_t signature_index = builder_->AddSignature(sig.Build());
- InsertFunctionTable(table->var(), next_table_index_, signature_index);
- next_table_index_ += funcs->values()->length();
- for (int i = 0; i < funcs->values()->length(); ++i) {
- VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
- DCHECK_NOT_NULL(func);
- builder_->AddIndirectFunction(
- LookupOrInsertFunction(func->var())->func_index());
- }
- }
-
- struct FunctionTableIndices : public ZoneObject {
- uint32_t start_index;
- uint32_t signature_index;
- };
-
- void InsertFunctionTable(Variable* v, uint32_t start_index,
- uint32_t signature_index) {
- FunctionTableIndices* container = new (zone()) FunctionTableIndices();
- container->start_index = start_index;
- container->signature_index = signature_index;
+ indices->start_index = builder_->AllocateIndirectFunctions(
+ static_cast<uint32_t>(func_table_type->length()));
+ indices->signature_index = signature_index;
ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
- v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
- entry->value = container;
+ table->var(), ComputePointerHash(table->var()),
+ ZoneAllocationPolicy(zone()));
+ entry->value = indices;
+ return indices;
}
FunctionTableIndices* LookupFunctionTable(Variable* v) {
ZoneHashMap::Entry* entry =
function_tables_.Lookup(v, ComputePointerHash(v));
- DCHECK_NOT_NULL(entry);
+ if (entry == nullptr) {
+ return nullptr;
+ }
return reinterpret_cast<FunctionTableIndices*>(entry->value);
}
+ void PopulateFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
+ FunctionTableIndices* indices = LookupFunctionTable(table->var());
+ // Ignore unused function tables.
+ if (indices == nullptr) {
+ return;
+ }
+ for (int i = 0; i < funcs->values()->length(); ++i) {
+ VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
+ DCHECK_NOT_NULL(func);
+ builder_->SetIndirectFunction(
+ indices->start_index + i,
+ LookupOrInsertFunction(func->var())->func_index());
+ }
+ }
+
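
Function tables above are now allocated lazily: the first indirect call through a table reserves a contiguous slot range (AllocateIndirectFunctions) and records its signature index, the array initializer later fills those slots via PopulateFunctionTable, and tables never called through are skipped. A standalone sketch of the reserve-then-fill pattern; MiniBuilder is hypothetical, not the WasmModuleBuilder API:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical mini-builder for the reserve-then-fill pattern used by
    // LookupOrAddFunctionTable / PopulateFunctionTable above.
    struct MiniBuilder {
      std::vector<uint32_t> indirect;  // the module's indirect function table
      uint32_t Allocate(uint32_t count) {  // reserve a contiguous range
        uint32_t start = static_cast<uint32_t>(indirect.size());
        indirect.resize(indirect.size() + count, 0);
        return start;
      }
      void Set(uint32_t index, uint32_t func) { indirect[index] = func; }
    };

    int main() {
      MiniBuilder b;
      uint32_t start = b.Allocate(2);  // first call through the table
      b.Set(start + 0, 7);             // initializer fills slots later
      b.Set(start + 1, 9);
      assert(b.indirect[start + 1] == 9);
    }
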
class ImportedFunctionTable {
private:
class ImportedFunctionIndices : public ZoneObject {
@@ -727,20 +840,33 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ZoneAllocationPolicy(builder->zone())),
builder_(builder) {}
- void AddImport(Variable* v, const char* name, int name_length) {
- ImportedFunctionIndices* indices = new (builder_->zone())
- ImportedFunctionIndices(name, name_length, builder_->zone());
+ ImportedFunctionIndices* LookupOrInsertImport(Variable* v) {
auto* entry = table_.LookupOrInsert(
v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
- entry->value = indices;
+ ImportedFunctionIndices* indices;
+ if (entry->value == nullptr) {
+ indices = new (builder_->zone())
+ ImportedFunctionIndices(nullptr, 0, builder_->zone());
+ entry->value = indices;
+ } else {
+ indices = reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+ }
+ return indices;
+ }
+
+ void SetImportName(Variable* v, const char* name, int name_length) {
+ auto* indices = LookupOrInsertImport(v);
+ indices->name_ = name;
+ indices->name_length_ = name_length;
+ for (auto i : indices->signature_to_index_) {
+ builder_->builder_->SetImportName(i.second, indices->name_,
+ indices->name_length_);
+ }
}
// Get a function's index (or allocate if new).
- uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
- ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
- DCHECK_NOT_NULL(entry);
- ImportedFunctionIndices* indices =
- reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+ uint32_t LookupOrInsertImportUse(Variable* v, FunctionSig* sig) {
+ auto* indices = LookupOrInsertImport(v);
WasmModuleBuilder::SignatureMap::iterator pos =
indices->signature_to_index_.find(sig);
if (pos != indices->signature_to_index_.end()) {
@@ -819,8 +945,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (target_var != nullptr) {
// Left hand side is a local or a global variable.
Variable* var = target_var->var();
- LocalType var_type = TypeOf(expr);
- DCHECK_NE(kAstStmt, var_type);
+ ValueType var_type = TypeOf(expr);
+ DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
uint32_t index = LookupOrInsertGlobal(var, var_type);
current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
@@ -841,7 +967,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Property* target_prop = expr->target()->AsProperty();
if (target_prop != nullptr) {
// Left hand side is a property access, i.e. the asm.js heap.
- if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() &&
+ if (TypeOf(expr->value()) == kWasmF64 && expr->target()->IsProperty() &&
typer_->TypeOf(expr->target()->AsProperty()->obj())
->IsA(AsmType::Float32Array())) {
current_function_builder_->Emit(kExprF32ConvertF64);
@@ -901,7 +1027,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
const AstRawString* name =
prop->key()->AsLiteral()->AsRawPropertyName();
- imported_function_table_.AddImport(
+ imported_function_table_.SetImportName(
target->var(), reinterpret_cast<const char*>(name->raw_data()),
name->length());
}
@@ -910,14 +1036,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return;
}
ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
- if (funcs != nullptr &&
- typer_->TypeOf(funcs)
- ->AsFunctionTableType()
- ->signature()
- ->AsFunctionType()) {
+ if (funcs != nullptr) {
VariableProxy* target = expr->target()->AsVariableProxy();
DCHECK_NOT_NULL(target);
- AddFunctionTable(target, funcs);
+ PopulateFunctionTable(target, funcs);
// Only add to the function table. No init needed.
return;
}
@@ -952,8 +1074,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NOT_NULL(key_literal);
if (!key_literal->value().is_null()) {
Handle<Name> name =
- i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
- LocalType type = is_float ? kAstF64 : kAstI32;
+ Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
+ ValueType type = is_float ? kWasmF64 : kWasmI32;
foreign_variables_.push_back({name, var, type});
}
}
@@ -961,7 +1083,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
Expression* obj = expr->obj();
*atype = typer_->TypeOf(obj);
- int size = (*atype)->ElementSizeInBytes();
+ int32_t size = (*atype)->ElementSizeInBytes();
if (size == 1) {
 // Allow more general expressions in byte arrays than the spec
// strictly permits.
@@ -974,7 +1096,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* value = expr->key()->AsLiteral();
if (value) {
DCHECK(value->raw_value()->IsNumber());
- DCHECK_EQ(kAstI32, TypeOf(value));
+ DCHECK_EQ(kWasmI32, TypeOf(value));
int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
// TODO(titzer): handle overflow here.
current_function_builder_->EmitI32Const(val * size);
@@ -984,14 +1106,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (binop) {
DCHECK_EQ(Token::SAR, binop->op());
DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
- DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral()));
+ DCHECK(kWasmI32 == TypeOf(binop->right()->AsLiteral()));
DCHECK_EQ(size,
1 << static_cast<int>(
binop->right()->AsLiteral()->raw_value()->AsNumber()));
// Mask bottom bits to match asm.js behavior.
- byte mask = static_cast<byte>(~(size - 1));
RECURSE(Visit(binop->left()));
- current_function_builder_->EmitWithU8(kExprI8Const, mask);
+ current_function_builder_->EmitI32Const(~(size - 1));
current_function_builder_->Emit(kExprI32And);
return;
}
@@ -1030,7 +1151,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper::StandardMember standard_object =
typer_->VariableAsStandardMember(var);
ZoneList<Expression*>* args = call->arguments();
- LocalType call_type = TypeOf(call);
+ ValueType call_type = TypeOf(call);
switch (standard_object) {
case AsmTyper::kNone: {
@@ -1038,57 +1159,57 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathAcos: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Acos);
break;
}
case AsmTyper::kMathAsin: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Asin);
break;
}
case AsmTyper::kMathAtan: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan);
break;
}
case AsmTyper::kMathCos: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Cos);
break;
}
case AsmTyper::kMathSin: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Sin);
break;
}
case AsmTyper::kMathTan: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Tan);
break;
}
case AsmTyper::kMathExp: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Exp);
break;
}
case AsmTyper::kMathLog: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Log);
break;
}
case AsmTyper::kMathCeil: {
VisitCallArgs(call);
- if (call_type == kAstF32) {
+ if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Ceil);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Ceil);
} else {
UNREACHABLE();
@@ -1097,9 +1218,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathFloor: {
VisitCallArgs(call);
- if (call_type == kAstF32) {
+ if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Floor);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Floor);
} else {
UNREACHABLE();
@@ -1108,9 +1229,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathSqrt: {
VisitCallArgs(call);
- if (call_type == kAstF32) {
+ if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Sqrt);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Sqrt);
} else {
UNREACHABLE();
@@ -1119,18 +1240,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathClz32: {
VisitCallArgs(call);
- DCHECK(call_type == kAstI32);
+ DCHECK(call_type == kWasmI32);
current_function_builder_->Emit(kExprI32Clz);
break;
}
case AsmTyper::kMathAbs: {
- if (call_type == kAstI32) {
- WasmTemporary tmp(current_function_builder_, kAstI32);
+ if (call_type == kWasmI32) {
+ WasmTemporary tmp(current_function_builder_, kWasmI32);
// if set_local(tmp, x) < 0
Visit(call->arguments()->at(0));
current_function_builder_->EmitTeeLocal(tmp.index());
- byte code[] = {WASM_I8(0)};
+ byte code[] = {WASM_ZERO};
current_function_builder_->EmitCode(code, sizeof(code));
current_function_builder_->Emit(kExprI32LtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
@@ -1146,10 +1267,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// end
current_function_builder_->Emit(kExprEnd);
- } else if (call_type == kAstF32) {
+ } else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Abs);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Abs);
} else {
@@ -1159,9 +1280,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathMin: {
// TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
- if (call_type == kAstI32) {
- WasmTemporary tmp_x(current_function_builder_, kAstI32);
- WasmTemporary tmp_y(current_function_builder_, kAstI32);
+ if (call_type == kWasmI32) {
+ WasmTemporary tmp_x(current_function_builder_, kWasmI32);
+ WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
@@ -1181,10 +1302,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_y.index());
current_function_builder_->Emit(kExprEnd);
- } else if (call_type == kAstF32) {
+ } else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Min);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Min);
} else {
@@ -1194,9 +1315,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathMax: {
// TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
- if (call_type == kAstI32) {
- WasmTemporary tmp_x(current_function_builder_, kAstI32);
- WasmTemporary tmp_y(current_function_builder_, kAstI32);
+ if (call_type == kWasmI32) {
+ WasmTemporary tmp_x(current_function_builder_, kWasmI32);
+ WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0));
@@ -1217,10 +1338,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_x.index());
current_function_builder_->Emit(kExprEnd);
- } else if (call_type == kAstF32) {
+ } else if (call_type == kWasmF32) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Max);
- } else if (call_type == kAstF64) {
+ } else if (call_type == kWasmF64) {
VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Max);
} else {
@@ -1230,13 +1351,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
case AsmTyper::kMathAtan2: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan2);
break;
}
case AsmTyper::kMathPow: {
VisitCallArgs(call);
- DCHECK_EQ(kAstF64, call_type);
+ DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Pow);
break;
}
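
For kWasmI32, the kMathAbs/kMathMin/kMathMax cases above are open-coded with tee_local temporaries and an if so that each operand is evaluated exactly once before the comparison. A plain C++ paraphrase of the emitted semantics, not the emitted wasm:

    #include <cassert>
    #include <cstdint>

    // Paraphrase of the i32 lowerings above: every operand is evaluated
    // once, which the builder ensures with tee_local temporaries.
    int32_t AsmMin(int32_t x, int32_t y) { return x < y ? x : y; }
    int32_t AsmMax(int32_t x, int32_t y) { return x > y ? x : y; }
    int32_t AsmAbs(int32_t x) {
      // Negate via uint32_t so INT32_MIN wraps, as wasm i32 arithmetic does.
      return x < 0 ? static_cast<int32_t>(0u - static_cast<uint32_t>(x)) : x;
    }

    int main() {
      assert(AsmMin(3, 5) == 3);
      assert(AsmMax(3, 5) == 5);
      assert(AsmAbs(-7) == 7);
    }
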
@@ -1298,6 +1419,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
bool VisitCallExpression(Call* expr) {
Call::CallType call_type = expr->GetCallType();
bool returns_value = true;
+
+ // Save the parent now; it might be overwritten in VisitCallArgs.
+ BinaryOperation* parent_binop = parent_binop_;
+
switch (call_type) {
case Call::OTHER_CALL: {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
@@ -1313,11 +1438,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VariableProxy* vp = expr->expression()->AsVariableProxy();
DCHECK_NOT_NULL(vp);
if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
- LocalType return_type = TypeOf(expr);
+ ValueType return_type = TypeOf(expr);
ZoneList<Expression*>* args = expr->arguments();
- FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+ FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
args->length());
- if (return_type != kAstStmt) {
+ if (return_type != kWasmStmt) {
sig.AddReturn(return_type);
} else {
returns_value = false;
@@ -1325,16 +1450,23 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
for (int i = 0; i < args->length(); ++i) {
sig.AddParam(TypeOf(args->at(i)));
}
- uint32_t index = imported_function_table_.LookupOrInsertImport(
+ uint32_t index = imported_function_table_.LookupOrInsertImportUse(
vp->var(), sig.Build());
VisitCallArgs(expr);
- current_function_builder_->AddAsmWasmOffset(expr->position());
+ // For non-void functions, we must know the parent node.
+ DCHECK_IMPLIES(returns_value, parent_binop != nullptr);
+ DCHECK_IMPLIES(returns_value, parent_binop->left() == expr ||
+ parent_binop->right() == expr);
+ int pos = expr->position();
+ int parent_pos = returns_value ? parent_binop->position() : pos;
+ current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index);
} else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
- current_function_builder_->AddAsmWasmOffset(expr->position());
+ current_function_builder_->AddAsmWasmOffset(expr->position(),
+ expr->position());
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitDirectCallIndex(
function->func_index());
@@ -1348,19 +1480,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NOT_NULL(p);
VariableProxy* var = p->obj()->AsVariableProxy();
DCHECK_NOT_NULL(var);
- FunctionTableIndices* indices = LookupFunctionTable(var->var());
+ FunctionTableIndices* indices = LookupOrAddFunctionTable(var, p);
Visit(p->key()); // TODO(titzer): should use RECURSE()
// We have to use a temporary for the correct order of evaluation.
current_function_builder_->EmitI32Const(indices->start_index);
current_function_builder_->Emit(kExprI32Add);
- WasmTemporary tmp(current_function_builder_, kAstI32);
+ WasmTemporary tmp(current_function_builder_, kWasmI32);
current_function_builder_->EmitSetLocal(tmp.index());
VisitCallArgs(expr);
current_function_builder_->EmitGetLocal(tmp.index());
- current_function_builder_->AddAsmWasmOffset(expr->position());
+ current_function_builder_->AddAsmWasmOffset(expr->position(),
+ expr->position());
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitVarInt(indices->signature_index);
current_function_builder_->EmitVarInt(0); // table index
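
The sequence above evaluates the table key, rebases it by start_index, and parks the callee index in a temporary before visiting the arguments; this preserves JavaScript's left-to-right side-effect order even though call_indirect consumes the callee index after the arguments. A C++ paraphrase of the ordering constraint, with hypothetical names:

    #include <cassert>
    #include <functional>
    #include <vector>

    // Hypothetical paraphrase of the emission order above: the callee
    // index is evaluated and saved first, the arguments second, and the
    // saved index is consumed last by the indirect call.
    int CallIndirect(const std::vector<std::function<int(int)>>& table,
                     int key, int start_index, int arg_expr) {
      int tmp = key + start_index;  // index expression evaluated first
      int arg = arg_expr;           // then the argument expressions
      return table[tmp](arg);       // index consumed after the arguments
    }

    int main() {
      std::vector<std::function<int(int)>> table{[](int x) { return x + 1; }};
      assert(CallIndirect(table, 0, 0, 41) == 42);
    }
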
@@ -1383,7 +1516,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->expression()));
switch (expr->op()) {
case Token::NOT: {
- DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
+ DCHECK_EQ(kWasmI32, TypeOf(expr->expression()));
current_function_builder_->Emit(kExprI32Eqz);
break;
}
@@ -1398,10 +1531,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
int32_t val) {
DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
- TypeOf(expr) == kAstI32) {
+ TypeOf(expr) == kWasmI32) {
Literal* right = expr->right()->AsLiteral();
- DCHECK(right->raw_value()->IsNumber());
- if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
+ if (right->raw_value()->IsNumber() &&
+ static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
return true;
}
}
@@ -1412,7 +1545,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
double val) {
DCHECK_NOT_NULL(expr->right());
if (expr->op() == op && expr->right()->IsLiteral() &&
- TypeOf(expr) == kAstF64) {
+ TypeOf(expr) == kWasmF64) {
Literal* right = expr->right()->AsLiteral();
DCHECK(right->raw_value()->IsNumber());
if (right->raw_value()->AsNumber() == val) {
@@ -1426,7 +1559,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchOr(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
- (TypeOf(expr->left()) == kAstI32)) {
+ (TypeOf(expr->left()) == kWasmI32)) {
return kAsIs;
} else {
return kNone;
@@ -1436,7 +1569,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchShr(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
// TODO(titzer): this probably needs to be kToUint
- return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+ return (TypeOf(expr->left()) == kWasmI32) ? kAsIs : kToInt;
} else {
return kNone;
}
@@ -1444,13 +1577,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchXor(BinaryOperation* expr) {
if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
- DCHECK_EQ(kAstI32, TypeOf(expr->left()));
- DCHECK_EQ(kAstI32, TypeOf(expr->right()));
+ DCHECK_EQ(kWasmI32, TypeOf(expr->left()));
+ DCHECK_EQ(kWasmI32, TypeOf(expr->right()));
BinaryOperation* op = expr->left()->AsBinaryOperation();
if (op != nullptr) {
if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
- DCHECK_EQ(kAstI32, TypeOf(op->right()));
- if (TypeOf(op->left()) != kAstI32) {
+ DCHECK_EQ(kWasmI32, TypeOf(op->right()));
+ if (TypeOf(op->left()) != kWasmI32) {
return kToInt;
} else {
return kAsIs;
@@ -1463,8 +1596,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ConvertOperation MatchMul(BinaryOperation* expr) {
if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
- DCHECK_EQ(kAstF64, TypeOf(expr->right()));
- if (TypeOf(expr->left()) != kAstF64) {
+ DCHECK_EQ(kWasmF64, TypeOf(expr->right()));
+ if (TypeOf(expr->left()) != kWasmF64) {
return kToDouble;
} else {
return kAsIs;
@@ -1532,6 +1665,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitBinaryOperation(BinaryOperation* expr) {
ConvertOperation convertOperation = MatchBinaryOperation(expr);
static const bool kDontIgnoreSign = false;
+ parent_binop_ = expr;
if (convertOperation == kToDouble) {
RECURSE(Visit(expr->left()));
TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
@@ -1694,6 +1828,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitDeclarations(Declaration::List* decls) {
for (Declaration* decl : *decls) {
RECURSE(Visit(decl));
+ if (typer_failed_) {
+ return;
+ }
}
}
@@ -1719,7 +1856,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index;
};
- uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
+ uint32_t LookupOrInsertLocal(Variable* v, ValueType type) {
DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
local_variables_.Lookup(v, ComputePointerHash(v));
@@ -1736,7 +1873,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return (reinterpret_cast<IndexContainer*>(entry->value))->index;
}
- void InsertParameter(Variable* v, LocalType type, uint32_t index) {
+ void InsertParameter(Variable* v, ValueType type, uint32_t index) {
DCHECK(v->IsParameter());
DCHECK_NOT_NULL(current_function_builder_);
ZoneHashMap::Entry* entry =
@@ -1749,7 +1886,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
entry->value = container;
}
- uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+ uint32_t LookupOrInsertGlobal(Variable* v, ValueType type) {
ZoneHashMap::Entry* entry =
global_variables_.Lookup(v, ComputePointerHash(v));
if (entry == nullptr) {
@@ -1770,14 +1907,14 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
auto* func_type = typer_->TypeOf(v)->AsFunctionType();
DCHECK_NOT_NULL(func_type);
// Build the signature for the function.
- LocalType return_type = TypeFrom(func_type->ReturnType());
+ ValueType return_type = TypeFrom(func_type->ReturnType());
const auto& arguments = func_type->Arguments();
- FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+ FunctionSig::Builder b(zone(), return_type == kWasmStmt ? 0 : 1,
arguments.size());
- if (return_type != kAstStmt) b.AddReturn(return_type);
+ if (return_type != kWasmStmt) b.AddReturn(return_type);
for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
- LocalType type = TypeFrom(arguments[i]);
- DCHECK_NE(kAstStmt, type);
+ ValueType type = TypeFrom(arguments[i]);
+ DCHECK_NE(kWasmStmt, type);
b.AddParam(type);
}
@@ -1792,22 +1929,22 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
}
- LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
+ ValueType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
- LocalType TypeFrom(AsmType* type) {
+ ValueType TypeFrom(AsmType* type) {
if (type->IsA(AsmType::Intish())) {
- return kAstI32;
+ return kWasmI32;
}
if (type->IsA(AsmType::Floatish())) {
- return kAstF32;
+ return kWasmF32;
}
if (type->IsA(AsmType::DoubleQ())) {
- return kAstF64;
+ return kWasmF64;
}
- return kAstStmt;
+ return kWasmStmt;
}
Zone* zone() { return zone_; }
@@ -1821,7 +1958,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
FunctionLiteral* literal_;
Isolate* isolate_;
Zone* zone_;
+ CompilationInfo* info_;
+ AstValueFactory* ast_value_factory_;
+ Handle<Script> script_;
AsmTyper* typer_;
+ bool typer_failed_;
+ bool typer_finished_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
ZoneVector<ForeignVariable> foreign_variables_;
WasmFunctionBuilder* init_function_;
@@ -1829,6 +1971,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t next_table_index_;
ZoneHashMap function_tables_;
ImportedFunctionTable imported_function_table_;
+ // Remember the parent node for reporting the correct location for ToNumber
+ // conversions after calls.
+ BinaryOperation* parent_binop_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -1836,22 +1981,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
};
-AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
- FunctionLiteral* literal, AsmTyper* typer)
- : isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
+AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
+ : info_(info),
+ typer_(info->isolate(), info->zone(), info->script(), info->literal()) {}
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
-AsmWasmBuilder::Result AsmWasmBuilder::Run(
- i::Handle<i::FixedArray>* foreign_args) {
- AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
- impl.Build();
+AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
+ Zone* zone = info_->zone();
+ AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
+ info_->parse_info()->ast_value_factory(),
+ info_->script(), info_->literal(), &typer_);
+ bool success = impl.Build();
*foreign_args = impl.GetForeignArgs();
- ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
+ ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
impl.builder_->WriteTo(*module_buffer);
- ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
+ ZoneBuffer* asm_offsets_buffer = new (zone) ZoneBuffer(zone);
impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
- return {module_buffer, asm_offsets_buffer};
+ return {module_buffer, asm_offsets_buffer, success};
}
const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
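
The parent_binop_ bookkeeping above is a standard visitor trick: VisitBinaryOperation stores the current node in a member field, and a call nested under it reads the saved pointer to recover the position of its enclosing operation. A minimal standalone sketch of the pattern (toy Node/Visitor types, not V8's AST classes):

    #include <cassert>
    #include <cstdio>

    struct Node {
      int position;
      Node* left = nullptr;
      Node* right = nullptr;
      bool is_call = false;
    };

    class Visitor {
     public:
      void Visit(Node* n) {
        if (n->is_call) {
          // A value-producing call needs its enclosing binary operation so
          // the implicit conversion can be attributed to the parent's
          // position, mirroring the DCHECKs in the diff.
          assert(parent_binop_ == nullptr || parent_binop_->left == n ||
                 parent_binop_->right == n);
          int parent_pos =
              parent_binop_ != nullptr ? parent_binop_->position : n->position;
          std::printf("call at %d, conversion at %d\n", n->position, parent_pos);
          return;
        }
        parent_binop_ = n;  // Save before descending.
        if (n->left != nullptr) Visit(n->left);
        parent_binop_ = n;  // The left subtree may have overwritten it.
        if (n->right != nullptr) Visit(n->right);
      }

     private:
      Node* parent_binop_ = nullptr;
    };

    int main() {
      Node call{10, nullptr, nullptr, true};
      Node zero{14};
      Node binop{12, &call, &zero};  // e.g. "f() | 0" at position 12
      Visitor().Visit(&binop);       // call at 10, conversion at 12
    }

This is also why VisitCallExpression snapshots parent_binop_ on entry: visiting the call's arguments can run VisitBinaryOperation again and clobber the field.
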
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.h b/deps/v8/src/asmjs/asm-wasm-builder.h
index f234abde8a..a5db096683 100644
--- a/deps/v8/src/asmjs/asm-wasm-builder.h
+++ b/deps/v8/src/asmjs/asm-wasm-builder.h
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-class FunctionLiteral;
+class CompilationInfo;
namespace wasm {
@@ -23,20 +23,20 @@ class AsmWasmBuilder {
struct Result {
ZoneBuffer* module_bytes;
ZoneBuffer* asm_offset_table;
+ bool success;
};
- explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
- AsmTyper* typer);
+ explicit AsmWasmBuilder(CompilationInfo* info);
Result Run(Handle<FixedArray>* foreign_args);
static const char* foreign_init_name;
static const char* single_function_name;
+ const AsmTyper* typer() { return &typer_; }
+
private:
- Isolate* isolate_;
- Zone* zone_;
- FunctionLiteral* literal_;
- AsmTyper* typer_;
+ CompilationInfo* info_;
+ AsmTyper typer_;
};
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/assembler-inl.h b/deps/v8/src/assembler-inl.h
new file mode 100644
index 0000000000..24d0377ce5
--- /dev/null
+++ b/deps/v8/src/assembler-inl.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ASSEMBLER_INL_H_
+#define V8_ASSEMBLER_INL_H_
+
+#include "src/assembler.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/assembler-x87-inl.h"
+#else
+#error Unknown architecture.
+#endif
+
+#endif // V8_ASSEMBLER_INL_H_
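
The new header centralizes the per-architecture include dispatch that each .cc previously repeated; assembler.cc below now just includes it. The same preprocessor-dispatch pattern in a self-contained toy (macro and architecture names are illustrative; the real header #errors on unknown targets instead of falling back):

    #include <cstdio>

    #if defined(__x86_64__) || defined(_M_X64)
    #define MY_ARCH "x64"
    #elif defined(__aarch64__) || defined(_M_ARM64)
    #define MY_ARCH "arm64"
    #else
    #define MY_ARCH "other"
    #endif

    int main() { std::printf("compiled for %s\n", MY_ARCH); }
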
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index a2c0ebebaf..a4d97ec3e6 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -35,8 +35,11 @@
#include "src/assembler.h"
#include <math.h>
+#include <string.h>
#include <cmath>
+
#include "src/api.h"
+#include "src/assembler-inl.h"
#include "src/base/cpu.h"
#include "src/base/functional.h"
#include "src/base/ieee754.h"
@@ -62,28 +65,6 @@
#include "src/snapshot/serializer-common.h"
#include "src/wasm/wasm-external-refs.h"
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390-inl.h" // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87-inl.h" // NOLINT
-#else
-#error "Unknown architecture."
-#endif
-
// Include native regexp-macro-assembler.
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -353,8 +334,7 @@ void RelocInfo::update_wasm_memory_reference(
uint32_t current_size_reference = wasm_memory_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
- unchecked_update_wasm_memory_size(updated_size_reference,
- icache_flush_mode);
+ unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
} else {
UNREACHABLE();
}
@@ -378,6 +358,18 @@ void RelocInfo::update_wasm_global_reference(
}
}
+void RelocInfo::update_wasm_function_table_size_reference(
+ uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ uint32_t current_size_reference = wasm_function_table_size_reference();
+ uint32_t updated_size_reference =
+ new_size + (current_size_reference - old_size);
+ unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+ }
+}
+
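
Each of these update helpers preserves the delta between the encoded reference and the old base: updated = new + (current - old). A tiny standalone check of that arithmetic (unsigned wrap-around is well defined, so it also holds across overflow):

    #include <cassert>
    #include <cstdint>

    // Re-base an encoded size reference, keeping its delta from the old size.
    uint32_t UpdateSizeReference(uint32_t current, uint32_t old_size,
                                 uint32_t new_size) {
      return new_size + (current - old_size);
    }

    int main() {
      // A reference that encoded old_size + 8 must become new_size + 8.
      assert(UpdateSizeReference(108, 100, 200) == 208);
    }
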
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
@@ -782,14 +774,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break slot at tail call";
case CODE_AGE_SEQUENCE:
return "code age sequence";
- case GENERATOR_CONTINUATION:
- return "generator continuation";
case WASM_MEMORY_REFERENCE:
return "wasm memory reference";
case WASM_MEMORY_SIZE_REFERENCE:
return "wasm memory size reference";
case WASM_GLOBAL_REFERENCE:
return "wasm global value reference";
+ case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+ return "wasm function table size reference";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@@ -884,10 +876,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEBUG_BREAK_SLOT_AT_RETURN:
case DEBUG_BREAK_SLOT_AT_CALL:
case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
- case GENERATOR_CONTINUATION:
case WASM_MEMORY_REFERENCE:
case WASM_MEMORY_SIZE_REFERENCE:
case WASM_GLOBAL_REFERENCE:
+ case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case NONE32:
case NONE64:
break;
@@ -1204,6 +1196,12 @@ ExternalReference ExternalReference::f64_mod_wrapper_function(
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
}
+ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
+}
+
ExternalReference ExternalReference::log_enter_external_function(
Isolate* isolate) {
return ExternalReference(
@@ -1548,6 +1546,14 @@ ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
}
+void* libc_memchr(void* string, int character, size_t search_length) {
+ return memchr(string, character, search_length);
+}
+
+ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
+}
+
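
Wrapping memchr in a plain named function likely exists so Redirect/FUNCTION_ADDR get a concrete symbol whose address can be taken unambiguously (std::memchr itself is overloaded in C++, and simulator redirection needs a stable function address). The wrapper's shape as a self-contained check:

    #include <cassert>
    #include <cstddef>
    #include <cstring>

    // Same shape as the wrapper above: a plain function whose address can
    // be taken unambiguously, unlike the overloaded library symbol.
    void* my_memchr(void* s, int c, std::size_t n) {
      return std::memchr(s, c, n);
    }

    int main() {
      char buf[] = "wasm";
      assert(my_memchr(buf, 's', sizeof(buf)) == buf + 2);
    }
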
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
@@ -1569,11 +1575,19 @@ ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
+ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
+ return ExternalReference(isolate->promise_hook_address());
+}
+
ExternalReference ExternalReference::debug_is_active_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->is_active_address());
}
+ExternalReference ExternalReference::debug_hook_on_function_call_address(
+ Isolate* isolate) {
+ return ExternalReference(isolate->debug()->hook_on_function_call_address());
+}
ExternalReference ExternalReference::debug_after_break_target_address(
Isolate* isolate) {
@@ -1914,12 +1928,6 @@ void Assembler::RecordComment(const char* msg) {
}
-void Assembler::RecordGeneratorContinuation() {
- EnsureSpace ensure_space(this);
- RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
-}
-
-
void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
EnsureSpace ensure_space(this);
DCHECK(RelocInfo::IsDebugBreakSlot(mode));
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 2169b15c1d..cd5867689e 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -395,6 +395,7 @@ class RelocInfo {
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
+ WASM_FUNCTION_TABLE_SIZE_REFERENCE,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@@ -413,9 +414,6 @@ class RelocInfo {
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
- // Continuation points for a generator yield.
- GENERATOR_CONTINUATION,
-
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@@ -440,7 +438,7 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
- LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
+ LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@@ -524,9 +522,6 @@ class RelocInfo {
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
- static inline bool IsGeneratorContinuation(Mode mode) {
- return mode == GENERATOR_CONTINUATION;
- }
static inline bool IsWasmMemoryReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE;
}
@@ -536,6 +531,22 @@ class RelocInfo {
static inline bool IsWasmGlobalReference(Mode mode) {
return mode == WASM_GLOBAL_REFERENCE;
}
+ static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
+ return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ }
+ static inline bool IsWasmReference(Mode mode) {
+ return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
+ mode == WASM_MEMORY_SIZE_REFERENCE ||
+ mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ }
+ static inline bool IsWasmSizeReference(Mode mode) {
+ return mode == WASM_MEMORY_SIZE_REFERENCE ||
+ mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ }
+ static inline bool IsWasmPtrReference(Mode mode) {
+ return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
+ }
+
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -564,6 +575,7 @@ class RelocInfo {
Address wasm_memory_reference();
Address wasm_global_reference();
+ uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
void update_wasm_memory_reference(
Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
@@ -571,6 +583,9 @@ class RelocInfo {
void update_wasm_global_reference(
Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+ void update_wasm_function_table_size_reference(
+ uint32_t old_base, uint32_t new_base,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@@ -679,8 +694,7 @@ class RelocInfo {
private:
void unchecked_update_wasm_memory_reference(Address address,
ICacheFlushMode flush_mode);
- void unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode);
+ void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
Isolate* isolate_;
// On ARM, note that pc_ is the address of the constant pool entry
@@ -949,6 +963,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
+ // Trap callback function for cctest/wasm/wasm-run-utils.h
+ static ExternalReference wasm_call_trap_callback_for_testing(
+ Isolate* isolate);
+
// Log support.
static ExternalReference log_enter_external_function(Isolate* isolate);
static ExternalReference log_leave_external_function(Isolate* isolate);
@@ -1031,6 +1049,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference ieee754_tan_function(Isolate* isolate);
static ExternalReference ieee754_tanh_function(Isolate* isolate);
+ static ExternalReference libc_memchr_function(Isolate* isolate);
+
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
@@ -1041,12 +1061,16 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference debug_is_active_address(Isolate* isolate);
+ static ExternalReference debug_hook_on_function_call_address(
+ Isolate* isolate);
static ExternalReference debug_after_break_target_address(Isolate* isolate);
static ExternalReference is_profiling_address(Isolate* isolate);
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
+ static ExternalReference promise_hook_address(Isolate* isolate);
+
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
@@ -1117,6 +1141,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
// -----------------------------------------------------------------------------
// Utility functions
+void* libc_memchr(void* string, int character, size_t search_length);
inline int NumberOfBitsSet(uint32_t x) {
unsigned int num_bits_set;
@@ -1144,7 +1169,7 @@ class CallWrapper {
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
// Return whether call needs to check for debug stepping.
- virtual bool NeedsDebugStepCheck() const { return false; }
+ virtual bool NeedsDebugHookCheck() const { return false; }
};
@@ -1163,7 +1188,7 @@ class CheckDebugStepCallWrapper : public CallWrapper {
virtual ~CheckDebugStepCallWrapper() {}
virtual void BeforeCall(int call_size) const {}
virtual void AfterCall() const {}
- virtual bool NeedsDebugStepCheck() const { return true; }
+ virtual bool NeedsDebugHookCheck() const { return true; }
};
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 3852709966..f446ad0895 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -83,15 +83,21 @@ PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
+ if (data_ == nullptr) return;
+ Release();
+}
+
+template <PerThreadAssertType kType, bool kAllow>
+void PerThreadAssertScope<kType, kAllow>::Release() {
DCHECK_NOT_NULL(data_);
data_->Set(kType, old_state_);
if (data_->DecrementLevel()) {
PerThreadAssertData::SetCurrent(NULL);
delete data_;
}
+ data_ = nullptr;
}
-
// static
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
@@ -149,6 +155,8 @@ template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, false>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
} // namespace internal
} // namespace v8
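
Release() lets a scope be torn down before its destructor runs; the destructor then finds data_ == nullptr and becomes a no-op, so releasing and then destructing is harmless. The same early-release RAII idiom, reduced to a standalone example:

    #include <cstdio>

    class Scope {
     public:
      Scope() { std::puts("enter"); }
      ~Scope() {
        if (!active_) return;  // Already released: destructor is a no-op.
        Release();
      }
      void Release() {
        std::puts("exit");
        active_ = false;
      }

     private:
      bool active_ = true;
    };

    int main() {
      Scope s;
      s.Release();  // End the scope's effect early...
    }               // ...so the destructor does nothing further.
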
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index fde49f8406..981a037f13 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -26,12 +26,12 @@ enum PerThreadAssertType {
LAST_PER_THREAD_ASSERT_TYPE
};
-
enum PerIsolateAssertType {
JAVASCRIPT_EXECUTION_ASSERT,
JAVASCRIPT_EXECUTION_THROWS,
DEOPTIMIZATION_ASSERT,
- COMPILATION_ASSERT
+ COMPILATION_ASSERT,
+ NO_EXCEPTION_ASSERT
};
template <PerThreadAssertType kType, bool kAllow>
@@ -42,6 +42,8 @@ class PerThreadAssertScope {
V8_EXPORT_PRIVATE static bool IsAllowed();
+ void Release();
+
private:
PerThreadAssertData* data_;
bool old_state_;
@@ -76,6 +78,7 @@ class PerThreadAssertScopeDebugOnly : public
class PerThreadAssertScopeDebugOnly {
public:
PerThreadAssertScopeDebugOnly() { }
+ void Release() {}
#endif
};
@@ -147,6 +150,14 @@ typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
AllowJavascriptExecution;
+// Scope to document where we do not expect javascript execution (debug only)
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>
+ DisallowJavascriptExecutionDebugOnly;
+
+// Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly.
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>
+ AllowJavascriptExecutionDebugOnly;
+
// Scope in which javascript execution leads to exception being thrown.
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
ThrowOnJavascriptExecution;
@@ -170,6 +181,14 @@ typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
// Scope to introduce an exception to DisallowDeoptimization.
typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
AllowCompilation;
+
+// Scope to document where we do not expect exceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
+ DisallowExceptions;
+
+// Scope to introduce an exception to DisallowExceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
+ AllowExceptions;
} // namespace internal
} // namespace v8
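
The new scopes are used like the existing assert scopes: a Disallow* on the stack covers a region, and a nested Allow* temporarily re-permits the operation, with the previous state restored on scope exit. A standalone model of that save/restore nesting (toy names, not the V8 classes):

    #include <cassert>

    // Toy stand-in for the per-isolate assert state.
    thread_local bool g_exceptions_allowed = true;

    template <bool kAllow>
    class ExceptionScopeModel {
     public:
      ExceptionScopeModel() : old_(g_exceptions_allowed) {
        g_exceptions_allowed = kAllow;
      }
      ~ExceptionScopeModel() { g_exceptions_allowed = old_; }

     private:
      bool old_;
    };

    using DisallowExceptionsModel = ExceptionScopeModel<false>;
    using AllowExceptionsModel = ExceptionScopeModel<true>;

    int main() {
      DisallowExceptionsModel no_throw;
      assert(!g_exceptions_allowed);
      {
        AllowExceptionsModel hole;  // Documented exception to the rule.
        assert(g_exceptions_allowed);
      }
      assert(!g_exceptions_allowed);  // Restored on scope exit.
    }
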
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index d0db9eab66..f46e21b410 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ast/ast.h"
#include "src/ast/ast-expression-rewriter.h"
+#include "src/ast/ast.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -372,6 +373,9 @@ void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
+void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
+ AST_REWRITE_PROPERTY(Expression, node, iterable);
+}
void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
REWRITE_THIS(node);
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
new file mode 100644
index 0000000000..5cb1e87d23
--- /dev/null
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc
@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast/ast-function-literal-id-reindexer.h"
+#include "src/objects-inl.h"
+
+#include "src/ast/ast.h"
+
+namespace v8 {
+namespace internal {
+
+AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
+ int delta)
+ : AstTraversalVisitor(stack_limit), delta_(delta) {}
+
+AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
+
+void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
+ Visit(pattern);
+}
+
+void AstFunctionLiteralIdReindexer::VisitFunctionLiteral(FunctionLiteral* lit) {
+ AstTraversalVisitor::VisitFunctionLiteral(lit);
+ lit->set_function_literal_id(lit->function_literal_id() + delta_);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.h b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
new file mode 100644
index 0000000000..837595f41b
--- /dev/null
+++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.h
@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+
+#include "src/ast/ast-traversal-visitor.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Changes the ID of all FunctionLiterals in the given Expression by adding the
+// given delta.
+class AstFunctionLiteralIdReindexer final
+ : public AstTraversalVisitor<AstFunctionLiteralIdReindexer> {
+ public:
+ AstFunctionLiteralIdReindexer(size_t stack_limit, int delta);
+ ~AstFunctionLiteralIdReindexer();
+
+ void Reindex(Expression* pattern);
+
+ // AstTraversalVisitor implementation.
+ void VisitFunctionLiteral(FunctionLiteral* lit);
+
+ private:
+ int delta_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstFunctionLiteralIdReindexer);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
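
The reindexer first lets the base traversal visit nested literals and then adds the delta to the current one, so every FunctionLiteral in the subtree is bumped exactly once. The same post-order control flow on a toy tree:

    #include <cassert>
    #include <vector>

    struct Lit {
      int id;
      std::vector<Lit*> inner;
    };

    // Recurse into nested literals first, then bump the current id,
    // matching VisitFunctionLiteral above.
    void Reindex(Lit* lit, int delta) {
      for (Lit* i : lit->inner) Reindex(i, delta);
      lit->id += delta;
    }

    int main() {
      Lit inner{1, {}};
      Lit outer{0, {&inner}};
      Reindex(&outer, 10);
      assert(outer.id == 10 && inner.id == 11);
    }
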
diff --git a/deps/v8/src/ast/ast-literal-reindexer.cc b/deps/v8/src/ast/ast-literal-reindexer.cc
index 81a5225fdc..67e180fe42 100644
--- a/deps/v8/src/ast/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast/ast-literal-reindexer.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -186,6 +187,9 @@ void AstLiteralReindexer::VisitSpread(Spread* node) {
void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
+void AstLiteralReindexer::VisitGetIterator(GetIterator* node) {
+ Visit(node->iterable());
+}
void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
Visit(node->each());
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 82f9767281..25aa9d7a5a 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -6,22 +6,26 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
- AstNumberingVisitor(Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
+ AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
+ Compiler::EagerInnerFunctionLiterals* eager_literals)
+ : zone_(zone),
+ eager_literals_(eager_literals),
next_id_(BailoutId::FirstUsable().ToInt()),
yield_count_(0),
properties_(zone),
slot_cache_(zone),
+ disable_crankshaft_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
catch_prediction_(HandlerTable::UNCAUGHT) {
- InitializeAstVisitor(isolate);
+ InitializeAstVisitor(stack_limit);
}
bool Renumber(FunctionLiteral* node);
@@ -55,25 +59,28 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
- void DisableCrankshaft(BailoutReason reason) {
- properties_.flags() |= AstProperties::kDontCrankshaft;
+ void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
+ disable_crankshaft_reason_ = reason;
+ properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
}
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
- node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
- &slot_cache_);
+ node->AssignFeedbackVectorSlots(properties_.get_spec(), &slot_cache_);
}
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
- Isolate* isolate_;
+ Zone* zone() const { return zone_; }
+
Zone* zone_;
+ Compiler::EagerInnerFunctionLiterals* eager_literals_;
int next_id_;
int yield_count_;
AstProperties properties_;
// The slot cache allows us to reuse certain feedback vector slots.
FeedbackVectorSlotCache slot_cache_;
+ BailoutReason disable_crankshaft_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
@@ -122,6 +129,7 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
IncrementNodeCount();
DisableOptimization(kNativeFunctionLiteral);
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
+ ReserveFeedbackSlots(node);
}
@@ -149,10 +157,11 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
- DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+ DisableFullCodegenAndCrankshaft(
+ kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
- DisableCrankshaft(kReferenceToModuleVariable);
+ DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
break;
default:
break;
@@ -176,7 +185,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
- DisableCrankshaft(kSuperReference);
+ DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
Visit(node->this_var());
Visit(node->home_object());
@@ -185,7 +194,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
- DisableCrankshaft(kSuperReference);
+ DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
Visit(node->this_var());
Visit(node->new_target_var());
@@ -282,8 +291,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kWithStatement);
- node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
+ DisableFullCodegenAndCrankshaft(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
@@ -313,7 +321,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kTryCatchStatement);
+ DisableFullCodegenAndCrankshaft(kTryCatchStatement);
{
const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
// This node uses its own prediction, unless it's "uncaught", in which case
@@ -332,7 +340,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kTryFinallyStatement);
+ DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
// We can't know whether the finally block will override ("catch") an
// exception thrown in the try block, so we just adopt the outer prediction.
node->set_catch_prediction(catch_prediction_);
@@ -393,14 +401,25 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
ReserveFeedbackSlots(node);
}
-
-void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
-
+void AstNumberingVisitor::VisitSpread(Spread* node) {
+ IncrementNodeCount();
+ // We can only get here from super calls currently.
+ DisableFullCodegenAndCrankshaft(kSuperReference);
+ node->set_base_id(ReserveIdRange(Spread::num_ids()));
+ Visit(node->expression());
+}
void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
+void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
+ IncrementNodeCount();
+ DisableFullCodegenAndCrankshaft(kGetIterator);
+ node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
+ Visit(node->iterable());
+ ReserveFeedbackSlots(node);
+}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
@@ -417,7 +436,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
- DisableCrankshaft(kForOfStatement);
+ DisableFullCodegenAndCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_yield_id(yield_count_);
@@ -484,8 +503,8 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
- DisableCrankshaft(kClassLiteral);
- node->set_base_id(ReserveIdRange(node->num_ids()));
+ DisableFullCodegenAndCrankshaft(kClassLiteral);
+ node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
@@ -504,7 +523,7 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
- node->BuildConstantProperties(isolate_);
+ node->InitDepthAndFlags();
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
  // marked expressions, no store code is emitted.
@@ -513,7 +532,8 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
- if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
+ if (node->is_computed_name())
+ DisableFullCodegenAndCrankshaft(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
@@ -524,12 +544,15 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
- node->BuildConstantElements(isolate_);
+ node->InitDepthAndFlags();
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCall(Call* node) {
+ if (node->is_possibly_eval()) {
+ DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
+ }
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
@@ -569,8 +592,13 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
+ if (eager_literals_ && node->ShouldEagerCompile()) {
+ eager_literals_->Add(new (zone())
+ ThreadedListZoneEntry<FunctionLiteral*>(node));
+ }
// We don't recurse into the declarations or body of the function literal:
// you have to separately Renumber() each FunctionLiteral that you compile.
+ ReserveFeedbackSlots(node);
}
@@ -584,22 +612,26 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
- if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
- if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
- if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
- DisableCrankshaft(kContextAllocatedArguments);
+ if (scope->new_target_var() != nullptr ||
+ scope->this_function_var() != nullptr) {
+ DisableFullCodegenAndCrankshaft(kSuperReference);
+ }
+
+ if (scope->arguments() != nullptr &&
+ !scope->arguments()->IsStackAllocated()) {
+ DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
- DisableCrankshaft(kRestParameter);
+ DisableFullCodegenAndCrankshaft(kRestParameter);
}
- if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
- DisableCrankshaft(kGenerator);
+ if (IsResumableFunction(node->kind())) {
+ DisableFullCodegenAndCrankshaft(kGenerator);
}
if (IsClassConstructor(node->kind())) {
- DisableCrankshaft(kClassConstructorFunction);
+ DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
}
VisitDeclarations(scope->declarations());
@@ -608,13 +640,26 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_yield_count(yield_count_);
+
+ if (FLAG_trace_opt) {
+ if (disable_crankshaft_reason_ != kNoReason) {
+ PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
+ node->debug_name()->ToCString().get(),
+ GetBailoutReason(disable_crankshaft_reason_));
+ }
+ }
+
return !HasStackOverflow();
}
+bool AstNumbering::Renumber(
+ uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+ Compiler::EagerInnerFunctionLiterals* eager_literals) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
-bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
- FunctionLiteral* function) {
- AstNumberingVisitor visitor(isolate, zone);
+ AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
return visitor.Renumber(function);
}
} // namespace internal
diff --git a/deps/v8/src/ast/ast-numbering.h b/deps/v8/src/ast/ast-numbering.h
index 73278950cd..bea441d67b 100644
--- a/deps/v8/src/ast/ast-numbering.h
+++ b/deps/v8/src/ast/ast-numbering.h
@@ -5,6 +5,8 @@
#ifndef V8_AST_AST_NUMBERING_H_
#define V8_AST_AST_NUMBERING_H_
+#include <stdint.h>
+
namespace v8 {
namespace internal {
@@ -12,11 +14,20 @@ namespace internal {
class FunctionLiteral;
class Isolate;
class Zone;
+template <typename T>
+class ThreadedList;
+template <typename T>
+class ThreadedListZoneEntry;
+template <typename T>
+class ZoneVector;
namespace AstNumbering {
// Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
-// tree; perform catch prediction for TryStatements.
-bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
+// tree; perform catch prediction for TryStatements. If |eager_literals| is
+// non-null, adds any eager inner literal functions into it.
+bool Renumber(
+ uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+ ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
}
// Some details on yield IDs
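
Renumber now threads an optional output list through the AST walk: when |eager_literals| is non-null, every inner function literal that should be eagerly compiled is appended as it is encountered, saving a second traversal. A standalone model of collect-during-traversal (toy types, not V8's ThreadedList):

    #include <cassert>
    #include <vector>

    struct Fn {
      bool eager;
      std::vector<Fn*> inner;
    };

    // Walk the tree once; if an output list is supplied, collect eagerly
    // compiled inner functions along the way.
    void Walk(Fn* fn, std::vector<Fn*>* eager) {
      for (Fn* f : fn->inner) {
        if (eager != nullptr && f->eager) eager->push_back(f);
        Walk(f, eager);
      }
    }

    int main() {
      Fn lazy{false, {}};
      Fn hot{true, {}};
      Fn top{false, {&lazy, &hot}};
      std::vector<Fn*> eager;
      Walk(&top, &eager);
      assert(eager.size() == 1 && eager[0] == &hot);
    }
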
diff --git a/deps/v8/src/ast/ast-traversal-visitor.h b/deps/v8/src/ast/ast-traversal-visitor.h
index d93e02ffe0..6d0c386f3b 100644
--- a/deps/v8/src/ast/ast-traversal-visitor.h
+++ b/deps/v8/src/ast/ast-traversal-visitor.h
@@ -288,7 +288,7 @@ void AstTraversalVisitor<Subclass>::VisitFunctionLiteral(
DeclarationScope* scope = expr->scope();
RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
// A lazily parsed function literal won't have a body.
- if (expr->scope()->is_lazily_parsed()) return;
+ if (expr->scope()->was_lazily_parsed()) return;
RECURSE_EXPRESSION(VisitStatements(expr->body()));
}
@@ -471,6 +471,12 @@ void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
}
template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
+ PROCESS_EXPRESSION(expr);
+ RECURSE_EXPRESSION(Visit(expr->iterable()));
+}
+
+template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
PROCESS_EXPRESSION(expr);
diff --git a/deps/v8/src/ast/ast-types.cc b/deps/v8/src/ast/ast-types.cc
index 49551dd7fa..83879215fc 100644
--- a/deps/v8/src/ast/ast-types.cc
+++ b/deps/v8/src/ast/ast-types.cc
@@ -7,6 +7,7 @@
#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -209,7 +210,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -259,6 +259,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
DCHECK(!map->is_undetectable());
@@ -304,8 +305,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case PROMISE_REACTION_JOB_INFO_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
- case SIGNATURE_INFO_TYPE:
- case TYPE_SWITCH_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
@@ -315,8 +314,10 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
+ case CONSTANT_ELEMENTS_PAIR_TYPE:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index ed2976f52a..4add57955f 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -28,6 +28,8 @@
#include "src/ast/ast-value-factory.h"
#include "src/api.h"
+#include "src/char-predicates-inl.h"
+#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/utils.h"
@@ -219,9 +221,17 @@ void AstValue::Internalize(Isolate* isolate) {
}
}
-
AstRawString* AstValueFactory::GetOneByteStringInternal(
Vector<const uint8_t> literal) {
+ if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
+ int key = literal[0] - 'a';
+ if (one_character_strings_[key] == nullptr) {
+ uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+ literal.start(), literal.length(), hash_seed_);
+ one_character_strings_[key] = GetString(hash, true, literal);
+ }
+ return one_character_strings_[key];
+ }
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
return GetString(hash, true, literal);
@@ -260,39 +270,6 @@ const AstConsString* AstValueFactory::NewConsString(
return new_string;
}
-const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
- const AstRawString* right) {
- int left_length = left->length();
- int right_length = right->length();
- const unsigned char* left_data = left->raw_data();
- const unsigned char* right_data = right->raw_data();
- if (left->is_one_byte() && right->is_one_byte()) {
- uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
- memcpy(buffer, left_data, left_length);
- memcpy(buffer + left_length, right_data, right_length);
- Vector<const uint8_t> literal(buffer, left_length + right_length);
- return GetOneByteStringInternal(literal);
- } else {
- uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
- if (left->is_one_byte()) {
- for (int i = 0; i < left_length; ++i) {
- buffer[i] = left_data[i];
- }
- } else {
- memcpy(buffer, left_data, 2 * left_length);
- }
- if (right->is_one_byte()) {
- for (int i = 0; i < right_length; ++i) {
- buffer[i + left_length] = right_data[i];
- }
- } else {
- memcpy(buffer + left_length, right_data, 2 * right_length);
- }
- Vector<const uint16_t> literal(buffer, left_length + right_length);
- return GetTwoByteStringInternal(literal);
- }
-}
-
void AstValueFactory::Internalize(Isolate* isolate) {
// Strings need to be internalized before values, because values refer to
// strings.
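
GetOneByteStringInternal now short-circuits single lowercase letters, which are very common in minified code: a 26-slot array memoizes the interned string the first time each letter is requested. The cache shape in standalone form (heap strings stand in for zone-allocated AstRawStrings, and the toy leaks by design):

    #include <cassert>
    #include <string>

    std::string* one_char_cache[26] = {nullptr};

    // Memoize single lowercase-letter strings; everything else takes the
    // uncached path.
    std::string* GetString(const std::string& s) {
      if (s.size() == 1 && s[0] >= 'a' && s[0] <= 'z') {
        int key = s[0] - 'a';
        if (one_char_cache[key] == nullptr)
          one_char_cache[key] = new std::string(s);
        return one_char_cache[key];
      }
      return new std::string(s);
    }

    int main() {
      assert(GetString("a") == GetString("a"));    // Same cached object.
      assert(GetString("ab") != GetString("ab"));  // Not cached.
    }
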
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 4ce480fe57..fd9ed71167 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -30,6 +30,7 @@
#include "src/api.h"
#include "src/base/hashmap.h"
+#include "src/conversions.h"
#include "src/globals.h"
#include "src/utils.h"
@@ -110,8 +111,9 @@ class AstRawString final : public AstString {
}
private:
- friend class AstValueFactory;
friend class AstRawStringInternalizationKey;
+ friend class AstStringConstants;
+ friend class AstValueFactory;
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash)
@@ -158,10 +160,7 @@ class AstValue : public ZoneObject {
return type_ == STRING;
}
- bool IsNumber() const {
- return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI ||
- type_ == SMI_WITH_DOT;
- }
+ bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool ContainsDot() const {
return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
@@ -173,19 +172,30 @@ class AstValue : public ZoneObject {
}
double AsNumber() const {
- if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
- return number_;
- if (type_ == SMI || type_ == SMI_WITH_DOT)
- return smi_;
+ if (IsHeapNumber()) return number_;
+ if (IsSmi()) return smi_;
UNREACHABLE();
return 0;
}
Smi* AsSmi() const {
- CHECK(type_ == SMI || type_ == SMI_WITH_DOT);
+ CHECK(IsSmi());
return Smi::FromInt(smi_);
}
+ bool ToUint32(uint32_t* value) const {
+ if (IsSmi()) {
+ int num = smi_;
+ if (num < 0) return false;
+ *value = static_cast<uint32_t>(num);
+ return true;
+ }
+ if (IsHeapNumber()) {
+ return DoubleToUint32IfEqualToSelf(number_, value);
+ }
+ return false;
+ }
+
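
ToUint32 succeeds only when the value round-trips exactly: a Smi must be non-negative, and a heap number must reproduce itself when cast back from uint32_t, so -1, 1.5, and 2^32 all fail. A standalone version of the heap-number predicate (modeled on what DoubleToUint32IfEqualToSelf is used for here):

    #include <cassert>
    #include <cstdint>

    // Convert a double to uint32_t only if the exact value survives the
    // round trip.
    bool ToUint32(double d, uint32_t* out) {
      if (!(d >= 0.0 && d < 4294967296.0)) return false;  // Also rejects NaN.
      uint32_t u = static_cast<uint32_t>(d);
      if (static_cast<double>(u) != d) return false;  // e.g. 1.5
      *out = u;
      return true;
    }

    int main() {
      uint32_t v;
      assert(ToUint32(42.0, &v) && v == 42);
      assert(!ToUint32(1.5, &v));
      assert(!ToUint32(-1.0, &v));
    }
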
bool EqualsString(const AstRawString* string) const {
return type_ == STRING && string_ == string;
}
@@ -195,6 +205,9 @@ class AstValue : public ZoneObject {
bool BooleanValue() const;
bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
+ bool IsHeapNumber() const {
+ return type_ == NUMBER || type_ == NUMBER_WITH_DOT;
+ }
bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
bool IsTrue() const { return type_ == BOOLEAN && bool_; }
bool IsUndefined() const { return type_ == UNDEFINED; }
@@ -280,7 +293,6 @@ class AstValue : public ZoneObject {
};
};
-
// For generating constants.
#define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
@@ -291,7 +303,6 @@ class AstValue : public ZoneObject {
F(default, "default") \
F(done, "done") \
F(dot, ".") \
- F(dot_class_field_init, ".class-field-init") \
F(dot_for, ".for") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
@@ -304,6 +315,7 @@ class AstValue : public ZoneObject {
F(get_space, "get ") \
F(length, "length") \
F(let, "let") \
+ F(name, "name") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
@@ -320,6 +332,45 @@ class AstValue : public ZoneObject {
F(use_strict, "use strict") \
F(value, "value")
+class AstStringConstants final {
+ public:
+ AstStringConstants(Isolate* isolate, uint32_t hash_seed)
+ : zone_(isolate->allocator(), ZONE_NAME), hash_seed_(hash_seed) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#define F(name, str) \
+ { \
+ const char* data = str; \
+ Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
+ static_cast<int>(strlen(data))); \
+ uint32_t hash = StringHasher::HashSequentialString<uint8_t>( \
+ literal.start(), literal.length(), hash_seed_); \
+ name##_string_ = new (&zone_) AstRawString(true, literal, hash); \
+ /* The Handle returned by the factory is located on the roots */ \
+ /* array, not on the temporary HandleScope, so this is safe. */ \
+ name##_string_->set_string(isolate->factory()->name##_string()); \
+ }
+ STRING_CONSTANTS(F)
+#undef F
+ }
+
+#define F(name, str) \
+ AstRawString* name##_string() { return name##_string_; }
+ STRING_CONSTANTS(F)
+#undef F
+
+ uint32_t hash_seed() const { return hash_seed_; }
+
+ private:
+ Zone zone_;
+ uint32_t hash_seed_;
+
+#define F(name, str) AstRawString* name##_string_;
+ STRING_CONSTANTS(F)
+#undef F
+
+ DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
+};
+
#define OTHER_CONSTANTS(F) \
F(true_value) \
F(false_value) \
@@ -329,21 +380,24 @@ class AstValue : public ZoneObject {
class AstValueFactory {
public:
- AstValueFactory(Zone* zone, uint32_t hash_seed)
+ AstValueFactory(Zone* zone, AstStringConstants* string_constants,
+ uint32_t hash_seed)
: string_table_(AstRawStringCompare),
values_(nullptr),
- smis_(),
strings_(nullptr),
strings_end_(&strings_),
+ string_constants_(string_constants),
zone_(zone),
hash_seed_(hash_seed) {
-#define F(name, str) name##_string_ = NULL;
- STRING_CONSTANTS(F)
-#undef F
-#define F(name) name##_ = NULL;
+#define F(name) name##_ = nullptr;
OTHER_CONSTANTS(F)
#undef F
+ DCHECK_EQ(hash_seed, string_constants->hash_seed());
std::fill(smis_, smis_ + arraysize(smis_), nullptr);
+ std::fill(one_character_strings_,
+ one_character_strings_ + arraysize(one_character_strings_),
+ nullptr);
+ InitializeStringConstants();
}
Zone* zone() const { return zone_; }
@@ -361,20 +415,12 @@ class AstValueFactory {
const AstRawString* GetString(Handle<String> literal);
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
- const AstRawString* ConcatStrings(const AstRawString* left,
- const AstRawString* right);
void Internalize(Isolate* isolate);
-#define F(name, str) \
- const AstRawString* name##_string() { \
- if (name##_string_ == NULL) { \
- const char* data = str; \
- name##_string_ = GetOneByteString( \
- Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
- static_cast<int>(strlen(data)))); \
- } \
- return name##_string_; \
+#define F(name, str) \
+ const AstRawString* name##_string() { \
+ return string_constants_->name##_string(); \
}
STRING_CONSTANTS(F)
#undef F
@@ -415,6 +461,17 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
+ void InitializeStringConstants() {
+#define F(name, str) \
+ AstRawString* raw_string_##name = string_constants_->name##_string(); \
+ base::HashMap::Entry* entry_##name = string_table_.LookupOrInsert( \
+ raw_string_##name, raw_string_##name->hash()); \
+ DCHECK(entry_##name->value == nullptr); \
+ entry_##name->value = reinterpret_cast<void*>(1);
+ STRING_CONSTANTS(F)
+#undef F
+ }
+
static bool AstRawStringCompare(void* a, void* b);
  // All strings are copied here, one after another (no NULLs in between).
@@ -423,19 +480,23 @@ class AstValueFactory {
// they can be internalized later).
AstValue* values_;
- AstValue* smis_[kMaxCachedSmi + 1];
// We need to keep track of strings_ in order since cons strings require their
// members to be internalized first.
AstString* strings_;
AstString** strings_end_;
+
+ // Holds constant string values which are shared across the isolate.
+ AstStringConstants* string_constants_;
+
+ // Caches for faster access: small numbers, one character lowercase strings
+ // (for minified code).
+ AstValue* smis_[kMaxCachedSmi + 1];
+ AstRawString* one_character_strings_[26];
+
Zone* zone_;
uint32_t hash_seed_;
-#define F(name, str) const AstRawString* name##_string_;
- STRING_CONSTANTS(F)
-#undef F
-
#define F(name) AstValue* name##_;
OTHER_CONSTANTS(F)
#undef F
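
AstStringConstants hashes and allocates the fixed string set once per isolate; each AstValueFactory then pre-seeds its own string table with those shared entries (InitializeStringConstants) so parser lookups reuse them instead of re-allocating per factory. A reduced model of that sharing (std::string stands in for AstRawString):

    #include <cassert>
    #include <string>
    #include <unordered_map>

    using Table = std::unordered_map<std::string, const std::string*>;

    // Shared constants, built once (stands in for AstStringConstants).
    const Table& Constants() {
      static const Table* c = new Table{
          {"length", new std::string("length")},
          {"name", new std::string("name")}};
      return *c;
    }

    // Per-factory table, pre-seeded so lookups reuse the shared objects
    // (stands in for InitializeStringConstants).
    struct Factory {
      Table table;
      Factory() : table(Constants().begin(), Constants().end()) {}
      const std::string* GetString(const std::string& s) {
        auto it = table.find(s);
        if (it != table.end()) return it->second;
        return table.emplace(s, new std::string(s)).first->second;  // Toy: leaks.
      }
    };

    int main() {
      Factory f1, f2;
      assert(f1.GetString("length") == f2.GetString("length"));  // Shared.
      assert(f1.GetString("foo") != f2.GetString("foo"));        // Per-factory.
    }
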
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index fc8bd8a5bd..c63f90ecf1 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -10,6 +10,7 @@
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
#include "src/code-stubs.h"
#include "src/contexts.h"
@@ -28,6 +29,8 @@ namespace internal {
#ifdef DEBUG
+void AstNode::Print() { Print(Isolate::Current()); }
+
void AstNode::Print(Isolate* isolate) {
AstPrinter::PrintOut(isolate, this);
}
@@ -70,6 +73,10 @@ bool Expression::IsSmiLiteral() const {
return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
}
+bool Expression::IsNumberLiteral() const {
+ return IsLiteral() && AsLiteral()->raw_value()->IsNumber();
+}
+
bool Expression::IsStringLiteral() const {
return IsLiteral() && AsLiteral()->raw_value()->IsString();
}
@@ -197,9 +204,7 @@ void VariableProxy::BindTo(Variable* var) {
var->set_is_used();
}
-
-void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void VariableProxy::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
@@ -211,7 +216,7 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
return;
}
- variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
+ variable_feedback_slot_ = spec->AddLoadGlobalICSlot();
cache->Put(var(), variable_feedback_slot_);
} else {
variable_feedback_slot_ = spec->AddLoadICSlot();
@@ -235,8 +240,7 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
}
}
-void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void ForInStatement::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
@@ -253,15 +257,12 @@ Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
}
-void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void Assignment::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(target(), spec, &slot_);
}
-
-void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void CountOperation::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
@@ -346,6 +347,16 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
+FeedbackVectorSlot LiteralProperty::GetStoreDataPropertySlot() const {
+ int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+ return GetSlot(offset);
+}
+
+void LiteralProperty::SetStoreDataPropertySlot(FeedbackVectorSlot slot) {
+ int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+ return SetSlot(slot, offset);
+}
+
bool LiteralProperty::NeedsSetFunctionName() const {
return is_computed_name_ &&
(value_->IsAnonymousFunctionDefinition() ||
@@ -360,12 +371,14 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
kind_(kind),
is_static_(is_static) {}
-void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void ClassLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The logic that computes the number of slots needed for vector store
- // ICs must mirror FullCodeGenerator::VisitClassLiteral.
- prototype_slot_ = spec->AddLoadICSlot();
+ // ICs must mirror BytecodeGenerator::VisitClassLiteral.
+ if (FunctionLiteral::NeedsHomeObject(constructor())) {
+ home_object_slot_ = spec->AddStoreICSlot();
+ }
+
if (NeedsProxySlot()) {
proxy_slot_ = spec->AddStoreICSlot();
}
@@ -376,6 +389,8 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
}
+ property->SetStoreDataPropertySlot(
+ spec->AddStoreDataPropertyInLiteralICSlot());
}
}
@@ -392,8 +407,7 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
-void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitObjectLiteral.
@@ -406,6 +420,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -413,7 +428,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
- if (key->value()->IsInternalizedString()) {
+ if (key->IsStringLiteral()) {
if (property->emit_store()) {
property->SetSlot(spec->AddStoreICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
@@ -450,6 +465,8 @@ void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
property->SetSlot(spec->AddStoreICSlot());
}
}
+ property->SetStoreDataPropertySlot(
+ spec->AddStoreDataPropertyInLiteralICSlot());
}
}
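
Dropping the Isolate* parameter works because slot assignment never touches the heap: each AssignFeedbackVectorSlots override only appends slot kinds to the spec and remembers the returned indices. A rough standalone sketch of that shape (SlotKind, Spec, and Slot are illustrative; only the Add*Slot names come from the diff):

    #include <cstdint>
    #include <vector>

    enum class SlotKind : uint8_t {
      kStoreIC,
      kStoreDataPropertyInLiteral,
      kLoadIC,
      kCallIC,
    };

    struct Slot {
      int index = -1;
      bool IsValid() const { return index >= 0; }
    };

    // Grow-only list of slot kinds; adding a slot returns its index so AST
    // nodes can remember where their feedback will live at runtime.
    class Spec {
     public:
      Slot AddSlot(SlotKind kind) {
        kinds_.push_back(kind);
        return Slot{static_cast<int>(kinds_.size()) - 1};
      }
      Slot AddStoreICSlot() { return AddSlot(SlotKind::kStoreIC); }
      Slot AddStoreDataPropertyInLiteralICSlot() {
        return AddSlot(SlotKind::kStoreDataPropertyInLiteral);
      }

     private:
      std::vector<SlotKind> kinds_;
    };

This also explains GetStoreDataPropertySlot's offset arithmetic earlier in the diff: a property whose value needs a home object takes a store-IC slot first, so the data-property slot lands at offset 1 rather than 0.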
@@ -491,13 +508,8 @@ bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
property->kind() != ObjectLiteral::Property::PROTOTYPE;
}
-
-void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
- if (!constant_properties_.is_null()) return;
-
- // Allocate a fixed array to hold all the constant properties.
- Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
- boilerplate_properties_ * 2, TENURED);
+void ObjectLiteral::InitDepthAndFlags() {
+ if (depth_ > 0) return;
int position = 0;
// Accumulate the value in local variables and store it at the end.
@@ -521,50 +533,43 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
- m_literal->BuildConstants(isolate);
+ m_literal->InitDepthAndFlags();
if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
}
- // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
- // value for COMPUTED properties, the real value is filled in at
- // runtime. The enumeration order is maintained.
- Handle<Object> key = property->key()->AsLiteral()->value();
- Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+ const AstValue* key = property->key()->AsLiteral()->raw_value();
+ Expression* value = property->value();
+
+ bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
// Ensure objects that may, at any point in time, contain fields with double
// representation are always treated as nested objects. This is true for
- // computed fields (value is undefined), and smi and double literals
- // (value->IsNumber()).
+ // computed fields, and smi and double literals.
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
- (value->IsNumber() || value->IsUninitialized(isolate))) {
+ (value->IsNumberLiteral() || !is_compile_time_value)) {
bit_field_ = MayStoreDoublesField::update(bit_field_, true);
}
- is_simple = is_simple && !value->IsUninitialized(isolate);
+ is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
// the largest element index. If the largest element index is
// much larger than the number of elements, creating an object
// literal with fast elements will be a waste of space.
uint32_t element_index = 0;
- if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+ if (key->IsString() && key->AsString()->AsArrayIndex(&element_index)) {
max_element_index = Max(element_index, max_element_index);
elements++;
- key = isolate->factory()->NewNumberFromUint(element_index);
- } else if (key->ToArrayIndex(&element_index)) {
+ } else if (key->ToUint32(&element_index) && element_index != kMaxUInt32) {
max_element_index = Max(element_index, max_element_index);
elements++;
- } else if (key->IsNumber()) {
- key = isolate->factory()->NumberToString(key);
}
- // Add name, value pair to the fixed array.
- constant_properties->set(position++, *key);
- constant_properties->set(position++, *value);
+ // Increment the position for the key and the value.
+ position += 2;
}
- constant_properties_ = constant_properties;
bit_field_ = FastElementsField::update(
bit_field_,
(max_element_index <= 32) || ((2 * elements) >= max_element_index));
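
The fast-elements rule itself is unchanged by the rewrite; it only reads differently because the counting no longer allocates. Worked through (the values below are invented for illustration):

    #include <cstdint>
    #include <cstdio>

    static bool UseFastElements(uint32_t max_element_index, uint32_t elements) {
      // Fast elements pay off when indices are small or the literal is dense.
      return (max_element_index <= 32) || ((2 * elements) >= max_element_index);
    }

    int main() {
      // 100 numeric keys 0..99: dense, so fast elements are fine.
      std::printf("%d\n", UseFastElements(99, 100));   // prints 1
      // Two keys, 0 and 10000: a fast backing store would be mostly holes.
      std::printf("%d\n", UseFastElements(10000, 2));  // prints 0
      return 0;
    }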
@@ -574,6 +579,91 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
set_depth(depth_acc);
}
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
+ if (!constant_properties_.is_null()) return;
+
+ // Allocate a fixed array to hold all the constant properties.
+ Handle<FixedArray> constant_properties =
+ isolate->factory()->NewFixedArray(boilerplate_properties_ * 2, TENURED);
+
+ int position = 0;
+ for (int i = 0; i < properties()->length(); i++) {
+ ObjectLiteral::Property* property = properties()->at(i);
+ if (!IsBoilerplateProperty(property)) {
+ continue;
+ }
+
+ if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
+ DCHECK(property->is_computed_name());
+ break;
+ }
+ DCHECK(!property->is_computed_name());
+
+ MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->BuildConstants(isolate);
+ }
+
+ // Add CONSTANT and COMPUTED properties to the boilerplate. Use the
+ // undefined value for COMPUTED properties; the real value is filled in
+ // at runtime. The enumeration order is maintained.
+ Handle<Object> key = property->key()->AsLiteral()->value();
+ Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+
+ uint32_t element_index = 0;
+ if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+ key = isolate->factory()->NewNumberFromUint(element_index);
+ } else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
+ key = isolate->factory()->NumberToString(key);
+ }
+
+ // Add name, value pair to the fixed array.
+ constant_properties->set(position++, *key);
+ constant_properties->set(position++, *value);
+ }
+
+ constant_properties_ = constant_properties;
+}
+
+bool ObjectLiteral::IsFastCloningSupported() const {
+ // The FastCloneShallowObject builtin doesn't copy elements, and object
+ // literals don't support copy-on-write (COW) elements for now.
+ // TODO(mvstanton): make object literals support COW elements.
+ return fast_elements() && has_shallow_properties() &&
+ properties_count() <= ConstructorBuiltinsAssembler::
+ kMaximumClonedShallowObjectProperties;
+}
+
+void ArrayLiteral::InitDepthAndFlags() {
+ DCHECK_LT(first_spread_index_, 0);
+
+ if (depth_ > 0) return;
+
+ int constants_length = values()->length();
+
+ // Fill in the literals.
+ bool is_simple = true;
+ int depth_acc = 1;
+ int array_index = 0;
+ for (; array_index < constants_length; array_index++) {
+ Expression* element = values()->at(array_index);
+ DCHECK(!element->IsSpread());
+ MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
+ if (m_literal != NULL) {
+ m_literal->InitDepthAndFlags();
+ if (m_literal->depth() + 1 > depth_acc) {
+ depth_acc = m_literal->depth() + 1;
+ }
+ }
+
+ if (!CompileTimeValue::IsCompileTimeValue(element)) {
+ is_simple = false;
+ }
+ }
+
+ set_is_simple(is_simple);
+ set_depth(depth_acc);
+}
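
The depth recursion that used to ride along inside BuildConstantElements now lives here, where it needs no heap access: a literal with no nested literals has depth 1, and each level of nesting adds one. Reduced to its skeleton (Lit is an illustrative node type, not V8's):

    #include <algorithm>
    #include <vector>

    struct Lit {
      std::vector<Lit*> children;  // nested materialized literals only
      int depth = 0;               // 0 means "not yet initialized"

      void InitDepth() {
        if (depth > 0) return;  // idempotent, like the early-outs above
        int acc = 1;
        for (Lit* child : children) {
          child->InitDepth();
          acc = std::max(acc, child->depth + 1);
        }
        depth = acc;
      }
    };

For example, the literal [1, [2, [3]]] ends up with depth 3, so the depth() <= 1 test in IsFastCloningSupported below rejects it.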
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
DCHECK_LT(first_spread_index_, 0);
@@ -586,8 +676,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
isolate->factory()->NewFixedArrayWithHoles(constants_length);
// Fill in the literals.
- bool is_simple = true;
- int depth_acc = 1;
bool is_holey = false;
int array_index = 0;
for (; array_index < constants_length; array_index++) {
@@ -596,9 +684,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->BuildConstants(isolate);
- if (m_literal->depth() + 1 > depth_acc) {
- depth_acc = m_literal->depth() + 1;
- }
}
// New handle scope here; it needs to be after BuildConstants().
@@ -611,7 +696,6 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (boilerplate_value->IsUninitialized(isolate)) {
boilerplate_value = handle(Smi::kZero, isolate);
- is_simple = false;
}
kind = GetMoreGeneralElementsKind(kind,
@@ -623,7 +707,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Simple and shallow arrays can be lazily copied; we transform the
// elements array to a copy-on-write array.
- if (is_simple && depth_acc == 1 && array_index > 0 &&
+ if (is_simple() && depth() == 1 && array_index > 0 &&
IsFastSmiOrObjectElementsKind(kind)) {
fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
}
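
Pointing the boilerplate at the fixed_cow_array_map is what makes the "lazily copied" promise cheap: every instantiation of the literal shares the backing store until something writes to it. The same copy-on-write idea in portable C++, with shared_ptr ownership standing in for the map tag (an illustrative single-threaded sketch, not V8 code):

    #include <cstddef>
    #include <memory>
    #include <vector>

    class CowVec {
     public:
      explicit CowVec(std::vector<int> v)
          : data_(std::make_shared<std::vector<int>>(std::move(v))) {}

      int at(std::size_t i) const { return (*data_)[i]; }

      void set(std::size_t i, int v) {
        // Shared with another CowVec? Detach to a private copy first.
        if (data_.use_count() > 1)
          data_ = std::make_shared<std::vector<int>>(*data_);
        (*data_)[i] = v;
      }

     private:
      std::shared_ptr<std::vector<int>> data_;
    };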
@@ -637,20 +721,20 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
}
- // Remember both the literal's constant values as well as the ElementsKind
- // in a 2-element FixedArray.
- Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
- literals->set(0, Smi::FromInt(kind));
- literals->set(1, *elements);
+ // Remember both the literal's constant values as well as the ElementsKind.
+ Handle<ConstantElementsPair> literals =
+ isolate->factory()->NewConstantElementsPair(kind, elements);
constant_elements_ = literals;
- set_is_simple(is_simple);
- set_depth(depth_acc);
}
+bool ArrayLiteral::IsFastCloningSupported() const {
+ return depth() <= 1 &&
+ values()->length() <=
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
+}
-void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitArrayLiteral.
@@ -678,6 +762,16 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
return isolate->factory()->uninitialized_value();
}
+void MaterializedLiteral::InitDepthAndFlags() {
+ if (IsArrayLiteral()) {
+ return AsArrayLiteral()->InitDepthAndFlags();
+ }
+ if (IsObjectLiteral()) {
+ return AsObjectLiteral()->InitDepthAndFlags();
+ }
+ DCHECK(IsRegExpLiteral());
+ DCHECK_LE(1, depth()); // Depth should be initialized.
+}
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsArrayLiteral()) {
@@ -687,7 +781,6 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
return AsObjectLiteral()->BuildConstantProperties(isolate);
}
DCHECK(IsRegExpLiteral());
- DCHECK(depth() >= 1); // Depth should be initialized.
}
@@ -711,8 +804,7 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
void BinaryOperation::AssignFeedbackVectorSlots(
- Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache) {
+ FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache) {
// The feedback vector slot is only used by the interpreter for binary operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@@ -733,8 +825,7 @@ static bool IsTypeof(Expression* expr) {
}
void CompareOperation::AssignFeedbackVectorSlots(
- Isolate* isolate, FeedbackVectorSpec* spec,
- FeedbackVectorSlotCache* cache_) {
+ FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache_) {
// The feedback vector slot is only used by the interpreter for compare operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@@ -892,7 +983,7 @@ bool Expression::IsMonomorphic() const {
}
}
-void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+void Call::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
ic_slot_ = spec->AddCallICSlot();
}
@@ -931,8 +1022,7 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
statements_(statements),
compare_type_(AstType::None()) {}
-void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
- FeedbackVectorSpec* spec,
+void CaseClause::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 99e0672a4c..af561e0a3f 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -5,6 +5,7 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
+#include "src/assembler.h"
#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
@@ -102,6 +103,7 @@ namespace internal {
V(SuperCallReference) \
V(CaseClause) \
V(EmptyParentheses) \
+ V(GetIterator) \
V(DoExpression) \
V(RewritableExpression)
@@ -154,7 +156,7 @@ class AstProperties final BASE_EMBEDDED {
enum Flag {
kNoFlags = 0,
kDontSelfOptimize = 1 << 0,
- kDontCrankshaft = 1 << 1
+ kMustUseIgnitionTurbo = 1 << 1
};
typedef base::Flags<Flag> Flags;
@@ -190,6 +192,7 @@ class AstNode: public ZoneObject {
int position() const { return position_; }
#ifdef DEBUG
+ void Print();
void Print(Isolate* isolate);
#endif // DEBUG
@@ -317,6 +320,9 @@ class Expression : public AstNode {
// True iff the expression is a literal represented as a smi.
bool IsSmiLiteral() const;
+ // True iff the expression is a literal represented as a number.
+ bool IsNumberLiteral() const;
+
// True iff the expression is a string literal.
bool IsStringLiteral() const;
@@ -466,9 +472,6 @@ class Block final : public BreakableStatement {
class IgnoreCompletionField
: public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
-
- protected:
- static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
};
@@ -484,9 +487,6 @@ class DoExpression final : public Expression {
}
bool IsAnonymousFunctionDefinition() const;
- protected:
- static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
-
private:
friend class AstNodeFactory;
@@ -518,8 +518,6 @@ class Declaration : public AstNode {
Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
: AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
- static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
-
private:
VariableProxy* proxy_;
// Nested scope from which the declaration originated.
@@ -734,7 +732,7 @@ class ForInStatement final : public ForEachStatement {
void set_subject(Expression* e) { subject_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
@@ -778,9 +776,6 @@ class ForInStatement final : public ForEachStatement {
class ForInTypeField
: public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
-
- protected:
- static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
};
@@ -826,12 +821,6 @@ class ForOfStatement final : public ForEachStatement {
void set_result_done(Expression* e) { result_done_ = e; }
void set_assign_each(Expression* e) { assign_each_ = e; }
- BailoutId ContinueId() const { return EntryId(); }
- BailoutId StackCheckId() const { return BackEdgeId(); }
-
- static int num_ids() { return parent_num_ids() + 1; }
- BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
-
private:
friend class AstNodeFactory;
@@ -842,8 +831,6 @@ class ForOfStatement final : public ForEachStatement {
next_result_(NULL),
result_done_(NULL),
assign_each_(NULL) {}
- static int parent_num_ids() { return ForEachStatement::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
Variable* iterator_;
Expression* assign_iterator_;
@@ -930,30 +917,16 @@ class WithStatement final : public Statement {
Statement* statement() const { return statement_; }
void set_statement(Statement* s) { statement_ = s; }
- void set_base_id(int id) { base_id_ = id; }
- static int num_ids() { return parent_num_ids() + 2; }
- BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
- BailoutId EntryId() const { return BailoutId(local_id(1)); }
-
private:
friend class AstNodeFactory;
WithStatement(Scope* scope, Expression* expression, Statement* statement,
int pos)
: Statement(pos, kWithStatement),
- base_id_(BailoutId::None().ToInt()),
scope_(scope),
expression_(expression),
statement_(statement) {}
- static int parent_num_ids() { return 0; }
- int base_id() const {
- DCHECK(!BailoutId(base_id_).IsNone());
- return base_id_;
- }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
- int base_id_;
Scope* scope_;
Expression* expression_;
Statement* statement_;
@@ -981,7 +954,7 @@ class CaseClause final : public Expression {
// CaseClause will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used by
// full codegen and the feedback vector slot is used by interpreter.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() {
@@ -1212,22 +1185,15 @@ class SloppyBlockFunctionStatement final : public Statement {
public:
Statement* statement() const { return statement_; }
void set_statement(Statement* statement) { statement_ = statement; }
- Scope* scope() const { return scope_; }
- SloppyBlockFunctionStatement* next() { return next_; }
- void set_next(SloppyBlockFunctionStatement* next) { next_ = next; }
private:
friend class AstNodeFactory;
- SloppyBlockFunctionStatement(Statement* statement, Scope* scope)
+ explicit SloppyBlockFunctionStatement(Statement* statement)
: Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
- statement_(statement),
- scope_(scope),
- next_(nullptr) {}
+ statement_(statement) {}
Statement* statement_;
- Scope* const scope_;
- SloppyBlockFunctionStatement* next_;
};
@@ -1317,6 +1283,9 @@ class MaterializedLiteral : public Expression {
depth_ = depth;
}
+ // Populate the depth field and any flags the literal has.
+ void InitDepthAndFlags();
+
// Populate the constant properties/elements fixed array.
void BuildConstants(Isolate* isolate);
friend class ArrayLiteral;
@@ -1347,11 +1316,15 @@ class LiteralProperty : public ZoneObject {
return slots_[offset];
}
+ FeedbackVectorSlot GetStoreDataPropertySlot() const;
+
void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
slots_[offset] = slot;
}
+ void SetStoreDataPropertySlot(FeedbackVectorSlot slot);
+
bool NeedsSetFunctionName() const;
protected:
@@ -1374,8 +1347,9 @@ class ObjectLiteralProperty final : public LiteralProperty {
COMPUTED, // Property with computed value (execution time).
MATERIALIZED_LITERAL, // Property value is a materialized literal.
GETTER,
- SETTER, // Property is an accessor function.
- PROTOTYPE // Property is __proto__.
+ SETTER, // Property is an accessor function.
+ PROTOTYPE, // Property is __proto__.
+ SPREAD
};
Kind kind() const { return kind_; }
@@ -1412,6 +1386,7 @@ class ObjectLiteral final : public MaterializedLiteral {
typedef ObjectLiteralProperty Property;
Handle<FixedArray> constant_properties() const {
+ DCHECK(!constant_properties_.is_null());
return constant_properties_;
}
int properties_count() const { return boilerplate_properties_; }
@@ -1428,6 +1403,17 @@ class ObjectLiteral final : public MaterializedLiteral {
// Decide if a property should be in the object boilerplate.
static bool IsBoilerplateProperty(Property* property);
+ // Populate the depth field and flags.
+ void InitDepthAndFlags();
+
+ // Get the constant properties fixed array, populating it if necessary.
+ Handle<FixedArray> GetOrBuildConstantProperties(Isolate* isolate) {
+ if (constant_properties_.is_null()) {
+ BuildConstantProperties(isolate);
+ }
+ return constant_properties();
+ }
+
// Populate the constant properties fixed array.
void BuildConstantProperties(Isolate* isolate);
@@ -1436,6 +1422,9 @@ class ObjectLiteral final : public MaterializedLiteral {
// marked expressions, no store code is emitted.
void CalculateEmitStore(Zone* zone);
+ // Determines whether the {FastCloneShallowObject} builtin can be used.
+ bool IsFastCloningSupported() const;
+
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
@@ -1465,22 +1454,15 @@ class ObjectLiteral final : public MaterializedLiteral {
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
// Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForPropertyName(int i) {
- return BailoutId(local_id(2 * i + 1));
- }
- BailoutId GetIdForPropertySet(int i) {
- return BailoutId(local_id(2 * i + 2));
- }
+ BailoutId GetIdForPropertySet(int i) { return BailoutId(local_id(i + 1)); }
// Unlike other AST nodes, the number of bailout IDs allocated for an
// ObjectLiteral can vary, so num_ids() is not a static method.
- int num_ids() const {
- return parent_num_ids() + 1 + 2 * properties()->length();
- }
+ int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
private:
@@ -1500,7 +1482,6 @@ class ObjectLiteral final : public MaterializedLiteral {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
uint32_t boilerplate_properties_;
- FeedbackVectorSlot slot_;
Handle<FixedArray> constant_properties_;
ZoneList<Property*>* properties_;
@@ -1510,9 +1491,6 @@ class ObjectLiteral final : public MaterializedLiteral {
};
class MayStoreDoublesField
: public BitField<bool, HasElementsField::kNext, 1> {};
-
- protected:
- static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
};
@@ -1565,11 +1543,11 @@ class RegExpLiteral final : public MaterializedLiteral {
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public MaterializedLiteral {
public:
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
+ Handle<ConstantElementsPair> constant_elements() const {
+ return constant_elements_;
+ }
ElementsKind constant_elements_kind() const {
- DCHECK_EQ(2, constant_elements_->length());
- return static_cast<ElementsKind>(
- Smi::cast(constant_elements_->get(0))->value());
+ return static_cast<ElementsKind>(constant_elements()->elements_kind());
}
ZoneList<Expression*>* values() const { return values_; }
@@ -1583,9 +1561,23 @@ class ArrayLiteral final : public MaterializedLiteral {
// ArrayLiteral can vary, so num_ids() is not a static method.
int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
+ // Populate the depth field and flags.
+ void InitDepthAndFlags();
+
+ // Get the constant elements fixed array, populating it if necessary.
+ Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
+ if (constant_elements_.is_null()) {
+ BuildConstantElements(isolate);
+ }
+ return constant_elements();
+ }
+
// Populate the constant elements fixed array.
void BuildConstantElements(Isolate* isolate);
+ // Determines whether the {FastCloneShallowArray} builtin can be used.
+ bool IsFastCloningSupported() const;
+
// Assemble bitfield of flags for the CreateArrayLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = depth() == 1 ? kShallowElements : kNoFlags;
@@ -1614,7 +1606,7 @@ class ArrayLiteral final : public MaterializedLiteral {
kDisableMementos = 1 << 1
};
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
@@ -1632,7 +1624,7 @@ class ArrayLiteral final : public MaterializedLiteral {
int first_spread_index_;
FeedbackVectorSlot literal_slot_;
- Handle<FixedArray> constant_elements_;
+ Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1663,6 +1655,9 @@ class VariableProxy final : public Expression {
bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
void set_is_assigned() {
bit_field_ = IsAssignedField::update(bit_field_, true);
+ if (is_resolved()) {
+ var()->set_maybe_assigned();
+ }
}
bool is_resolved() const { return IsResolvedField::decode(bit_field_); }
@@ -1690,7 +1685,7 @@ class VariableProxy final : public Expression {
return var()->IsUnallocated() || var()->IsLookupSlot();
}
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
@@ -1786,7 +1781,7 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
@@ -1844,7 +1839,7 @@ class Call final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
@@ -1876,11 +1871,9 @@ class Call final : public Expression {
allocation_site_ = site;
}
- static int num_ids() { return parent_num_ids() + 4; }
+ static int num_ids() { return parent_num_ids() + 2; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
- BailoutId EvalId() const { return BailoutId(local_id(1)); }
- BailoutId LookupId() const { return BailoutId(local_id(2)); }
- BailoutId CallId() const { return BailoutId(local_id(3)); }
+ BailoutId CallId() const { return BailoutId(local_id(1)); }
bool is_uninitialized() const {
return IsUninitializedField::decode(bit_field_);
@@ -1964,7 +1957,7 @@ class CallNew final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// CallNew stores feedback in the exact same way as Call. We can
// piggyback on the type feedback infrastructure for calls.
@@ -2138,7 +2131,7 @@ class BinaryOperation final : public Expression {
// BinaryOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
@@ -2231,7 +2224,7 @@ class CountOperation final : public Expression {
return binary_operation_slot_;
}
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CountSlot() const { return slot_; }
@@ -2283,7 +2276,7 @@ class CompareOperation final : public Expression {
// CompareOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() const {
@@ -2429,7 +2422,7 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot AssignmentSlot() const { return slot_; }
@@ -2571,6 +2564,8 @@ class FunctionLiteral final : public Expression {
kAccessorOrMethod
};
+ enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
+
enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
@@ -2594,6 +2589,18 @@ class FunctionLiteral final : public Expression {
}
LanguageMode language_mode() const;
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ // The + 1 is because we need an array with room for the literals
+ // as well as the feedback vector.
+ literal_feedback_slot_ =
+ spec->AddCreateClosureSlot(materialized_literal_count_ + 1);
+ }
+
+ FeedbackVectorSlot LiteralFeedbackSlot() const {
+ return literal_feedback_slot_;
+ }
+
static bool NeedsHomeObject(Expression* expr);
int materialized_literal_count() { return materialized_literal_count_; }
@@ -2644,8 +2651,6 @@ class FunctionLiteral final : public Expression {
return HasDuplicateParameters::decode(bit_field_);
}
- bool is_function() const { return IsFunction::decode(bit_field_); }
-
// This is used as a heuristic on when to eagerly compile a function
// literal. We consider the following constructs as hints that the
// function will be called immediately:
@@ -2691,25 +2696,15 @@ class FunctionLiteral final : public Expression {
int yield_count() { return yield_count_; }
void set_yield_count(int yield_count) { yield_count_ = yield_count; }
- bool requires_class_field_init() {
- return RequiresClassFieldInit::decode(bit_field_);
- }
- void set_requires_class_field_init(bool requires_class_field_init) {
- bit_field_ =
- RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
- }
- bool is_class_field_initializer() {
- return IsClassFieldInitializer::decode(bit_field_);
- }
- void set_is_class_field_initializer(bool is_class_field_initializer) {
- bit_field_ =
- IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
- }
-
int return_position() {
return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
}
+ int function_literal_id() const { return function_literal_id_; }
+ void set_function_literal_id(int function_literal_id) {
+ function_literal_id_ = function_literal_id;
+ }
+
private:
friend class AstNodeFactory;
@@ -2720,7 +2715,7 @@ class FunctionLiteral final : public Expression {
int function_length, FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position,
- bool is_function, bool has_braces)
+ bool has_braces, int function_literal_id)
: Expression(position, kFunctionLiteral),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2733,16 +2728,14 @@ class FunctionLiteral final : public Expression {
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
- ast_properties_(zone) {
- bit_field_ |=
- FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
- HasDuplicateParameters::encode(has_duplicate_parameters ==
- kHasDuplicateParameters) |
- IsFunction::encode(is_function) |
- RequiresClassFieldInit::encode(false) |
- ShouldNotBeUsedOnceHintField::encode(false) |
- DontOptimizeReasonField::encode(kNoReason) |
- IsClassFieldInitializer::encode(false);
+ ast_properties_(zone),
+ function_literal_id_(function_literal_id) {
+ bit_field_ |= FunctionTypeBits::encode(function_type) |
+ Pretenure::encode(false) |
+ HasDuplicateParameters::encode(has_duplicate_parameters ==
+ kHasDuplicateParameters) |
+ ShouldNotBeUsedOnceHintField::encode(false) |
+ DontOptimizeReasonField::encode(kNoReason);
if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
}
@@ -2750,15 +2743,11 @@ class FunctionLiteral final : public Expression {
: public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
- class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
- : public BitField<bool, IsFunction::kNext, 1> {};
- class RequiresClassFieldInit
- : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
- class IsClassFieldInitializer
- : public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
+ : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class DontOptimizeReasonField
- : public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
+ : public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
+ };
int materialized_literal_count_;
int expected_property_count_;
@@ -2774,6 +2763,8 @@ class FunctionLiteral final : public Expression {
const AstString* raw_inferred_name_;
Handle<String> inferred_name_;
AstProperties ast_properties_;
+ int function_literal_id_;
+ FeedbackVectorSlot literal_feedback_slot_;
};
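
All of the bit_field_ bookkeeping in this class (and the re-chaining the diff performs after deleting IsFunction, RequiresClassFieldInit, and IsClassFieldInitializer) rests on the BitField template: each field is a <type, shift, width> triple, and kNext hands the next free bit position to the following field. A cut-down version showing why deleting a field only requires re-chaining (this mirrors V8's BitField in spirit; the real template has more checks):

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr int kNext = kShift + kSize;
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;

      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
      static uint32_t update(uint32_t bits, T value) {
        return (bits & ~kMask) | encode(value);
      }
    };

    // Fields pack left to right through kNext, as in the class above.
    using Pretenure = BitField<bool, 0, 1>;
    using HasDuplicateParameters = BitField<bool, Pretenure::kNext, 1>;
    using DontOptimizeReason =
        BitField<uint8_t, HasDuplicateParameters::kNext, 8>;

    static_assert(DontOptimizeReason::kNext == 10, "1 + 1 + 8 bits used");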
// Property is used for passing information
@@ -2808,27 +2799,16 @@ class ClassLiteral final : public Expression {
ZoneList<Property*>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
-
- VariableProxy* static_initializer_proxy() const {
- return static_initializer_proxy_;
+ bool has_name_static_property() const {
+ return HasNameStaticProperty::decode(bit_field_);
}
- void set_static_initializer_proxy(VariableProxy* proxy) {
- static_initializer_proxy_ = proxy;
+ bool has_static_computed_names() const {
+ return HasStaticComputedNames::decode(bit_field_);
}
- BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
- BailoutId PrototypeId() { return BailoutId(local_id(1)); }
-
- // Return an AST id for a property that is used in simulate instructions.
- BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 2)); }
-
- // Unlike other AST nodes, this number of bailout IDs allocated for an
- // ClassLiteral can vary, so num_ids() is not a static method.
- int num_ids() const { return parent_num_ids() + 2 + properties()->length(); }
-
// Class literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
bool NeedsProxySlot() const {
@@ -2836,7 +2816,7 @@ class ClassLiteral final : public Expression {
class_variable_proxy()->var()->IsUnallocated();
}
- FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
+ FeedbackVectorSlot HomeObjectSlot() const { return home_object_slot_; }
FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
private:
@@ -2844,26 +2824,30 @@ class ClassLiteral final : public Expression {
ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
FunctionLiteral* constructor, ZoneList<Property*>* properties,
- int start_position, int end_position)
+ int start_position, int end_position,
+ bool has_name_static_property, bool has_static_computed_names)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
class_variable_proxy_(class_variable_proxy),
extends_(extends),
constructor_(constructor),
- properties_(properties),
- static_initializer_proxy_(nullptr) {}
-
- static int parent_num_ids() { return Expression::num_ids(); }
- int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ properties_(properties) {
+ bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
+ HasStaticComputedNames::encode(has_static_computed_names);
+ }
int end_position_;
- FeedbackVectorSlot prototype_slot_;
+ FeedbackVectorSlot home_object_slot_;
FeedbackVectorSlot proxy_slot_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
- VariableProxy* static_initializer_proxy_;
+
+ class HasNameStaticProperty
+ : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+ class HasStaticComputedNames
+ : public BitField<bool, HasNameStaticProperty::kNext, 1> {};
};
@@ -2871,6 +2855,19 @@ class NativeFunctionLiteral final : public Expression {
public:
Handle<String> name() const { return name_->string(); }
v8::Extension* extension() const { return extension_; }
+ FeedbackVectorSlot LiteralFeedbackSlot() const {
+ return literal_feedback_slot_;
+ }
+
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ // 0 is a magic number here. It means we are holding the literals
+ // array for a native function literal, which needs to be
+ // the empty literals array.
+ // TODO(mvstanton): The FeedbackVectorSlotCache can be adapted
+ // to always return the same slot for this case.
+ literal_feedback_slot_ = spec->AddCreateClosureSlot(0);
+ }
private:
friend class AstNodeFactory;
@@ -2883,6 +2880,7 @@ class NativeFunctionLiteral final : public Expression {
const AstRawString* name_;
v8::Extension* extension_;
+ FeedbackVectorSlot literal_feedback_slot_;
};
@@ -2955,7 +2953,43 @@ class EmptyParentheses final : public Expression {
explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
};
+// Represents the spec operation `GetIterator()`
+// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
+// desugars this into a LoadIC / JSLoadNamed, CallIC, and a type-check to
+// validate the return value of the Symbol.iterator() call.
+class GetIterator final : public Expression {
+ public:
+ Expression* iterable() const { return iterable_; }
+ void set_iterable(Expression* iterable) { iterable_ = iterable; }
+
+ static int num_ids() { return parent_num_ids(); }
+
+ void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+ FeedbackVectorSlotCache* cache) {
+ iterator_property_feedback_slot_ =
+ spec->AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ iterator_call_feedback_slot_ =
+ spec->AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ }
+
+ FeedbackVectorSlot IteratorPropertyFeedbackSlot() const {
+ return iterator_property_feedback_slot_;
+ }
+
+ FeedbackVectorSlot IteratorCallFeedbackSlot() const {
+ return iterator_call_feedback_slot_;
+ }
+
+ private:
+ friend class AstNodeFactory;
+ explicit GetIterator(Expression* iterable, int pos)
+ : Expression(pos, kGetIterator), iterable_(iterable) {}
+
+ Expression* iterable_;
+ FeedbackVectorSlot iterator_property_feedback_slot_;
+ FeedbackVectorSlot iterator_call_feedback_slot_;
+};
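
The comment above compresses three spec steps; spelled out, GetIterator(obj) loads obj[Symbol.iterator] (the LOAD_IC slot), calls it with obj as the receiver (the CALL_IC slot), and throws if the result is not an object. The same sequence modeled in standalone C++ (Iterable and JSObject are invented stand-ins, not V8 types):

    #include <functional>
    #include <stdexcept>

    struct JSObject {};  // stand-in for "is an Object" in the type check

    struct Iterable {
      std::function<JSObject*()> symbol_iterator;  // obj[Symbol.iterator]
    };

    JSObject* GetIterator(Iterable& obj) {
      auto method = obj.symbol_iterator;  // 1. load the property (LoadIC)
      if (!method) throw std::runtime_error("obj is not iterable");
      JSObject* iterator = method();      // 2. call it on obj (CallIC)
      if (iterator == nullptr)            // 3. type-check the result
        throw std::runtime_error("result of Symbol.iterator is not an object");
      return iterator;
    }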
// ----------------------------------------------------------------------------
// Basic visitor
@@ -3217,15 +3251,6 @@ class AstNodeFactory final BASE_EMBEDDED {
try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
}
- TryCatchStatement* NewTryCatchStatementForPromiseReject(Block* try_block,
- Scope* scope,
- Variable* variable,
- Block* catch_block,
- int pos) {
- return new (zone_) TryCatchStatement(
- try_block, scope, variable, catch_block, HandlerTable::PROMISE, pos);
- }
-
TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
Scope* scope,
Variable* variable,
@@ -3258,9 +3283,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyStatement(pos);
}
- SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
- return new (zone_) SloppyBlockFunctionStatement(
- NewEmptyStatement(kNoSourcePosition), scope);
+ SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement() {
+ return new (zone_)
+ SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
}
CaseClause* NewCaseClause(
@@ -3437,9 +3462,13 @@ class AstNodeFactory final BASE_EMBEDDED {
Expression* value,
int pos) {
DCHECK(Token::IsAssignmentOp(op));
+
+ if (op != Token::INIT && target->IsVariableProxy()) {
+ target->AsVariableProxy()->set_is_assigned();
+ }
+
Assignment* assign = new (zone_) Assignment(op, target, value, pos);
if (assign->is_compound()) {
- DCHECK(Token::IsAssignmentOp(op));
assign->binary_operation_ =
NewBinaryOperation(assign->binary_op(), target, value, pos + 1);
}
@@ -3463,12 +3492,12 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
- bool has_braces) {
+ bool has_braces, int function_literal_id) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
function_length, function_type, has_duplicate_parameters,
- eager_compile_hint, position, true, has_braces);
+ eager_compile_hint, position, has_braces, function_literal_id);
}
// Creates a FunctionLiteral representing a top-level script, the
@@ -3483,7 +3512,8 @@ class AstNodeFactory final BASE_EMBEDDED {
body, materialized_literal_count, expected_property_count,
parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kShouldLazyCompile, 0, false, true);
+ FunctionLiteral::kShouldLazyCompile, 0, true,
+ FunctionLiteral::kIdTypeTopLevel);
}
ClassLiteral::Property* NewClassLiteralProperty(
@@ -3496,9 +3526,12 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
FunctionLiteral* constructor,
ZoneList<ClassLiteral::Property*>* properties,
- int start_position, int end_position) {
- return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
- start_position, end_position);
+ int start_position, int end_position,
+ bool has_name_static_property,
+ bool has_static_computed_names) {
+ return new (zone_) ClassLiteral(
+ proxy, extends, constructor, properties, start_position, end_position,
+ has_name_static_property, has_static_computed_names);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3534,6 +3567,10 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyParentheses(pos);
}
+ GetIterator* NewGetIterator(Expression* iterable, int pos) {
+ return new (zone_) GetIterator(iterable, pos);
+ }
+
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }
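
Every NewXxx above allocates with `new (zone_) T(...)`: AST nodes come from a bump-pointer zone and die in bulk when the zone does, which is why none of these classes needs a destructor to run. A minimal zone with the same placement-new idiom (fixed-size buffer for brevity; a real zone grows in segments):

    #include <cstddef>
    #include <new>

    class Zone {
     public:
      void* New(std::size_t size) {
        size = (size + 7) & ~std::size_t{7};  // keep 8-byte alignment
        if (top_ + size > sizeof(buffer_)) throw std::bad_alloc();
        void* p = buffer_ + top_;
        top_ += size;
        return p;  // no per-object free; the zone is freed as a whole
      }

     private:
      alignas(8) char buffer_[1 << 12];
      std::size_t top_ = 0;
    };

    // Enables the `new (zone) T(...)` spelling used by the factory.
    inline void* operator new(std::size_t size, Zone* zone) {
      return zone->New(size);
    }

    struct Node {
      int pos;
      explicit Node(int p) : pos(p) {}
    };

    // Usage: Zone zone; Node* n = new (&zone) Node(42);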
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
index eda536b716..27dd29fee0 100644
--- a/deps/v8/src/ast/compile-time-value.cc
+++ b/deps/v8/src/ast/compile-time-value.cc
@@ -48,8 +48,8 @@ CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
return static_cast<LiteralType>(literal_type->value());
}
-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
- return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+ return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
}
} // namespace internal
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
index 27351b79cc..d61443e583 100644
--- a/deps/v8/src/ast/compile-time-value.h
+++ b/deps/v8/src/ast/compile-time-value.h
@@ -31,8 +31,8 @@ class CompileTimeValue : public AllStatic {
// Get the type of a compile time value returned by GetValue().
static LiteralType GetLiteralType(Handle<FixedArray> value);
- // Get the elements array of a compile time value returned by GetValue().
- static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+ // Get the elements of a compile time value returned by GetValue().
+ static Handle<HeapObject> GetElements(Handle<FixedArray> value);
private:
static const int kLiteralTypeSlot = 0;
diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc
index 339d64c580..41ce9e03da 100644
--- a/deps/v8/src/ast/modules.cc
+++ b/deps/v8/src/ast/modules.cc
@@ -5,6 +5,8 @@
#include "src/ast/modules.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index a3fc50ae57..463ae26c4d 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -10,18 +10,19 @@
#include "src/ast/scopes.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
-CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin)
+CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
: builder_(isolate) {
isolate_ = isolate;
position_ = 0;
num_prints_ = 0;
found_ = false;
done_ = false;
- is_builtin_ = is_builtin;
+ is_user_js_ = is_user_js;
InitializeAstVisitor(isolate);
}
@@ -239,11 +240,11 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
void CallPrinter::VisitVariableProxy(VariableProxy* node) {
- if (is_builtin_) {
- // Variable names of builtins are meaningless due to minification.
- Print("(var)");
- } else {
+ if (is_user_js_) {
PrintLiteral(node->name(), false);
+ } else {
+ // Variable names of non-user code are meaningless due to minification.
+ Print("(var)");
}
}
@@ -279,9 +280,9 @@ void CallPrinter::VisitProperty(Property* node) {
void CallPrinter::VisitCall(Call* node) {
bool was_found = !found_ && node->position() == position_;
if (was_found) {
- // Bail out if the error is caused by a direct call to a variable in builtin
- // code. The variable name is meaningless due to minification.
- if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ // Bail out if the error is caused by a direct call to a variable in
+ // non-user JS code. The variable name is meaningless due to minification.
+ if (!is_user_js_ && node->expression()->IsVariableProxy()) {
done_ = true;
return;
}
@@ -297,9 +298,9 @@ void CallPrinter::VisitCall(Call* node) {
void CallPrinter::VisitCallNew(CallNew* node) {
bool was_found = !found_ && node->position() == position_;
if (was_found) {
- // Bail out if the error is caused by a direct call to a variable in builtin
- // code. The variable name is meaningless due to minification.
- if (is_builtin_ && node->expression()->IsVariableProxy()) {
+ // Bail out if the error is caused by a direct call to a variable in
+ // non-user JS code. The variable name is meaningless due to minification.
+ if (!is_user_js_ && node->expression()->IsVariableProxy()) {
done_ = true;
return;
}
@@ -370,6 +371,11 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
+void CallPrinter::VisitGetIterator(GetIterator* node) {
+ Print("GetIterator(");
+ Find(node->iterable(), true);
+ Print(")");
+}
void CallPrinter::VisitThisFunction(ThisFunction* node) {}
@@ -874,15 +880,16 @@ void AstPrinter::PrintTryStatement(TryStatement* node) {
case HandlerTable::CAUGHT:
prediction = "CAUGHT";
break;
- case HandlerTable::PROMISE:
- prediction = "PROMISE";
- break;
case HandlerTable::DESUGARING:
prediction = "DESUGARING";
break;
case HandlerTable::ASYNC_AWAIT:
prediction = "ASYNC_AWAIT";
break;
+ case HandlerTable::PROMISE:
+ // Catch predictions resulting in promise rejections aren't
+ // produced by the parser.
+ UNREACHABLE();
}
Print(" %s\n", prediction);
}
@@ -1019,6 +1026,9 @@ void AstPrinter::PrintObjectProperties(
case ObjectLiteral::Property::SETTER:
prop_kind = "SETTER";
break;
+ case ObjectLiteral::Property::SPREAD:
+ prop_kind = "SPREAD";
+ break;
}
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "PROPERTY - %s", prop_kind);
@@ -1136,7 +1146,14 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
- SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+ if (node->is_jsruntime()) {
+ SNPrintF(
+ buf, "CALL RUNTIME %s code = %p", node->debug_name(),
+ static_cast<void*>(isolate_->context()->get(node->context_index())));
+ } else {
+ SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+ }
+
IndentedScope indent(this, buf.start(), node->position());
PrintArguments(node->arguments());
}
@@ -1181,6 +1198,10 @@ void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
IndentedScope indent(this, "()", node->position());
}
+void AstPrinter::VisitGetIterator(GetIterator* node) {
+ IndentedScope indent(this, "GET-ITERATOR", node->position());
+ Visit(node->iterable());
+}
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION", node->position());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index b56c834893..fdc079ca07 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -15,7 +15,7 @@ namespace internal {
class CallPrinter final : public AstVisitor<CallPrinter> {
public:
- explicit CallPrinter(Isolate* isolate, bool is_builtin);
+ explicit CallPrinter(Isolate* isolate, bool is_user_js);
// The following routine prints the node with position |position| into a
// string.
@@ -38,7 +38,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
int position_; // position of ast node to print
bool found_;
bool done_;
- bool is_builtin_;
+ bool is_user_js_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index c1679a40b8..b61bcdab55 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -9,12 +9,27 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
#include "src/bootstrapper.h"
+#include "src/counters.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"
#include "src/parsing/parse-info.h"
namespace v8 {
namespace internal {
+namespace {
+void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
+void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
+
+bool IsLexical(Variable* variable) {
+ if (variable == kDummyPreParserLexicalVariable) return true;
+ if (variable == kDummyPreParserVariable) return false;
+ return IsLexicalVariableMode(variable->mode());
+}
+
+} // namespace
+
// ----------------------------------------------------------------------------
// Implementation of LocalsMap
//
@@ -49,6 +64,19 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
return reinterpret_cast<Variable*>(p->value);
}
+void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
+ VariableMode mode) {
+ Entry* p =
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneAllocationPolicy(zone));
+ if (p->value == nullptr) {
+ // The variable has not been declared yet -> insert it.
+ DCHECK_EQ(name, p->key);
+ p->value =
+ mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
+ }
+}
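
kDummyPreParserVariable and kDummyPreParserLexicalVariable are sentinel pointers: while lazily parsing, DeclareName records that a name was declared, and whether lexically, without paying for a Variable allocation, and IsLexical checks the sentinels before dereferencing. The trick in isolation (std::unordered_map stands in for the zone hash map):

    #include <cassert>
    #include <string>
    #include <unordered_map>

    struct Variable {
      bool is_lexical;
    };

    // Distinct non-null addresses that can never be real Variable*s.
    static Variable* const kDummyVar = reinterpret_cast<Variable*>(0x1);
    static Variable* const kDummyLexicalVar = reinterpret_cast<Variable*>(0x2);

    static bool IsLexical(Variable* v) {
      if (v == kDummyLexicalVar) return true;  // check sentinels first:
      if (v == kDummyVar) return false;        // never dereference them
      return v->is_lexical;
    }

    int main() {
      std::unordered_map<std::string, Variable*> declared;
      declared["x"] = kDummyVar;         // preparser saw `var x`
      declared["y"] = kDummyLexicalVar;  // preparser saw `let y`
      assert(!IsLexical(declared["x"]));
      assert(IsLexical(declared["y"]));
      return 0;
    }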
+
void VariableMap::Remove(Variable* var) {
const AstRawString* name = var->raw_name();
ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
@@ -74,21 +102,27 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
return NULL;
}
+void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
+ if (statement_ != nullptr) {
+ statement_->set_statement(statement);
+ }
+}
+
SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
: ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}
-void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
- SloppyBlockFunctionStatement* stmt) {
+void SloppyBlockFunctionMap::Declare(
+ Zone* zone, const AstRawString* name,
+ SloppyBlockFunctionMap::Delegate* delegate) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
// by the same AstRawString*.
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
ZoneAllocationPolicy(zone));
- stmt->set_next(static_cast<SloppyBlockFunctionStatement*>(p->value));
- p->value = stmt;
+ delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
+ p->value = delegate;
}
-
// ----------------------------------------------------------------------------
// Implementation of Scope
@@ -243,8 +277,7 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
- Variable* variable = Declare(zone, this, catch_variable_name, VAR,
- NORMAL_VARIABLE, kCreatedInitialized);
+ Variable* variable = Declare(zone, catch_variable_name, VAR);
AllocateHeapSlot(variable);
}
@@ -263,7 +296,14 @@ void DeclarationScope::SetDefaults() {
arguments_ = nullptr;
this_function_ = nullptr;
should_eager_compile_ = false;
- is_lazily_parsed_ = false;
+ was_lazily_parsed_ = false;
+#ifdef DEBUG
+ DeclarationScope* outer_declaration_scope =
+ outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
+ is_being_lazily_parsed_ =
+ outer_declaration_scope ? outer_declaration_scope->is_being_lazily_parsed_
+ : false;
+#endif
}
void Scope::SetDefaults() {
@@ -305,7 +345,7 @@ bool DeclarationScope::ShouldEagerCompile() const {
}
void DeclarationScope::set_should_eager_compile() {
- should_eager_compile_ = !is_lazily_parsed_;
+ should_eager_compile_ = !was_lazily_parsed_;
}
void DeclarationScope::set_asm_module() {
@@ -354,17 +394,16 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
}
DCHECK(!scope_info->HasOuterScopeInfo());
break;
- } else if (scope_info->scope_type() == FUNCTION_SCOPE ||
- scope_info->scope_type() == EVAL_SCOPE) {
- // TODO(neis): For an eval scope, we currently create an ordinary function
- // context. This is wrong and needs to be fixed.
- // https://bugs.chromium.org/p/v8/issues/detail?id=5295
+ } else if (scope_info->scope_type() == FUNCTION_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
if (scope_info->IsAsmFunction())
outer_scope->AsDeclarationScope()->set_asm_function();
if (scope_info->IsAsmModule())
outer_scope->AsDeclarationScope()->set_asm_module();
+ } else if (scope_info->scope_type() == EVAL_SCOPE) {
+ outer_scope =
+ new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
} else if (scope_info->scope_type() == BLOCK_SCOPE) {
if (scope_info->is_declaration_scope()) {
outer_scope =
@@ -424,11 +463,21 @@ int Scope::num_parameters() const {
return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
}
+void DeclarationScope::DeclareSloppyBlockFunction(
+ const AstRawString* name, Scope* scope,
+ SloppyBlockFunctionStatement* statement) {
+ auto* delegate =
+ new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
+ sloppy_block_function_map_.Declare(zone(), name, delegate);
+}
+
void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
DCHECK(is_sloppy(language_mode()));
DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
(is_block_scope() && outer_scope()->is_function_scope()));
- DCHECK(HasSimpleParameters() || is_block_scope());
+ DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
+ DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
+
bool has_simple_parameters = HasSimpleParameters();
// For each variable which is used as a function declaration in a sloppy
// block,
@@ -460,7 +509,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
bool var_created = false;
// Write in assignments to var for each block-scoped function declaration
- auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+ auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
DeclarationScope* decl_scope = this;
while (decl_scope->is_eval_scope()) {
@@ -468,7 +517,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
Scope* outer_scope = decl_scope->outer_scope();
- for (SloppyBlockFunctionStatement* delegate = delegates;
+ for (SloppyBlockFunctionMap::Delegate* delegate = delegates;
delegate != nullptr; delegate = delegate->next()) {
// Check if there's a conflict with a lexical declaration
Scope* query_scope = delegate->scope()->outer_scope();
@@ -482,7 +531,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// `{ let e; try {} catch (e) { function e(){} } }`
do {
var = query_scope->LookupLocal(name);
- if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+ if (var != nullptr && IsLexical(var)) {
should_hoist = false;
break;
}
@@ -494,30 +543,39 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Declare a var-style binding for the function in the outer scope
if (!var_created) {
var_created = true;
- VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
- Declaration* declaration =
- factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
- // Based on the preceding check, it doesn't matter what we pass as
- // allow_harmony_restrictive_generators and
- // sloppy_mode_block_scope_function_redefinition.
- bool ok = true;
- DeclareVariable(declaration, VAR,
- Variable::DefaultInitializationFlag(VAR), false,
- nullptr, &ok);
- CHECK(ok); // Based on the preceding check, this should not fail
+ if (factory) {
+ VariableProxy* proxy =
+ factory->NewVariableProxy(name, NORMAL_VARIABLE);
+ auto declaration =
+ factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+ // Based on the preceding check, it doesn't matter what we pass as
+ // allow_harmony_restrictive_generators and
+ // sloppy_mode_block_scope_function_redefinition.
+ bool ok = true;
+ DeclareVariable(declaration, VAR,
+ Variable::DefaultInitializationFlag(VAR), false,
+ nullptr, &ok);
+ CHECK(ok); // Based on the preceding check, this should not fail
+ } else {
+ DeclareVariableName(name, VAR);
+ }
}
- Expression* assignment = factory->NewAssignment(
- Token::ASSIGN, NewUnresolved(factory, name),
- delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
- Statement* statement =
- factory->NewExpressionStatement(assignment, kNoSourcePosition);
- delegate->set_statement(statement);
+ if (factory) {
+ Expression* assignment = factory->NewAssignment(
+ Token::ASSIGN, NewUnresolved(factory, name),
+ delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+ Statement* statement =
+ factory->NewExpressionStatement(assignment, kNoSourcePosition);
+ delegate->set_statement(statement);
+ }
}
}
}
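
The hoisting loop above walks from each sloppy block function's scope outwards and refuses to hoist when an intervening scope binds the same name lexically (the Annex B.3.3 restriction). A hedged sketch of that conflict walk, with invented stand-in types:

// Illustrative sketch of the hoisting conflict check; ScopeSketch and its
// fields are inventions for this example, not V8 types.
#include <cassert>
#include <set>
#include <string>

struct ScopeSketch {
  std::set<std::string> lexical;  // let/const/class bindings in this scope
  ScopeSketch* outer = nullptr;
};

// Hoisting a sloppy block function `name` is allowed only if no scope
// between the query scope and the target declaration scope (exclusive)
// declares `name` lexically.
bool ShouldHoist(ScopeSketch* query, ScopeSketch* target,
                 const std::string& name) {
  for (ScopeSketch* s = query; s != target; s = s->outer) {
    if (s->lexical.count(name)) return false;
  }
  return true;
}

int main() {
  ScopeSketch fn, block;
  block.outer = &fn;
  assert(ShouldHoist(&block, &fn, "f"));
  block.lexical.insert("f");  // a `let f` in an intervening scope
  assert(!ShouldHoist(&block, &fn, "f"));
}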
void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
+ RuntimeCallTimerScope runtimeTimer(info->isolate(),
+ &RuntimeCallStats::CompileScopeAnalysis);
DCHECK(info->literal() != NULL);
DeclarationScope* scope = info->literal()->scope();
@@ -542,13 +600,15 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
scope->HoistSloppyBlockFunctions(&factory);
}
- // We are compiling one of three cases:
+ // We are compiling one of four cases:
// 1) top-level code,
// 2) a function/eval/module on the top-level
// 3) a function/eval in a scope that was already resolved.
+ // 4) an asm.js function
DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
- scope->outer_scope()->already_resolved_);
+ scope->outer_scope()->already_resolved_ ||
+ (info->asm_function_scope() && scope->is_function_scope()));
// The outer scope is never lazy.
scope->set_should_eager_compile();
@@ -577,11 +637,11 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
DCHECK(is_declaration_scope());
DCHECK(has_this_declaration());
- bool subclass_constructor = IsSubclassConstructor(function_kind_);
- Variable* var = Declare(
- zone(), this, ast_value_factory->this_string(),
- subclass_constructor ? CONST : VAR, THIS_VARIABLE,
- subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
+ bool derived_constructor = IsDerivedConstructor(function_kind_);
+ Variable* var =
+ Declare(zone(), ast_value_factory->this_string(),
+ derived_constructor ? CONST : VAR, THIS_VARIABLE,
+ derived_constructor ? kNeedsInitialization : kCreatedInitialized);
receiver_ = var;
}
@@ -594,8 +654,7 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Declare 'arguments' variable which exists in all non arrow functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
- arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
- VAR, NORMAL_VARIABLE, kCreatedInitialized);
+ arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
} else if (IsLexicalVariableMode(arguments_->mode())) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
@@ -609,14 +668,12 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
DCHECK(!is_arrow_scope());
DeclareThis(ast_value_factory);
- new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
- CONST, NORMAL_VARIABLE, kCreatedInitialized);
+ new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
this_function_ =
- Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
- NORMAL_VARIABLE, kCreatedInitialized);
+ Declare(zone(), ast_value_factory->this_function_string(), CONST);
}
}
@@ -637,23 +694,12 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
}
bool Scope::HasBeenRemoved() const {
- // TODO(neis): Store this information somewhere instead of calculating it.
-
- if (!is_block_scope()) return false; // Shortcut.
-
- Scope* parent = outer_scope();
- if (parent == nullptr) {
- DCHECK(is_script_scope());
- return false;
- }
-
- Scope* sibling = parent->inner_scope();
- for (; sibling != nullptr; sibling = sibling->sibling()) {
- if (sibling == this) return false;
+ if (sibling() == this) {
+ DCHECK_NULL(inner_scope_);
+ DCHECK(is_block_scope());
+ return true;
}
-
- DCHECK_NULL(inner_scope_);
- return true;
+ return false;
}
Scope* Scope::GetUnremovedScope() {
@@ -667,6 +713,7 @@ Scope* Scope::GetUnremovedScope() {
Scope* Scope::FinalizeBlockScope() {
DCHECK(is_block_scope());
+ DCHECK(!HasBeenRemoved());
if (variables_.occupancy() > 0 ||
(is_declaration_scope() && calls_sloppy_eval())) {
@@ -705,7 +752,12 @@ Scope* Scope::FinalizeBlockScope() {
PropagateUsageFlagsToScope(outer_scope_);
// This block does not need a context.
num_heap_slots_ = 0;
- return NULL;
+
+ // Mark scope as removed by making it its own sibling.
+ sibling_ = this;
+ DCHECK(HasBeenRemoved());
+
+ return nullptr;
}
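
FinalizeBlockScope() now marks a removed scope by pointing its sibling link at itself, so HasBeenRemoved() above reduces to a single pointer comparison instead of the old linear search through the parent's inner-scope list. The sentinel idea in isolation:

// Tiny sketch of the self-pointer removal sentinel (illustrative only).
#include <cassert>

struct Node {
  Node* sibling = nullptr;
  bool HasBeenRemoved() const { return sibling == this; }
  void MarkRemoved() { sibling = this; }  // a node is never its own sibling
};

int main() {
  Node n;
  assert(!n.HasBeenRemoved());
  n.MarkRemoved();
  assert(n.HasBeenRemoved());
}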
void DeclarationScope::AddLocal(Variable* var) {
@@ -715,13 +767,13 @@ void DeclarationScope::AddLocal(Variable* var) {
locals_.Add(var);
}
-Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name,
+Variable* Scope::Declare(Zone* zone, const AstRawString* name,
VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag) {
bool added;
Variable* var =
- variables_.Declare(zone, scope, name, mode, kind, initialization_flag,
+ variables_.Declare(zone, this, name, mode, kind, initialization_flag,
maybe_assigned_flag, &added);
if (added) locals_.Add(var);
return var;
@@ -796,6 +848,7 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
DCHECK(!already_resolved_);
DCHECK(!other->already_resolved_);
if (calls_eval()) other->RecordEvalCall();
+ if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
}
Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
@@ -869,12 +922,13 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_);
DCHECK(!is_optional || !is_rest);
+ DCHECK(!is_being_lazily_parsed_);
+ DCHECK(!was_lazily_parsed_);
Variable* var;
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
- var =
- Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
+ var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
@@ -894,8 +948,9 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
- return Declare(zone(), this, name, mode, kind, init_flag,
- maybe_assigned_flag);
+ DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
+ DCHECK(!GetDeclarationScope()->was_lazily_parsed());
+ return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
Variable* Scope::DeclareVariable(
@@ -904,6 +959,8 @@ Variable* Scope::DeclareVariable(
bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
+ DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
+ DCHECK(!GetDeclarationScope()->was_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
@@ -1002,6 +1059,28 @@ Variable* Scope::DeclareVariable(
return var;
}
+void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
+ DCHECK(IsDeclaredVariableMode(mode));
+ DCHECK(!already_resolved_);
+ DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+
+ if (mode == VAR && !is_declaration_scope()) {
+ return GetDeclarationScope()->DeclareVariableName(name, mode);
+ }
+ DCHECK(!is_with_scope());
+ DCHECK(!is_eval_scope());
+ // Unlike DeclareVariable, DeclareVariableName allows declaring variables in
+ // catch scopes: Parser::RewriteCatchPattern bypasses DeclareVariable by
+ // calling DeclareLocal directly, and it doesn't make sense to add a similar
+ // bypass mechanism for PreParser.
+ DCHECK(is_declaration_scope() || (IsLexicalVariableMode(mode) &&
+ (is_block_scope() || is_catch_scope())));
+ DCHECK(scope_info_.is_null());
+
+ // Declare the variable in the declaration scope.
+ variables_.DeclareName(zone(), name, mode);
+}
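
DeclareVariableName() records only that a name is bound and whether the binding is var-style or lexical; together with the dummy-variable insertion at the top of this hunk it lets the preparser skip materializing Variable objects. A sketch of the idea with invented names:

// Sketch: during preparsing only names (and var vs. lexical kind) are
// recorded, using two shared sentinel objects instead of real Variables.
#include <cassert>
#include <string>
#include <unordered_map>

struct Variable {};  // placeholder; the preparser never materializes these

// Stand-ins for kDummyPreParserVariable / kDummyPreParserLexicalVariable.
static Variable kDummyVar;
static Variable kDummyLexical;

class NameMap {
 public:
  // Record that `name` is declared, remembering only var vs. lexical.
  void DeclareName(const std::string& name, bool is_lexical) {
    Variable*& slot = map_[name];  // lookup-or-insert
    if (slot == nullptr) slot = is_lexical ? &kDummyLexical : &kDummyVar;
  }
  bool IsLexical(const std::string& name) const {
    auto it = map_.find(name);
    return it != map_.end() && it->second == &kDummyLexical;
  }

 private:
  std::unordered_map<std::string, Variable*> map_;
};

int main() {
  NameMap names;
  names.DeclareName("x", /*is_lexical=*/false);
  names.DeclareName("y", /*is_lexical=*/true);
  assert(!names.IsLexical("x"));
  assert(names.IsLexical("y"));
}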
+
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
int start_position, VariableKind kind) {
@@ -1009,7 +1088,7 @@ VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved_);
- DCHECK_EQ(!needs_migration_, factory->zone() == zone());
+ DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy;
@@ -1026,8 +1105,7 @@ void Scope::AddUnresolved(VariableProxy* proxy) {
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
- return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind,
- kCreatedInitialized);
+ return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
}
@@ -1050,26 +1128,6 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
return false;
}
-bool Scope::RemoveUnresolved(const AstRawString* name) {
- if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
- VariableProxy* removed = unresolved_;
- unresolved_ = unresolved_->next_unresolved();
- removed->set_next_unresolved(nullptr);
- return true;
- }
- VariableProxy* current = unresolved_;
- while (current != nullptr) {
- VariableProxy* next = current->next_unresolved();
- if (next != nullptr && next->raw_name() == name) {
- current->set_next_unresolved(next->next_unresolved());
- next->set_next_unresolved(nullptr);
- return true;
- }
- current = next;
- }
- return false;
-}
-
Variable* Scope::NewTemporary(const AstRawString* name) {
DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone())
@@ -1157,9 +1215,9 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
// guaranteed to be correct.
for (const Scope* s = this; s != outer; s = s->outer_scope_) {
// Eval forces context allocation on all outer scopes, so we don't need to
- // look at those scopes. Sloppy eval makes all top-level variables dynamic,
- // whereas strict-mode requires context allocation.
- if (s->is_eval_scope()) return !is_strict(s->language_mode());
+ // look at those scopes. Sloppy eval makes top-level non-lexical variables
+ // dynamic, whereas strict-mode requires context allocation.
+ if (s->is_eval_scope()) return is_sloppy(s->language_mode());
// Catch scopes force context allocation of all variables.
if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation.
@@ -1276,7 +1334,7 @@ Scope* Scope::GetOuterScopeWithContext() {
Handle<StringSet> DeclarationScope::CollectNonLocals(
ParseInfo* info, Handle<StringSet> non_locals) {
- VariableProxy* free_variables = FetchFreeVariables(this, true, info);
+ VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(non_locals, proxy->name());
@@ -1292,21 +1350,30 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
params_.Clear();
decls_.Clear();
locals_.Clear();
- sloppy_block_function_map_.Clear();
- variables_.Clear();
- // Make sure we won't walk the scope tree from here on.
inner_scope_ = nullptr;
unresolved_ = nullptr;
- if (aborted && !IsArrowFunction(function_kind_)) {
- DeclareDefaultFunctionVariables(ast_value_factory);
+ if (aborted) {
+ // Prepare scope for use in the outer zone.
+ zone_ = ast_value_factory->zone();
+ variables_.Reset(ZoneAllocationPolicy(zone_));
+ sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
+ if (!IsArrowFunction(function_kind_)) {
+ DeclareDefaultFunctionVariables(ast_value_factory);
+ }
+ } else {
+ // Make sure this scope isn't used for allocation anymore.
+ zone_ = nullptr;
+ variables_.Invalidate();
+ sloppy_block_function_map_.Invalidate();
}
#ifdef DEBUG
needs_migration_ = false;
+ is_being_lazily_parsed_ = false;
#endif
- is_lazily_parsed_ = !aborted;
+ was_lazily_parsed_ = !aborted;
}
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
@@ -1317,9 +1384,8 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
- for (VariableProxy* proxy =
- FetchFreeVariables(this, !FLAG_lazy_inner_functions);
- proxy != nullptr; proxy = proxy->next_unresolved()) {
+ for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
DCHECK(!proxy->is_resolved());
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
copy->set_next_unresolved(unresolved);
@@ -1339,8 +1405,10 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
}
#ifdef DEBUG
-static const char* Header(ScopeType scope_type, FunctionKind function_kind,
- bool is_declaration_scope) {
+namespace {
+
+const char* Header(ScopeType scope_type, FunctionKind function_kind,
+ bool is_declaration_scope) {
switch (scope_type) {
case EVAL_SCOPE: return "eval";
// TODO(adamk): Should we print concise method scopes specially?
@@ -1359,18 +1427,13 @@ static const char* Header(ScopeType scope_type, FunctionKind function_kind,
return NULL;
}
+void Indent(int n, const char* str) { PrintF("%*s%s", n, "", str); }
-static void Indent(int n, const char* str) {
- PrintF("%*s%s", n, "", str);
-}
-
-
-static void PrintName(const AstRawString* name) {
+void PrintName(const AstRawString* name) {
PrintF("%.*s", name->length(), name->raw_data());
}
-
-static void PrintLocation(Variable* var) {
+void PrintLocation(Variable* var) {
switch (var->location()) {
case VariableLocation::UNALLOCATED:
break;
@@ -1392,45 +1455,48 @@ static void PrintLocation(Variable* var) {
}
}
-
-static void PrintVar(int indent, Variable* var) {
- if (var->is_used() || !var->IsUnallocated()) {
- Indent(indent, VariableMode2String(var->mode()));
- PrintF(" ");
- if (var->raw_name()->IsEmpty())
- PrintF(".%p", reinterpret_cast<void*>(var));
- else
- PrintName(var->raw_name());
- PrintF("; // ");
- PrintLocation(var);
- bool comma = !var->IsUnallocated();
- if (var->has_forced_context_allocation()) {
- if (comma) PrintF(", ");
- PrintF("forced context allocation");
- comma = true;
- }
- if (var->maybe_assigned() == kNotAssigned) {
- if (comma) PrintF(", ");
- PrintF("never assigned");
- }
- PrintF("\n");
+void PrintVar(int indent, Variable* var) {
+ Indent(indent, VariableMode2String(var->mode()));
+ PrintF(" ");
+ if (var->raw_name()->IsEmpty())
+ PrintF(".%p", reinterpret_cast<void*>(var));
+ else
+ PrintName(var->raw_name());
+ PrintF("; // ");
+ PrintLocation(var);
+ bool comma = !var->IsUnallocated();
+ if (var->has_forced_context_allocation()) {
+ if (comma) PrintF(", ");
+ PrintF("forced context allocation");
+ comma = true;
+ }
+ if (var->maybe_assigned() == kNotAssigned) {
+ if (comma) PrintF(", ");
+ PrintF("never assigned");
}
+ PrintF("\n");
}
-static void PrintMap(int indent, VariableMap* map, bool locals) {
+void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
+ Variable* function_var) {
+ bool printed_label = false;
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
+ if (var == function_var) continue;
bool local = !IsDynamicVariableMode(var->mode());
- if (locals ? local : !local) {
- if (var == nullptr) {
- Indent(indent, "<?>\n");
- } else {
- PrintVar(indent, var);
+ if ((locals ? local : !local) &&
+ (var->is_used() || !var->IsUnallocated())) {
+ if (!printed_label) {
+ Indent(indent, label);
+ printed_label = true;
}
+ PrintVar(indent, var);
}
}
}
+} // anonymous namespace
+
void DeclarationScope::PrintParameters() {
PrintF(" (");
for (int i = 0; i < params_.length(); i++) {
@@ -1487,9 +1553,12 @@ void Scope::Print(int n) {
if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
if (is_declaration_scope()) {
DeclarationScope* scope = AsDeclarationScope();
- if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
+ if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
}
+ if (has_forced_context_allocation()) {
+ Indent(n1, "// forces context allocation\n");
+ }
if (num_stack_slots_ > 0) {
Indent(n1, "// ");
PrintF("%d stack slots\n", num_stack_slots_);
@@ -1505,12 +1574,22 @@ void Scope::Print(int n) {
PrintVar(n1, function);
}
- if (variables_.Start() != NULL) {
- Indent(n1, "// local vars:\n");
- PrintMap(n1, &variables_, true);
+ // Print temporaries.
+ {
+ bool printed_header = false;
+ for (Variable* local : locals_) {
+ if (local->mode() != TEMPORARY) continue;
+ if (!printed_header) {
+ printed_header = true;
+ Indent(n1, "// temporary vars:\n");
+ }
+ PrintVar(n1, local);
+ }
+ }
- Indent(n1, "// dynamic vars:\n");
- PrintMap(n1, &variables_, false);
+ if (variables_.occupancy() > 0) {
+ PrintMap(n1, "// local vars:\n", &variables_, true, function);
+ PrintMap(n1, "// dynamic vars:\n", &variables_, false, function);
}
// Print inner scopes (disable by providing negative n).
@@ -1539,6 +1618,12 @@ void Scope::CheckScopePositions() {
void Scope::CheckZones() {
DCHECK(!needs_migration_);
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+ if (scope->is_declaration_scope() &&
+ scope->AsDeclarationScope()->was_lazily_parsed()) {
+ DCHECK_NULL(scope->zone());
+ DCHECK_NULL(scope->inner_scope_);
+ continue;
+ }
CHECK_EQ(scope->zone(), zone());
scope->CheckZones();
}
@@ -1548,8 +1633,7 @@ void Scope::CheckZones() {
Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
// Declare a new non-local.
DCHECK(IsDynamicVariableMode(mode));
- Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
- kCreatedInitialized);
+ Variable* var = variables_.Declare(zone(), nullptr, name, mode);
// Allocate it by giving it a dynamic lookup.
var->AllocateTo(VariableLocation::LOOKUP, -1);
return var;
@@ -1590,6 +1674,13 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
// The variable could not be resolved statically.
if (var == nullptr) return var;
+ // TODO(marja): Separate LookupRecursive for preparsed scopes better.
+ if (var == kDummyPreParserVariable || var == kDummyPreParserLexicalVariable) {
+ DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+ DCHECK(FLAG_lazy_inner_functions);
+ return var;
+ }
+
if (is_function_scope() && !var->is_dynamic()) {
var->ForceContextAllocation();
}
@@ -1641,34 +1732,20 @@ void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
DCHECK(!proxy->is_resolved());
Variable* var = LookupRecursive(proxy, nullptr);
ResolveTo(info, proxy, var);
-
- if (FLAG_lazy_inner_functions) {
- if (info != nullptr && info->is_native()) return;
- // Pessimistically force context allocation for all variables to which inner
- // scope variables could potentially resolve to.
- Scope* scope = GetClosureScope()->outer_scope_;
- while (scope != nullptr && scope->scope_info_.is_null()) {
- var = scope->LookupLocal(proxy->raw_name());
- if (var != nullptr) {
- // Since we don't lazy parse inner arrow functions, inner functions
- // cannot refer to the outer "this".
- if (!var->is_dynamic() && !var->is_this() &&
- !var->has_forced_context_allocation()) {
- var->ForceContextAllocation();
- var->set_is_used();
- // We don't know what the (potentially lazy parsed) inner function
- // does with the variable; pessimistically assume that it's assigned.
- var->set_maybe_assigned();
- }
- }
- scope = scope->outer_scope_;
- }
- }
}
namespace {
bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+ if (var->mode() == DYNAMIC_LOCAL) {
+ // Dynamically introduced variables never need a hole check (since they're
+ // VAR bindings, either from var or function declarations), but the variable
+ // they shadow might need a hole check, which we want to do if we decide
+ // that no shadowing variable was dynamically introduced.
+ DCHECK(!var->binding_needs_init());
+ return AccessNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
+ }
+
if (!var->binding_needs_init()) {
return false;
}
@@ -1703,8 +1780,7 @@ bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
}
if (var->is_this()) {
- DCHECK(
- IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
+ DCHECK(IsDerivedConstructor(scope->GetDeclarationScope()->function_kind()));
// TODO(littledan): implement 'this' hole check elimination.
return true;
}
@@ -1749,37 +1825,65 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
void Scope::ResolveVariablesRecursively(ParseInfo* info) {
DCHECK(info->script_scope()->is_script_scope());
+ // Lazily parsed declaration scopes are already partially analyzed. If there
+ // unresolved references remaining, they just need to be resolved in outer
+ // scopes.
+ if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
+ DCHECK(variables_.occupancy() == 0);
+ for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
+ if (!var->is_dynamic()) {
+ var->set_is_used();
+ var->ForceContextAllocation();
+ if (proxy->is_assigned()) var->set_maybe_assigned();
+ }
+ }
+ } else {
+ // Resolve unresolved variables for this scope.
+ for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+ proxy = proxy->next_unresolved()) {
+ ResolveVariable(info, proxy);
+ }
- // Resolve unresolved variables for this scope.
- for (VariableProxy* proxy = unresolved_; proxy != nullptr;
- proxy = proxy->next_unresolved()) {
- ResolveVariable(info, proxy);
- }
-
- // Resolve unresolved variables for inner scopes.
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- scope->ResolveVariablesRecursively(info);
+ // Resolve unresolved variables for inner scopes.
+ for (Scope* scope = inner_scope_; scope != nullptr;
+ scope = scope->sibling_) {
+ scope->ResolveVariablesRecursively(info);
+ }
}
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
- bool try_to_resolve, ParseInfo* info,
+ ParseInfo* info,
VariableProxy* stack) {
+ // Lazily parsed declaration scopes are already partially analyzed. If there
+ // unresolved references remaining, they just need to be resolved in outer
+ // scopes.
+ Scope* lookup =
+ is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
+ ? outer_scope()
+ : this;
for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
proxy = next) {
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
- Variable* var = nullptr;
- if (try_to_resolve) {
- var = LookupRecursive(proxy, max_outer_scope->outer_scope());
- }
+ Variable* var =
+ lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
- } else if (info != nullptr) {
- ResolveTo(info, proxy, var);
- } else {
- var->set_is_used();
+ } else if (var != kDummyPreParserVariable &&
+ var != kDummyPreParserLexicalVariable) {
+ if (info != nullptr) {
+ // In this case we need to leave the scopes in a state in which variable
+ // allocation can still be performed. If we resolved variables from lazily
+ // parsed scopes, we need to context-allocate the var.
+ ResolveTo(info, proxy, var);
+ if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+ } else {
+ var->set_is_used();
+ }
}
}
@@ -1787,8 +1891,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
unresolved_ = nullptr;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- stack =
- scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
+ stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
}
return stack;
@@ -1823,7 +1926,10 @@ bool Scope::MustAllocateInContext(Variable* var) {
if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
if (is_catch_scope()) return true;
- if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
+ if ((is_script_scope() || is_eval_scope()) &&
+ IsLexicalVariableMode(var->mode())) {
+ return true;
+ }
return var->has_forced_context_allocation() || inner_scope_calls_eval_;
}
@@ -1880,6 +1986,7 @@ void DeclarationScope::AllocateParameterLocals() {
DCHECK_EQ(this, var->scope());
if (uses_sloppy_arguments) {
var->set_is_used();
+ var->set_maybe_assigned();
var->ForceContextAllocation();
}
AllocateParameter(var, i);
@@ -1969,7 +2076,7 @@ void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_EQ(0, num_stack_slots_);
// Don't allocate variables of preparsed scopes.
- if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
+ if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
return;
}
@@ -1994,9 +2101,9 @@ void Scope::AllocateVariablesRecursively() {
// Force allocation of a context for this scope if necessary. For a 'with'
// scope and for a function scope that makes an 'eval' call we need a context,
// even if no local variables were statically allocated in the scope.
- // Likewise for modules.
+ // Likewise for modules and function scopes representing asm.js modules.
bool must_have_context =
- is_with_scope() || is_module_scope() ||
+ is_with_scope() || is_module_scope() || IsAsmModule() ||
(is_function_scope() && calls_sloppy_eval()) ||
(is_block_scope() && is_declaration_scope() && calls_sloppy_eval());
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index c7d88aca11..49cfdffba7 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -9,6 +9,7 @@
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
+#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -20,6 +21,7 @@ class AstRawString;
class Declaration;
class ParseInfo;
class SloppyBlockFunctionStatement;
+class Statement;
class StringSet;
class VariableProxy;
@@ -28,11 +30,16 @@ class VariableMap: public ZoneHashMap {
public:
explicit VariableMap(Zone* zone);
- Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
- VariableMode mode, VariableKind kind,
- InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
- bool* added = nullptr);
+ Variable* Declare(
+ Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode,
+ VariableKind kind = NORMAL_VARIABLE,
+ InitializationFlag initialization_flag = kCreatedInitialized,
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+ bool* added = nullptr);
+
+ // Records that "name" exists (if not recorded yet) but doesn't create a
+ // Variable. Useful for preparsing.
+ void DeclareName(Zone* zone, const AstRawString* name, VariableMode mode);
Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@@ -43,9 +50,24 @@ class VariableMap: public ZoneHashMap {
// Sloppy block-scoped function declarations to var-bind
class SloppyBlockFunctionMap : public ZoneHashMap {
public:
+ class Delegate : public ZoneObject {
+ public:
+ explicit Delegate(Scope* scope,
+ SloppyBlockFunctionStatement* statement = nullptr)
+ : scope_(scope), statement_(statement), next_(nullptr) {}
+ void set_statement(Statement* statement);
+ void set_next(Delegate* next) { next_ = next; }
+ Delegate* next() const { return next_; }
+ Scope* scope() const { return scope_; }
+
+ private:
+ Scope* scope_;
+ SloppyBlockFunctionStatement* statement_;
+ Delegate* next_;
+ };
+
explicit SloppyBlockFunctionMap(Zone* zone);
- void Declare(Zone* zone, const AstRawString* name,
- SloppyBlockFunctionStatement* statement);
+ void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
};
enum class AnalyzeMode { kRegular, kDebugger };
@@ -148,7 +170,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag, VariableKind kind,
+ InitializationFlag init_flag = kCreatedInitialized,
+ VariableKind kind = NORMAL_VARIABLE,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
@@ -157,6 +180,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
+ void DeclareVariableName(const AstRawString* name, VariableMode mode);
+
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
@@ -177,7 +202,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// allocated globally as a "ghost" variable. RemoveUnresolved removes
// such a variable again if it was added; otherwise this is a no-op.
bool RemoveUnresolved(VariableProxy* var);
- bool RemoveUnresolved(const AstRawString* name);
// Creates a new temporary variable in this scope's TemporaryScope. The
// name is only used for printing and cannot be used to find the variable.
@@ -207,14 +231,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Scope-specific info.
// Inform the scope and outer scopes that the corresponding code contains an
- // eval call. We don't record eval calls from innner scopes in the outer most
- // script scope, as we only see those when parsing eagerly. If we recorded the
- // calls then, the outer most script scope would look different depending on
- // whether we parsed eagerly or not which is undesirable.
+ // eval call.
void RecordEvalCall() {
scope_calls_eval_ = true;
inner_scope_calls_eval_ = true;
- for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
+ for (Scope* scope = outer_scope(); scope != nullptr;
scope = scope->outer_scope()) {
scope->inner_scope_calls_eval_ = true;
}
@@ -303,6 +324,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool calls_sloppy_eval() const {
return scope_calls_eval_ && is_sloppy(language_mode());
}
+ bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
bool IsAsmModule() const;
bool IsAsmFunction() const;
// Does this scope have the potential to execute declarations non-linearly?
@@ -423,6 +445,22 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
+ bool RemoveInnerScope(Scope* inner_scope) {
+ DCHECK_NOT_NULL(inner_scope);
+ if (inner_scope == inner_scope_) {
+ inner_scope_ = inner_scope_->sibling_;
+ return true;
+ }
+ for (Scope* scope = inner_scope_; scope != nullptr;
+ scope = scope->sibling_) {
+ if (scope->sibling_ == inner_scope) {
+ scope->sibling_ = scope->sibling_->sibling_;
+ return true;
+ }
+ }
+ return false;
+ }
+
protected:
explicit Scope(Zone* zone);
@@ -431,10 +469,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
}
private:
- Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
- VariableMode mode, VariableKind kind,
- InitializationFlag initialization_flag,
- MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+ Variable* Declare(
+ Zone* zone, const AstRawString* name, VariableMode mode,
+ VariableKind kind = NORMAL_VARIABLE,
+ InitializationFlag initialization_flag = kCreatedInitialized,
+ MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// This method should only be invoked on scopes created during parsing (i.e.,
// not deserialized from a context). Also, since NeedsContext() is only
@@ -527,7 +566,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// list along the way, so full resolution cannot be done afterwards.
// If a ParseInfo* is passed, non-free variables will be resolved.
VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
- bool try_to_resolve = true,
ParseInfo* info = nullptr,
VariableProxy* stack = nullptr);
@@ -556,30 +594,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Handle<ScopeInfo> scope_info);
void AddInnerScope(Scope* inner_scope) {
- DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
inner_scope->sibling_ = inner_scope_;
inner_scope_ = inner_scope;
inner_scope->outer_scope_ = this;
}
- void RemoveInnerScope(Scope* inner_scope) {
- DCHECK_NOT_NULL(inner_scope);
- if (inner_scope == inner_scope_) {
- inner_scope_ = inner_scope_->sibling_;
- return;
- }
- for (Scope* scope = inner_scope_; scope != nullptr;
- scope = scope->sibling_) {
- if (scope->sibling_ == inner_scope) {
- scope->sibling_ = scope->sibling_->sibling_;
- return;
- }
- }
- }
-
void SetDefaults();
friend class DeclarationScope;
+ friend class ScopeTestHelper;
};
class DeclarationScope : public Scope {
@@ -616,7 +639,15 @@ class DeclarationScope : public Scope {
IsClassConstructor(function_kind())));
}
- bool is_lazily_parsed() const { return is_lazily_parsed_; }
+ bool was_lazily_parsed() const { return was_lazily_parsed_; }
+
+#ifdef DEBUG
+ void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
+ is_being_lazily_parsed_ = is_being_lazily_parsed;
+ }
+ bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
+#endif
+
bool ShouldEagerCompile() const;
void set_should_eager_compile();
@@ -629,7 +660,7 @@ class DeclarationScope : public Scope {
bool asm_module() const { return asm_module_; }
void set_asm_module();
bool asm_function() const { return asm_function_; }
- void set_asm_function() { asm_module_ = true; }
+ void set_asm_function() { asm_function_ = true; }
void DeclareThis(AstValueFactory* ast_value_factory);
void DeclareArguments(AstValueFactory* ast_value_factory);
@@ -736,10 +767,9 @@ class DeclarationScope : public Scope {
// initializers.
void AddLocal(Variable* var);
- void DeclareSloppyBlockFunction(const AstRawString* name,
- SloppyBlockFunctionStatement* statement) {
- sloppy_block_function_map_.Declare(zone(), name, statement);
- }
+ void DeclareSloppyBlockFunction(
+ const AstRawString* name, Scope* scope,
+ SloppyBlockFunctionStatement* statement = nullptr);
// Go through sloppy_block_function_map_ and hoist those (into this scope)
// which should be hoisted.
@@ -819,7 +849,11 @@ class DeclarationScope : public Scope {
// This scope uses "super" property ('super.foo').
bool scope_uses_super_property_ : 1;
bool should_eager_compile_ : 1;
- bool is_lazily_parsed_ : 1;
+ // Set to true after we have finished lazy parsing the scope.
+ bool was_lazily_parsed_ : 1;
+#if DEBUG
+ bool is_being_lazily_parsed_ : 1;
+#endif
// Parameter list in source order.
ZoneList<Variable*> params_;
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index 3771bfee12..f138727177 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -6,6 +6,7 @@
#include "src/ast/scopes.h"
#include "src/globals.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 247024ff41..fe3f6a4b70 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -88,6 +88,7 @@ namespace internal {
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGenerator, "Generator") \
+ V(kGetIterator, "GetIterator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
@@ -125,6 +126,7 @@ namespace internal {
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
+ V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
@@ -138,7 +140,6 @@ namespace internal {
V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
- V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsANumber, "Operand is a number") \
@@ -165,7 +166,7 @@ namespace internal {
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kOptimizationDisabledForTest, "Optimization disabled for test") \
- V(kOptimizedTooManyTimes, "Optimized too many times") \
+ V(kDeoptimizedTooManyTimes, "Deoptimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
@@ -263,7 +264,9 @@ namespace internal {
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
- "Should not directly enter OSR-compiled function")
+ "Should not directly enter OSR-compiled function") \
+ V(kUnexpectedReturnFromWasmTrap, \
+ "Should not return after throwing a wasm trap")
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
index c457f001fe..85f59ec942 100644
--- a/deps/v8/src/base.isolate
+++ b/deps/v8/src/base.isolate
@@ -4,6 +4,9 @@
{
'includes': [
'../third_party/icu/icu.isolate',
+
+ # MSVS runtime libraries.
+ '../gypfiles/win/msvs_dependencies.isolate',
],
'conditions': [
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index cf1f9c399d..896c25dab1 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -24,6 +24,9 @@
#ifndef POWER_8
#define POWER_8 0x10000
#endif
+#ifndef POWER_9
+#define POWER_9 0x20000
+#endif
#endif
#if V8_OS_POSIX
#include <unistd.h> // sysconf()
@@ -670,7 +673,9 @@ CPU::CPU()
part_ = -1;
if (auxv_cpu_type) {
- if (strcmp(auxv_cpu_type, "power8") == 0) {
+ if (strcmp(auxv_cpu_type, "power9") == 0) {
+ part_ = PPC_POWER9;
+ } else if (strcmp(auxv_cpu_type, "power8") == 0) {
part_ = PPC_POWER8;
} else if (strcmp(auxv_cpu_type, "power7") == 0) {
part_ = PPC_POWER7;
@@ -689,6 +694,9 @@ CPU::CPU()
#elif V8_OS_AIX
switch (_system_configuration.implementation) {
+ case POWER_9:
+ part_ = PPC_POWER9;
+ break;
case POWER_8:
part_ = PPC_POWER8;
break;
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index e0fcea1ca0..ef55b57559 100644
--- a/deps/v8/src/base/cpu.h
+++ b/deps/v8/src/base/cpu.h
@@ -69,6 +69,7 @@ class V8_BASE_EXPORT CPU final {
PPC_POWER6,
PPC_POWER7,
PPC_POWER8,
+ PPC_POWER9,
PPC_G4,
PPC_G5,
PPC_PA6T
diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h
index d2fc1337a6..e643b2f2ac 100644
--- a/deps/v8/src/base/hashmap.h
+++ b/deps/v8/src/base/hashmap.h
@@ -70,6 +70,14 @@ class TemplateHashMapImpl {
// Empties the hash map (occupancy() == 0).
void Clear();
+ // Empties the map and makes it unusable for allocation.
+ void Invalidate() {
+ AllocationPolicy::Delete(map_);
+ map_ = nullptr;
+ occupancy_ = 0;
+ capacity_ = 0;
+ }
+
// The number of (non-empty) entries in the table.
uint32_t occupancy() const { return occupancy_; }
@@ -89,6 +97,14 @@ class TemplateHashMapImpl {
Entry* Start() const;
Entry* Next(Entry* entry) const;
+ void Reset(AllocationPolicy allocator) {
+ Initialize(capacity_, allocator);
+ occupancy_ = 0;
+ }
+
+ protected:
+ void Initialize(uint32_t capacity, AllocationPolicy allocator);
+
private:
Entry* map_;
uint32_t capacity_;
@@ -102,7 +118,6 @@ class TemplateHashMapImpl {
Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
- void Initialize(uint32_t capacity, AllocationPolicy allocator);
void Resize(AllocationPolicy allocator);
};
template <typename Key, typename Value, typename MatchFun,
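
Invalidate() and Reset() give ZoneHashMap users two teardown modes: Invalidate() drops the backing store and leaves the map unusable (the success path of ResetAfterPreparsing above), while Reset() reinitializes an empty table so the map can keep being used, possibly with a different allocator (the aborted-preparse path). A sketch of the distinction; unlike the zone-allocated original it frees memory explicitly so it can stand alone:

// Illustrative sketch, not V8 code: two ways to empty a map.
#include <cassert>
#include <cstdlib>

struct MapSketch {
  int* slots = nullptr;
  unsigned capacity = 0, occupancy = 0;

  void Initialize(unsigned cap) {
    slots = static_cast<int*>(std::calloc(cap, sizeof(int)));
    capacity = cap;
  }
  void Invalidate() {  // map must not be used afterwards
    std::free(slots);
    slots = nullptr;
    capacity = occupancy = 0;
  }
  void Reset() {  // empty, but usable again
    std::free(slots);
    Initialize(8);
    occupancy = 0;
  }
};

int main() {
  MapSketch m;
  m.Initialize(8);
  m.Reset();
  assert(m.slots != nullptr);  // still usable after Reset
  m.Invalidate();
  assert(m.slots == nullptr);  // dead after Invalidate
}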
diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h
index e380dc338f..175698cd36 100644
--- a/deps/v8/src/base/iterator.h
+++ b/deps/v8/src/base/iterator.h
@@ -7,8 +7,6 @@
#include <iterator>
-#include "src/base/macros.h"
-
namespace v8 {
namespace base {
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index cadcb6f1de..c94fe9d693 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -14,9 +14,8 @@ namespace v8 {
namespace base {
// Explicit instantiations for commonly used comparisons.
-#define DEFINE_MAKE_CHECK_OP_STRING(type) \
- template std::string* MakeCheckOpString<type, type>( \
- type const&, type const&, char const*);
+#define DEFINE_MAKE_CHECK_OP_STRING(type) \
+ template std::string* MakeCheckOpString<type, type>(type, type, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
@@ -29,11 +28,11 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
// Explicit instantiations for floating point checks.
-#define DEFINE_CHECK_OP_IMPL(NAME) \
- template std::string* Check##NAME##Impl<float, float>( \
- float const& lhs, float const& rhs, char const* msg); \
- template std::string* Check##NAME##Impl<double, double>( \
- double const& lhs, double const& rhs, char const* msg);
+#define DEFINE_CHECK_OP_IMPL(NAME) \
+ template std::string* Check##NAME##Impl<float, float>(float lhs, float rhs, \
+ char const* msg); \
+ template std::string* Check##NAME##Impl<double, double>( \
+ double lhs, double rhs, char const* msg);
DEFINE_CHECK_OP_IMPL(EQ)
DEFINE_CHECK_OP_IMPL(NE)
DEFINE_CHECK_OP_IMPL(LE)
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index 7bbb82a485..cb2ff8f92f 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -55,13 +55,14 @@ namespace base {
// Helper macro for binary operators.
// Don't use this macro directly in your code, use CHECK_EQ et al below.
-#define CHECK_OP(name, op, lhs, rhs) \
- do { \
- if (std::string* _msg = ::v8::base::Check##name##Impl( \
- (lhs), (rhs), #lhs " " #op " " #rhs)) { \
- V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
- delete _msg; \
- } \
+#define CHECK_OP(name, op, lhs, rhs) \
+ do { \
+ if (std::string* _msg = \
+ ::v8::base::Check##name##Impl<decltype(lhs), decltype(rhs)>( \
+ (lhs), (rhs), #lhs " " #op " " #rhs)) { \
+ V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
+ delete _msg; \
+ } \
} while (0)
#else
@@ -73,13 +74,22 @@ namespace base {
#endif
+// Helper to determine how to pass values: Pass scalars and arrays by value,
+// others by const reference. std::decay<T> provides the type which should be
+// used to pass T by value, e.g. converts array to pointer and removes const,
+// volatile and reference.
+template <typename T>
+struct PassType : public std::conditional<
+ std::is_scalar<typename std::decay<T>::type>::value,
+ typename std::decay<T>::type, T const&> {};
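
A few static_asserts make the effect of PassType concrete; the helper is restated verbatim so the snippet is self-contained:

// Sketch checking what PassType computes: scalars (and decayed arrays)
// travel by value, class types by const reference.
#include <type_traits>

template <typename T>
struct PassType
    : std::conditional<std::is_scalar<typename std::decay<T>::type>::value,
                       typename std::decay<T>::type, T const&> {};

struct Big { char bytes[64]; };

static_assert(std::is_same<PassType<int>::type, int>::value,
              "scalars pass by value");
static_assert(std::is_same<PassType<const char[4]>::type, const char*>::value,
              "arrays decay to pointers and pass by value");
static_assert(std::is_same<PassType<Big>::type, Big const&>::value,
              "class types pass by const reference");

int main() {}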
// Build the error message string. This is separate from the "Impl"
// function template because it is not performance critical and so can
// be out of line, while the "Impl" code should be inline. Caller
// takes ownership of the returned string.
template <typename Lhs, typename Rhs>
-std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
+std::string* MakeCheckOpString(typename PassType<Lhs>::type lhs,
+ typename PassType<Rhs>::type rhs,
char const* msg) {
std::ostringstream ss;
ss << msg << " (" << lhs << " vs. " << rhs << ")";
@@ -90,7 +100,7 @@ std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
// in logging.cc.
#define DEFINE_MAKE_CHECK_OP_STRING(type) \
extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
- type const&, type const&, char const*);
+ type, type, char const*);
DEFINE_MAKE_CHECK_OP_STRING(int)
DEFINE_MAKE_CHECK_OP_STRING(long) // NOLINT(runtime/int)
DEFINE_MAKE_CHECK_OP_STRING(long long) // NOLINT(runtime/int)
@@ -101,27 +111,77 @@ DEFINE_MAKE_CHECK_OP_STRING(char const*)
DEFINE_MAKE_CHECK_OP_STRING(void const*)
#undef DEFINE_MAKE_CHECK_OP_STRING
+// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
+// signed, and Rhs is unsigned. False in all other cases.
+template <typename Lhs, typename Rhs>
+struct is_signed_vs_unsigned {
+ enum : bool {
+ value = std::is_integral<Lhs>::value && std::is_integral<Rhs>::value &&
+ std::is_signed<Lhs>::value && std::is_unsigned<Rhs>::value
+ };
+};
+// Same thing, other way around: Lhs is unsigned, Rhs signed.
+template <typename Lhs, typename Rhs>
+struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
+
+// Specialize the compare functions for signed vs. unsigned comparisons.
+// std::enable_if ensures that this template is only instantiable if both Lhs
+// and Rhs are integral types, and their signedness does not match.
+#define MAKE_UNSIGNED(Type, value) \
+ static_cast<typename std::make_unsigned<Type>::type>(value)
+#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL) \
+ template <typename Lhs, typename Rhs> \
+ V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
+ Cmp##NAME##Impl(Lhs const& lhs, Rhs const& rhs) { \
+ return IMPL; \
+ }
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
+ lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
+ lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
+ lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, EQ, CmpEQImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, NE, CmpNEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LT, CmpGTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LE, CmpGEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GT, CmpLTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GE, CmpLEImpl(rhs, lhs))
+#undef MAKE_UNSIGNED
+#undef DEFINE_SIGNED_MISMATCH_COMP
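
These specializations exist because the built-in comparison first converts a negative signed operand to a huge unsigned value; the Cmp helpers compare mathematically instead. A sketch re-implementing the LT case under invented names:

// Illustrative re-implementation of the signed-vs-unsigned LT helper.
#include <cassert>
#include <type_traits>

template <typename L, typename R>
bool CmpLTMixed(L lhs, R rhs) {  // signed L vs. unsigned R
  static_assert(std::is_signed<L>::value && std::is_unsigned<R>::value, "");
  // A negative lhs is smaller than any unsigned rhs; otherwise the cast to
  // unsigned is value-preserving and the plain comparison is safe.
  return lhs < 0 ||
         static_cast<typename std::make_unsigned<L>::type>(lhs) < rhs;
}

int main() {
  int lhs = -1;
  unsigned rhs = 1u;
  // assert(lhs < rhs);         // built-in: false, -1 converts to UINT_MAX
  assert(CmpLTMixed(lhs, rhs));  // mathematical result: -1 < 1
}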
// Helper functions for CHECK_OP macro.
-// The (int, int) specialization works around the issue that the compiler
-// will not instantiate the template version of the function on values of
-// unnamed enum type - see comment below.
// The (float, float) and (double, double) instantiations are explicitly
-// externialized to ensure proper 32/64-bit comparisons on x86.
+// externalized to ensure proper 32/64-bit comparisons on x86.
+// The Cmp##NAME##Impl function is only instantiable if one of the two types is
+// not integral or their signedness matches (i.e. whenever no specialization is
+// required, see above). Otherwise it is disabled by the enable_if construct,
+// and the compiler will pick a specialization from above.
#define DEFINE_CHECK_OP_IMPL(NAME, op) \
template <typename Lhs, typename Rhs> \
- V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs, \
- char const* msg) { \
- return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+ V8_INLINE \
+ typename std::enable_if<!is_signed_vs_unsigned<Lhs, Rhs>::value && \
+ !is_unsigned_vs_signed<Lhs, Rhs>::value, \
+ bool>::type \
+ Cmp##NAME##Impl(typename PassType<Lhs>::type lhs, \
+ typename PassType<Rhs>::type rhs) { \
+ return lhs op rhs; \
} \
- V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs, \
+ template <typename Lhs, typename Rhs> \
+ V8_INLINE std::string* Check##NAME##Impl(typename PassType<Lhs>::type lhs, \
+ typename PassType<Rhs>::type rhs, \
char const* msg) { \
- return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+ bool cmp = Cmp##NAME##Impl<Lhs, Rhs>(lhs, rhs); \
+ return V8_LIKELY(cmp) ? nullptr \
+ : MakeCheckOpString<Lhs, Rhs>(lhs, rhs, msg); \
} \
extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
- float const& lhs, float const& rhs, char const* msg); \
+ float lhs, float rhs, char const* msg); \
extern template V8_BASE_EXPORT std::string* \
- Check##NAME##Impl<double, double>(double const& lhs, double const& rhs, \
+ Check##NAME##Impl<double, double>(double lhs, double rhs, \
char const* msg);
DEFINE_CHECK_OP_IMPL(EQ, ==)
DEFINE_CHECK_OP_IMPL(NE, !=)
@@ -141,11 +201,6 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
-
-// Exposed for making debugging easier (to see where your function is being
-// called, just add a call to DumpBacktrace).
-void DumpBacktrace();
-
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h
index e3866173be..33a0ef0f4f 100644
--- a/deps/v8/src/base/macros.h
+++ b/deps/v8/src/base/macros.h
@@ -282,23 +282,4 @@ inline T RoundUp(T x, intptr_t m) {
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
-
-namespace v8 {
-namespace base {
-
-// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
-// which requires C++11. Switch to std::is_fundamental once possible.
-template <typename T>
-inline bool is_fundamental() {
- return false;
-}
-
-template <>
-inline bool is_fundamental<uint8_t>() {
- return true;
-}
-
-} // namespace base
-} // namespace v8
-
#endif // V8_BASE_MACROS_H_
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index a35d423210..cd52dfe881 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -19,7 +19,7 @@
// executable. Otherwise, OS raises an exception when executing code
// in that page.
#include <errno.h>
-#include <fcntl.h> // open
+#include <fcntl.h> // open
#include <stdarg.h>
#include <strings.h> // index
#include <sys/mman.h> // mmap & munmap
@@ -30,7 +30,7 @@
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
- (defined(__arm__) || defined(__aarch64__)) && \
+ (defined(__arm__) || defined(__aarch64__)) && \
!defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h> // NOLINT
#endif
@@ -49,21 +49,19 @@
namespace v8 {
namespace base {
-
#ifdef __arm__
bool OS::ArmUsingHardFloat() {
- // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
- // the Floating Point ABI used (PCS stands for Procedure Call Standard).
- // We use these as well as a couple of other defines to statically determine
- // what FP ABI used.
- // GCC versions 4.4 and below don't support hard-fp.
- // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
- // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000 \
- + __GNUC_MINOR__ * 100 \
- + __GNUC_PATCHLEVEL__)
+// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+// the Floating Point ABI used (PCS stands for Procedure Call Standard).
+// We use these as well as a couple of other defines to statically determine
+// which FP ABI is used.
+// GCC versions 4.4 and below don't support hard-fp.
+// GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+// __ARM_PCS_VFP.
+
+#define GCC_VERSION \
+ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
return true;
@@ -78,10 +76,11 @@ bool OS::ArmUsingHardFloat() {
#if defined(__ARM_PCS_VFP)
return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
- !defined(__VFP_FP__)
+ !defined(__VFP_FP__)
return false;
#else
-#error "Your version of compiler does not report the FP ABI compiled for." \
+#error \
+ "Your version of compiler does not report the FP ABI compiled for." \
"Please report it on this issue" \
"http://code.google.com/p/v8/issues/detail?id=2140"
@@ -92,17 +91,15 @@ bool OS::ArmUsingHardFloat() {
#endif // def __arm__
-
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
- time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+ time_t tv = static_cast<time_t>(std::floor(time / msPerSecond));
struct tm tm;
struct tm* t = localtime_r(&tv, &tm);
if (!t || !t->tm_zone) return "";
return t->tm_zone;
}
-
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
struct tm tm;
@@ -112,9 +109,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
+void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable) {
const size_t msize = RoundUp(requested, AllocateAlignment());
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
@@ -125,7 +120,6 @@ void* OS::Allocate(const size_t requested,
return mbase;
}
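// For illustration (not part of the patch): a hedged usage sketch of the
// OS::Allocate()/OS::Free() pair reformatted above. `allocated` receives the
// request rounded up to the allocation granularity and must be handed back
// to Free(). AllocateSketch is a hypothetical helper, not in the patch.
void AllocateSketch() {
  size_t allocated = 0;
  void* mem =
      v8::base::OS::Allocate(4096, &allocated, false /* is_executable */);
  if (mem != nullptr) {
    // ... use [mem, mem + allocated) ...
    v8::base::OS::Free(mem, allocated);
  }
}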
-
std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
std::vector<SharedLibraryAddress> result;
// This function assumes that the layout of the file is as follows:
@@ -169,8 +163,8 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
lib_name[strlen(lib_name) - 1] = '\0';
} else {
// No library name found, just record the raw address range.
- snprintf(lib_name, kLibNameLen,
- "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+ snprintf(lib_name, kLibNameLen, "%08" V8PRIxPTR "-%08" V8PRIxPTR, start,
+ end);
}
result.push_back(SharedLibraryAddress(lib_name, start, end));
} else {
@@ -187,7 +181,6 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
return result;
}
-
void OS::SignalCodeMovingGC() {
// Support for ll_prof.py.
//
@@ -203,38 +196,30 @@ void OS::SignalCodeMovingGC() {
OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
OS::Abort();
}
- void* addr = mmap(OS::GetRandomMmapAddr(), size,
- PROT_READ | PROT_EXEC,
+ void* addr = mmap(OS::GetRandomMmapAddr(), size, PROT_READ | PROT_EXEC,
MAP_PRIVATE, fileno(f), 0);
DCHECK_NE(MAP_FAILED, addr);
OS::Free(addr, size);
fclose(f);
}
-
// Constants used for mmap.
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) {}
VirtualMemory::VirtualMemory(size_t size)
- : address_(ReserveRegion(size)), size_(size) { }
-
+ : address_(ReserveRegion(size)), size_(size) {}
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
: address_(NULL), size_(0) {
DCHECK((alignment % OS::AllocateAlignment()) == 0);
- size_t request_size = RoundUp(size + alignment,
- static_cast<intptr_t>(OS::AllocateAlignment()));
- void* reservation = mmap(OS::GetRandomMmapAddr(),
- request_size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+ size_t request_size =
+ RoundUp(size + alignment, static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* reservation =
+ mmap(OS::GetRandomMmapAddr(), request_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (reservation == MAP_FAILED) return;
uint8_t* base = static_cast<uint8_t*>(reservation);
@@ -266,7 +251,6 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
#endif
}
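// For illustration (not part of the patch): the aligned constructor above
// over-reserves so that an aligned sub-range is guaranteed to exist, then
// trims the excess in code elided by this hunk. The arithmetic, with assumed
// example values:
//   request_size = RoundUp(size + alignment, OS::AllocateAlignment())
//   aligned_base = RoundUp(reservation_base, alignment)
// e.g. size = 12 KB, alignment = 16 KB, page size = 4 KB, base = 0x7001000:
//   request_size = 28 KB and aligned_base = 0x7004000, so the aligned range
//   [0x7004000, 0x7007000) lies inside the reservation [0x7001000, 0x7008000).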
-
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = ReleaseRegion(address(), size());
@@ -275,44 +259,33 @@ VirtualMemory::~VirtualMemory() {
}
}
-
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
+bool VirtualMemory::IsReserved() { return address_ != NULL; }
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
-
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
CHECK(InVM(address, size));
return CommitRegion(address, size, is_executable);
}
-
bool VirtualMemory::Uncommit(void* address, size_t size) {
CHECK(InVM(address, size));
return UncommitRegion(address, size);
}
-
bool VirtualMemory::Guard(void* address) {
CHECK(InVM(address, OS::CommitPageSize()));
OS::Guard(address, OS::CommitPageSize());
return true;
}
-
void* VirtualMemory::ReserveRegion(size_t size) {
- void* result = mmap(OS::GetRandomMmapAddr(),
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
- kMmapFd,
- kMmapFdOffset);
+ void* result =
+ mmap(OS::GetRandomMmapAddr(), size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, kMmapFd, kMmapFdOffset);
if (result == MAP_FAILED) return NULL;
@@ -322,14 +295,10 @@ void* VirtualMemory::ReserveRegion(size_t size) {
return result;
}
-
bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
- if (MAP_FAILED == mmap(base,
- size,
- prot,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
- kMmapFd,
+ if (MAP_FAILED == mmap(base, size, prot,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, kMmapFd,
kMmapFdOffset)) {
return false;
}
@@ -337,13 +306,9 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
return true;
}
-
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
- return mmap(base,
- size,
- PROT_NONE,
- MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
- kMmapFd,
+ return mmap(base, size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
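// For illustration (not part of the patch): CommitRegion() and
// UncommitRegion() both re-map the reserved range in place with MAP_FIXED;
// only the protection and the MAP_NORESERVE flag differ. A hedged sketch of
// that idea in isolation (<sys/mman.h> is already included by this file;
// CommitUncommitSketch is a hypothetical helper):
bool CommitUncommitSketch(void* base, size_t size) {
  // Commit: demand-zero, readable/writable pages replace the placeholder.
  bool committed =
      mmap(base, size, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) != MAP_FAILED;
  // Uncommit: back to an inaccessible mapping with no swap reservation.
  bool uncommitted =
      mmap(base, size, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED, -1,
           0) != MAP_FAILED;
  return committed && uncommitted;
}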
@@ -363,10 +328,7 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
return munmap(base, size) == 0;
}
-
-bool VirtualMemory::HasLazyCommits() {
- return true;
-}
+bool VirtualMemory::HasLazyCommits() { return true; }
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 3f4165de53..fd47931b6b 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -99,6 +99,20 @@ intptr_t OS::CommitPageSize() {
return page_size;
}
+void* OS::AllocateGuarded(const size_t requested) {
+ size_t allocated = 0;
+ const bool is_executable = false;
+ void* mbase = OS::Allocate(requested, &allocated, is_executable);
+ if (allocated != requested) {
+ OS::Free(mbase, allocated);
+ return nullptr;
+ }
+ if (mbase == nullptr) {
+ return nullptr;
+ }
+ OS::Guard(mbase, requested);
+ return mbase;
+}
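// For illustration (not part of the patch): AllocateGuarded() protects the
// whole region up front. A related sketch, built on the same APIs, places a
// single trailing guard page so overruns fault immediately
// (AllocateWithTrailingGuard is a hypothetical helper):
char* AllocateWithTrailingGuard(size_t pages) {
  size_t page = static_cast<size_t>(OS::CommitPageSize());
  size_t allocated = 0;
  char* base = static_cast<char*>(
      OS::Allocate((pages + 1) * page, &allocated, false /* is_executable */));
  if (base == nullptr) return nullptr;
  OS::Guard(base + allocated - page, page);  // accesses to the last page trap
  return base;  // usable range: [base, base + allocated - page)
}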
void OS::Free(void* address, const size_t size) {
// TODO(1240712): munmap has a return value which is ignored here.
@@ -129,6 +143,15 @@ void OS::Guard(void* address, const size_t size) {
#endif
}
+// Make a region of memory readable and writable.
+void OS::Unprotect(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+ DWORD oldprotect;
+ VirtualProtect(address, size, PAGE_READWRITE, &oldprotect);
+#else
+ mprotect(address, size, PROT_READ | PROT_WRITE);
+#endif
+}
static LazyInstance<RandomNumberGenerator>::type
platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 080e6bc0af..60b60fdcd2 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -797,6 +797,9 @@ void* OS::Allocate(const size_t requested,
return mbase;
}
+void* OS::AllocateGuarded(const size_t requested) {
+ return VirtualAlloc(nullptr, requested, MEM_RESERVE, PAGE_NOACCESS);
+}
void OS::Free(void* address, const size_t size) {
// TODO(1240712): VirtualFree has a return value which is ignored here.
@@ -821,6 +824,10 @@ void OS::Guard(void* address, const size_t size) {
VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
}
+void OS::Unprotect(void* address, const size_t size) {
+ LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE);
+ DCHECK_IMPLIES(result != nullptr, GetLastError() == 0);
+}
void OS::Sleep(TimeDelta interval) {
::Sleep(static_cast<DWORD>(interval.InMilliseconds()));
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 5d570e7048..374cddfc0c 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -178,6 +178,11 @@ class V8_BASE_EXPORT OS {
bool is_executable);
static void Free(void* address, const size_t size);
+  // Allocates a region of memory that is inaccessible. On Windows, this
+  // reserves but does not commit the memory. On POSIX systems, it is
+  // equivalent to a call to Allocate() followed by Guard().
+ static void* AllocateGuarded(const size_t requested);
+
// This is the granularity at which the ProtectCode(...) call can set page
// permissions.
static intptr_t CommitPageSize();
@@ -188,6 +193,9 @@ class V8_BASE_EXPORT OS {
// Assign memory as a guard page so that access will cause an exception.
static void Guard(void* address, const size_t size);
+ // Make a region of memory readable and writable.
+ static void Unprotect(void* address, const size_t size);
+
// Generate a random address to be used for hinting mmap().
static void* GetRandomMmapAddr();
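// For illustration (not part of the patch): the intended pairing of the two
// declarations above: reserve an inaccessible region, then make it usable on
// demand. Size and error handling are assumptions of this sketch, and
// GuardedRegionSketch is a hypothetical helper:
void GuardedRegionSketch() {
  const size_t kSize = 1 << 20;
  void* region = v8::base::OS::AllocateGuarded(kSize);
  if (region == nullptr) return;
  v8::base::OS::Unprotect(region, kSize);  // now readable and writable
  // ... use the region ...
}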
diff --git a/deps/v8/src/bit-vector.cc b/deps/v8/src/bit-vector.cc
index 0fbb01811a..e6aec7efb1 100644
--- a/deps/v8/src/bit-vector.cc
+++ b/deps/v8/src/bit-vector.cc
@@ -5,6 +5,7 @@
#include "src/bit-vector.h"
#include "src/base/bits.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/bit-vector.h b/deps/v8/src/bit-vector.h
index 13f9e97c30..fd61489c2a 100644
--- a/deps/v8/src/bit-vector.h
+++ b/deps/v8/src/bit-vector.h
@@ -166,7 +166,7 @@ class BitVector : public ZoneObject {
return true;
}
- bool Equals(const BitVector& other) {
+ bool Equals(const BitVector& other) const {
for (int i = 0; i < data_length_; i++) {
if (data_[i] != other.data_[i]) return false;
}
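// For illustration (not part of the patch): const-qualifying Equals() lets
// callers compare through const references, which was ill-formed before this
// change. SameBits is a hypothetical helper:
bool SameBits(const BitVector& a, const BitVector& b) { return a.Equals(b); }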
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index ba5f4d5c1d..db4712cc7f 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -21,6 +21,10 @@
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-js.h"
+#if V8_I18N_SUPPORT
+#include "src/i18n.h"
+#endif // V8_I18N_SUPPORT
+
namespace v8 {
namespace internal {
@@ -140,7 +144,8 @@ class Genesis BASE_EMBEDDED {
public:
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
GlobalContextType context_type);
Genesis(Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template);
@@ -178,20 +183,19 @@ class Genesis BASE_EMBEDDED {
// in through the API. We call this regardless of whether we are building a
// context from scratch or using a deserialized one from the partial snapshot
// but in the latter case we don't use the objects it produces directly, as
- // we have to used the deserialized ones that are linked together with the
- // rest of the context snapshot.
+ // we have to use the deserialized ones that are linked together with the
+ // rest of the context snapshot. At the end we link the global proxy and the
+ // context to each other.
Handle<JSGlobalObject> CreateNewGlobals(
v8::Local<v8::ObjectTemplate> global_proxy_template,
Handle<JSGlobalProxy> global_proxy);
- // Hooks the given global proxy into the context. If the context was created
- // by deserialization then this will unhook the global proxy that was
- // deserialized, leaving the GC to pick it up.
- void HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
- Handle<JSGlobalProxy> global_proxy);
// Similarly, we want to use the global that has been created by the templates
// passed through the API. The global from the snapshot is detached from the
// other objects in the snapshot.
void HookUpGlobalObject(Handle<JSGlobalObject> global_object);
+ // Hooks the given global proxy into the context in the case we do not
+ // replace the global object from the deserialized native context.
+ void HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy);
// The native context has a ScriptContextTable that stores declarative bindings
// made in script scopes. Add a "this" binding to that table pointing to the
// global proxy.
@@ -293,6 +297,7 @@ class Genesis BASE_EMBEDDED {
// prototype, maps.
Handle<Map> sloppy_function_map_writable_prototype_;
Handle<Map> strict_function_map_writable_prototype_;
+ Handle<Map> class_function_map_;
Handle<JSFunction> strict_poison_function_;
Handle<JSFunction> restricted_function_properties_thrower_;
@@ -310,10 +315,12 @@ Handle<Context> Bootstrapper::CreateEnvironment(
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
GlobalContextType context_type) {
HandleScope scope(isolate_);
Genesis genesis(isolate_, maybe_global_proxy, global_proxy_template,
- extensions, context_snapshot_index, context_type);
+ context_snapshot_index, internal_fields_deserializer,
+ context_type);
Handle<Context> env = genesis.result();
if (env.is_null() || !InstallExtensions(env, extensions)) {
return Handle<Context>();
@@ -332,14 +339,15 @@ Handle<JSGlobalProxy> Bootstrapper::NewRemoteContext(
}
void Bootstrapper::DetachGlobal(Handle<Context> env) {
- env->GetIsolate()->counters()->errors_thrown_per_context()->AddSample(
- env->GetErrorsThrown());
+ Isolate* isolate = env->GetIsolate();
+ isolate->counters()->errors_thrown_per_context()->AddSample(
+ env->GetErrorsThrown());
- Factory* factory = env->GetIsolate()->factory();
+ Heap* heap = isolate->heap();
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
- global_proxy->set_native_context(*factory->null_value());
- JSObject::ForceSetPrototype(global_proxy, factory->null_value());
- global_proxy->map()->SetConstructor(*factory->null_value());
+ global_proxy->set_native_context(heap->null_value());
+ JSObject::ForceSetPrototype(global_proxy, isolate->factory()->null_value());
+ global_proxy->map()->SetConstructor(heap->null_value());
if (FLAG_track_detached_contexts) {
env->GetIsolate()->AddDetachedContext(env);
}
@@ -507,6 +515,22 @@ Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
return fun;
}
+void InstallConstant(Isolate* isolate, Handle<JSObject> holder,
+ const char* name, Handle<Object> value) {
+ JSObject::AddProperty(
+ holder, isolate->factory()->NewStringFromAsciiChecked(name), value,
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+}
+
+void InstallSpeciesGetter(Handle<JSFunction> constructor) {
+ Factory* factory = constructor->GetIsolate()->factory();
+ // TODO(adamk): We should be able to share a SharedFunctionInfo
+  // between all these JSFunctions.
+ SimpleInstallGetter(constructor, factory->symbol_species_string(),
+ factory->species_symbol(), Builtins::kReturnReceiver,
+ true);
+}
+
} // namespace
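// For illustration (not part of the patch): the two helpers added above
// centralize recurring patterns. InstallConstant() adds a property with
// DONT_DELETE | DONT_ENUM | READ_ONLY attributes; InstallSpeciesGetter()
// installs constructor[Symbol.species] as a getter backed by
// Builtins::kReturnReceiver, i.e. one that simply returns its receiver.
// Hedged usage, mirroring calls that appear later in this patch:
//   InstallConstant(isolate, symbol_fun, "iterator",
//                   factory->iterator_symbol());
//   InstallSpeciesGetter(array_function);  // %Array%[Symbol.species]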
Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
@@ -589,8 +613,11 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Handle<String> source = factory->NewStringFromStaticChars("() {}");
Handle<Script> script = factory->NewScript(source);
script->set_type(Script::TYPE_NATIVE);
+ Handle<FixedArray> infos = factory->NewFixedArray(2);
+ script->set_shared_function_infos(*infos);
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
+ empty_function->shared()->set_function_literal_id(1);
empty_function->shared()->DontAdaptArguments();
SharedFunctionInfo::SetScript(handle(empty_function->shared()), script);
@@ -677,6 +704,10 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
strict_function_map_writable_prototype_ = factory()->CreateStrictFunctionMap(
FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
+ // Allocate map for classes
+ class_function_map_ = factory()->CreateClassFunctionMap(empty);
+ native_context()->set_class_function_map(*class_function_map_);
+
// Now that the strict mode function map is available, set up the
// restricted "arguments" and "caller" getters.
AddRestrictedFunctionProperties(empty);
@@ -689,7 +720,7 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
- Builtins::kIteratorPrototypeIterator, 0, true);
+ Builtins::kReturnReceiver, 0, true);
iterator_prototype_iterator->shared()->set_native(true);
JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
@@ -741,21 +772,11 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
// 04-14-15, section 25.2.4.3).
Handle<Map> strict_function_map(strict_function_map_writable_prototype_);
// Generator functions do not have "caller" or "arguments" accessors.
- Handle<Map> sloppy_generator_function_map =
- Map::Copy(strict_function_map, "SloppyGeneratorFunction");
- sloppy_generator_function_map->set_is_constructor(false);
- Map::SetPrototype(sloppy_generator_function_map,
- generator_function_prototype);
- native_context()->set_sloppy_generator_function_map(
- *sloppy_generator_function_map);
-
- Handle<Map> strict_generator_function_map =
- Map::Copy(strict_function_map, "StrictGeneratorFunction");
- strict_generator_function_map->set_is_constructor(false);
- Map::SetPrototype(strict_generator_function_map,
- generator_function_prototype);
- native_context()->set_strict_generator_function_map(
- *strict_generator_function_map);
+ Handle<Map> generator_function_map =
+ Map::Copy(strict_function_map, "GeneratorFunction");
+ generator_function_map->set_is_constructor(false);
+ Map::SetPrototype(generator_function_map, generator_function_prototype);
+ native_context()->set_generator_function_map(*generator_function_map);
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
@@ -777,17 +798,11 @@ void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
Handle<Map> strict_function_map(
native_context()->strict_function_without_prototype_map());
- Handle<Map> sloppy_async_function_map =
- Map::Copy(strict_function_map, "SloppyAsyncFunction");
- sloppy_async_function_map->set_is_constructor(false);
- Map::SetPrototype(sloppy_async_function_map, async_function_prototype);
- native_context()->set_sloppy_async_function_map(*sloppy_async_function_map);
-
- Handle<Map> strict_async_function_map =
- Map::Copy(strict_function_map, "StrictAsyncFunction");
- strict_async_function_map->set_is_constructor(false);
- Map::SetPrototype(strict_async_function_map, async_function_prototype);
- native_context()->set_strict_async_function_map(*strict_async_function_map);
+ Handle<Map> async_function_map =
+ Map::Copy(strict_function_map, "AsyncFunction");
+ async_function_map->set_is_constructor(false);
+ Map::SetPrototype(async_function_map, async_function_prototype);
+ native_context()->set_async_function_map(*async_function_map);
}
void Genesis::CreateJSProxyMaps() {
@@ -821,8 +836,8 @@ static void ReplaceAccessors(Handle<Map> map,
Handle<AccessorPair> accessor_pair) {
DescriptorArray* descriptors = map->instance_descriptors();
int idx = descriptors->SearchWithCache(map->GetIsolate(), *name, *map);
- AccessorConstantDescriptor descriptor(name, accessor_pair, attributes);
- descriptors->Replace(idx, &descriptor);
+ Descriptor d = Descriptor::AccessorConstant(name, accessor_pair, attributes);
+ descriptors->Replace(idx, &d);
}
void Genesis::AddRestrictedFunctionProperties(Handle<JSFunction> empty) {
@@ -976,30 +991,42 @@ Handle<JSGlobalObject> Genesis::CreateNewGlobals(
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
global_proxy_function->initial_map()->set_has_hidden_prototype(true);
+ native_context()->set_global_proxy_function(*global_proxy_function);
// Set global_proxy.__proto__ to js_global after ConfigureGlobalObjects
// Return the global proxy.
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
- return global_object;
-}
-
-void Genesis::HookUpGlobalProxy(Handle<JSGlobalObject> global_object,
- Handle<JSGlobalProxy> global_proxy) {
// Set the native context for the global object.
global_object->set_native_context(*native_context());
global_object->set_global_proxy(*global_proxy);
+ // Set the native context of the global proxy.
global_proxy->set_native_context(*native_context());
- // If we deserialized the context, the global proxy is already
- // correctly set up. Otherwise it's undefined.
+ // Set the global proxy of the native context. If the native context has been
+ // deserialized, the global proxy is already correctly set up by the
+ // deserializer. Otherwise it's undefined.
DCHECK(native_context()
->get(Context::GLOBAL_PROXY_INDEX)
->IsUndefined(isolate()) ||
native_context()->global_proxy() == *global_proxy);
native_context()->set_global_proxy(*global_proxy);
+
+ return global_object;
}
+void Genesis::HookUpGlobalProxy(Handle<JSGlobalProxy> global_proxy) {
+ // Re-initialize the global proxy with the global proxy function from the
+ // snapshot, and then set up the link to the native context.
+ Handle<JSFunction> global_proxy_function(
+ native_context()->global_proxy_function());
+ factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
+ Handle<JSObject> global_object(
+ JSObject::cast(native_context()->global_object()));
+ JSObject::ForceSetPrototype(global_proxy, global_object);
+ global_proxy->set_native_context(*native_context());
+ DCHECK(native_context()->global_proxy() == *global_proxy);
+}
void Genesis::HookUpGlobalObject(Handle<JSGlobalObject> global_object) {
Handle<JSGlobalObject> global_object_from_snapshot(
@@ -1082,8 +1109,8 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
Handle<AccessorInfo> error_stack =
Accessors::ErrorStackInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(error_stack->name())),
- error_stack, attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(error_stack->name())), error_stack, attribs);
initial_map->AppendDescriptor(&d);
}
}
@@ -1116,6 +1143,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Isolate* isolate = global_object->GetIsolate();
Factory* factory = isolate->factory();
+ native_context()->set_osr_code_table(*factory->empty_fixed_array());
+
Handle<ScriptContextTable> script_context_table =
factory->NewScriptContextTable();
native_context()->set_script_context_table(*script_context_table);
@@ -1129,8 +1158,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(object_function, factory->assign_string(),
Builtins::kObjectAssign, 2, false);
- SimpleInstallFunction(object_function, factory->create_string(),
- Builtins::kObjectCreate, 2, true);
SimpleInstallFunction(object_function, "getOwnPropertyDescriptor",
Builtins::kObjectGetOwnPropertyDescriptor, 2, false);
SimpleInstallFunction(object_function,
@@ -1147,6 +1174,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(object_function, "seal",
Builtins::kObjectSeal, 1, false);
+ Handle<JSFunction> object_create =
+ SimpleInstallFunction(object_function, factory->create_string(),
+ Builtins::kObjectCreate, 2, true);
+ native_context()->set_object_create(*object_create);
+
Handle<JSFunction> object_define_properties = SimpleInstallFunction(
object_function, "defineProperties",
Builtins::kObjectDefineProperties, 2, true);
@@ -1232,20 +1264,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Setup the methods on the %FunctionPrototype%.
SimpleInstallFunction(prototype, factory->apply_string(),
Builtins::kFunctionPrototypeApply, 2, false);
-
- if (FLAG_minimal) {
- SimpleInstallFunction(prototype, factory->bind_string(),
- Builtins::kFunctionPrototypeBind, 1, false);
- } else {
- FastFunctionBindStub bind_stub(isolate);
- Handle<JSFunction> bind_function = factory->NewFunctionWithoutPrototype(
- factory->bind_string(), bind_stub.GetCode(), false);
- bind_function->shared()->DontAdaptArguments();
- bind_function->shared()->set_length(1);
- InstallFunction(prototype, bind_function, factory->bind_string(),
- DONT_ENUM);
- }
-
+ SimpleInstallFunction(prototype, factory->bind_string(),
+ Builtins::kFastFunctionPrototypeBind, 1, false);
SimpleInstallFunction(prototype, factory->call_string(),
Builtins::kFunctionPrototypeCall, 1, false);
SimpleInstallFunction(prototype, factory->toString_string(),
@@ -1272,6 +1292,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
strict_function_map_writable_prototype_->SetConstructor(*function_fun);
+ class_function_map_->SetConstructor(*function_fun);
}
{ // --- A r r a y ---
@@ -1299,7 +1320,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<AccessorInfo> array_length =
Accessors::ArrayLengthInfo(isolate, attribs);
{ // Add length.
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(array_length->name())), array_length,
attribs);
initial_map->AppendDescriptor(&d);
@@ -1307,6 +1328,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate, array_function,
Context::ARRAY_FUNCTION_INDEX);
+ InstallSpeciesGetter(array_function);
// Cache the array maps, needed by ArrayConstructorStub
CacheInitialJSArrayMaps(native_context(), initial_map);
@@ -1523,8 +1545,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Accessors::StringLengthInfo(isolate, attribs));
{ // Add length.
- AccessorConstantDescriptor d(factory->length_string(), string_length,
- attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
+ string_length, attribs);
string_map->AppendDescriptor(&d);
}
@@ -1636,6 +1658,30 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
symbol_fun->shared()->DontAdaptArguments();
native_context()->set_symbol_function(*symbol_fun);
+ // Install the Symbol.for and Symbol.keyFor functions.
+ SimpleInstallFunction(symbol_fun, "for", Builtins::kSymbolFor, 1, false);
+ SimpleInstallFunction(symbol_fun, "keyFor", Builtins::kSymbolKeyFor, 1,
+ false);
+
+ // Install well-known symbols.
+ InstallConstant(isolate, symbol_fun, "hasInstance",
+ factory->has_instance_symbol());
+ InstallConstant(isolate, symbol_fun, "isConcatSpreadable",
+ factory->is_concat_spreadable_symbol());
+ InstallConstant(isolate, symbol_fun, "iterator",
+ factory->iterator_symbol());
+ InstallConstant(isolate, symbol_fun, "match", factory->match_symbol());
+ InstallConstant(isolate, symbol_fun, "replace", factory->replace_symbol());
+ InstallConstant(isolate, symbol_fun, "search", factory->search_symbol());
+ InstallConstant(isolate, symbol_fun, "species", factory->species_symbol());
+ InstallConstant(isolate, symbol_fun, "split", factory->split_symbol());
+ InstallConstant(isolate, symbol_fun, "toPrimitive",
+ factory->to_primitive_symbol());
+ InstallConstant(isolate, symbol_fun, "toStringTag",
+ factory->to_string_tag_symbol());
+ InstallConstant(isolate, symbol_fun, "unscopables",
+ factory->unscopables_symbol());
+
// Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
prototype, factory->to_string_tag_symbol(),
@@ -1770,7 +1816,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "setUTCSeconds",
Builtins::kDatePrototypeSetUTCSeconds, 2, false);
SimpleInstallFunction(prototype, "valueOf", Builtins::kDatePrototypeValueOf,
- 0, false);
+ 0, true);
SimpleInstallFunction(prototype, "getYear", Builtins::kDatePrototypeGetYear,
0, true);
SimpleInstallFunction(prototype, "setYear", Builtins::kDatePrototypeSetYear,
@@ -1800,6 +1846,160 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
to_primitive->shared()->set_length(1);
}
+ {
+ Handle<Code> code = isolate->builtins()->PromiseGetCapabilitiesExecutor();
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, true);
+ info->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+ info->set_instance_class_name(isolate->heap()->Object_string());
+ info->set_internal_formal_parameter_count(2);
+ info->set_length(2);
+ native_context()->set_promise_get_capabilities_executor_shared_fun(*info);
+
+ // %new_promise_capability(C, debugEvent)
+ Handle<JSFunction> new_promise_capability =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kNewPromiseCapability, 2, false);
+ InstallWithIntrinsicDefaultProto(isolate, new_promise_capability,
+ Context::NEW_PROMISE_CAPABILITY_INDEX);
+ }
+
+ { // -- P r o m i s e
+ // Set catch prediction
+ Handle<Code> promise_code = isolate->builtins()->PromiseConstructor();
+ promise_code->set_is_promise_rejection(true);
+
+ Handle<JSObject> prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ Handle<JSFunction> promise_fun =
+ InstallFunction(global, "Promise", JS_PROMISE_TYPE, JSPromise::kSize,
+ prototype, Builtins::kPromiseConstructor);
+ InstallWithIntrinsicDefaultProto(isolate, promise_fun,
+ Context::PROMISE_FUNCTION_INDEX);
+
+ Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate);
+ shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
+ shared->set_instance_class_name(isolate->heap()->Object_string());
+ shared->set_internal_formal_parameter_count(1);
+ shared->set_length(1);
+
+ // Install the "constructor" property on the {prototype}.
+ JSObject::AddProperty(prototype, factory->constructor_string(), promise_fun,
+ DONT_ENUM);
+
+ // Install the @@toStringTag property on the {prototype}.
+ JSObject::AddProperty(
+ prototype, factory->to_string_tag_symbol(), factory->Promise_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ Handle<JSFunction> promise_then =
+ SimpleInstallFunction(prototype, isolate->factory()->then_string(),
+ Builtins::kPromiseThen, 2, true);
+ InstallWithIntrinsicDefaultProto(isolate, promise_then,
+ Context::PROMISE_THEN_INDEX);
+
+ Handle<JSFunction> promise_catch = SimpleInstallFunction(
+ prototype, "catch", Builtins::kPromiseCatch, 1, true, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(isolate, promise_catch,
+ Context::PROMISE_CATCH_INDEX);
+
+ InstallSpeciesGetter(promise_fun);
+
+ SimpleInstallFunction(promise_fun, "resolve", Builtins::kPromiseResolve, 1,
+ true, DONT_ENUM);
+
+ SimpleInstallFunction(promise_fun, "reject", Builtins::kPromiseReject, 1,
+ true, DONT_ENUM);
+
+ Handle<Map> prototype_map(prototype->map());
+ Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+
+ // Store the initial Promise.prototype map. This is used in fast-path
+ // checks. Do not alter the prototype after this point.
+ native_context()->set_promise_prototype_map(*prototype_map);
+
+ { // Internal: PromiseInternalConstructor
+ // Also exposed as extrasUtils.createPromise.
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kPromiseInternalConstructor, 1, true);
+ InstallWithIntrinsicDefaultProto(
+ isolate, function, Context::PROMISE_INTERNAL_CONSTRUCTOR_INDEX);
+ }
+
+ { // Internal: IsPromise
+ Handle<JSFunction> function = SimpleCreateFunction(
+ isolate, factory->empty_string(), Builtins::kIsPromise, 1, false);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::IS_PROMISE_INDEX);
+ }
+
+ { // Internal: PerformPromiseThen
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kPerformPromiseThen, 4, false);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::PERFORM_PROMISE_THEN_INDEX);
+ }
+
+ { // Internal: ResolvePromise
+ // Also exposed as extrasUtils.resolvePromise.
+ Handle<JSFunction> function = SimpleCreateFunction(
+ isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::PROMISE_RESOLVE_INDEX);
+ }
+
+ { // Internal: PromiseHandle
+ Handle<JSFunction> function = SimpleCreateFunction(
+ isolate, factory->empty_string(), Builtins::kPromiseHandle, 5, false);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::PROMISE_HANDLE_INDEX);
+ // Set up catch prediction
+ Handle<Code> promise_handle = isolate->builtins()->PromiseHandle();
+ promise_handle->set_is_promise_rejection(true);
+ }
+
+ { // Internal: PromiseHandleReject
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kPromiseHandleReject, 3, false);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::PROMISE_HANDLE_REJECT_INDEX);
+ // Set up catch prediction
+ Handle<Code> promise_handle = isolate->builtins()->PromiseHandleReject();
+ promise_handle->set_is_exception_caught(true);
+ }
+
+ { // Internal: InternalPromiseReject
+ Handle<JSFunction> function =
+ SimpleCreateFunction(isolate, factory->empty_string(),
+ Builtins::kInternalPromiseReject, 3, true);
+ InstallWithIntrinsicDefaultProto(isolate, function,
+ Context::PROMISE_INTERNAL_REJECT_INDEX);
+ }
+
+ {
+ Handle<Code> code =
+ handle(isolate->builtins()->builtin(Builtins::kPromiseResolveClosure),
+ isolate);
+ Handle<SharedFunctionInfo> info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context()->set_promise_resolve_shared_fun(*info);
+
+ code =
+ handle(isolate->builtins()->builtin(Builtins::kPromiseRejectClosure),
+ isolate);
+ info =
+ factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
+ info->set_internal_formal_parameter_count(1);
+ info->set_length(1);
+ native_context()->set_promise_reject_shared_fun(*info);
+ }
+ }
+
{ // -- R e g E x p
// Builtin functions for RegExp.prototype.
Handle<JSObject> prototype =
@@ -1811,9 +2011,9 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate);
- shared->SetConstructStub(*isolate->builtins()->RegExpConstructor());
+ shared->SetConstructStub(*isolate->builtins()->JSBuiltinsConstructStub());
shared->set_instance_class_name(isolate->heap()->RegExp_string());
- shared->DontAdaptArguments();
+ shared->set_internal_formal_parameter_count(2);
shared->set_length(2);
{
@@ -1839,14 +2039,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallGetter(prototype, factory->multiline_string(),
Builtins::kRegExpPrototypeMultilineGetter, true);
SimpleInstallGetter(prototype, factory->source_string(),
- Builtins::kRegExpPrototypeSourceGetter, false);
+ Builtins::kRegExpPrototypeSourceGetter, true);
SimpleInstallGetter(prototype, factory->sticky_string(),
Builtins::kRegExpPrototypeStickyGetter, true);
SimpleInstallGetter(prototype, factory->unicode_string(),
Builtins::kRegExpPrototypeUnicodeGetter, true);
SimpleInstallFunction(prototype, "compile",
- Builtins::kRegExpPrototypeCompile, 2, false,
+ Builtins::kRegExpPrototypeCompile, 2, true,
DONT_ENUM);
SimpleInstallFunction(prototype, factory->toString_string(),
Builtins::kRegExpPrototypeToString, 0, false,
@@ -1857,7 +2057,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> fun = SimpleCreateFunction(
isolate, factory->InternalizeUtf8String("[Symbol.match]"),
- Builtins::kRegExpPrototypeMatch, 1, false);
+ Builtins::kRegExpPrototypeMatch, 1, true);
InstallFunction(prototype, fun, factory->match_symbol(), DONT_ENUM);
}
@@ -1878,22 +2078,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> fun = SimpleCreateFunction(
isolate, factory->InternalizeUtf8String("[Symbol.split]"),
- Builtins::kRegExpPrototypeSplit, 2, false);
+ Builtins::kRegExpPrototypeSplit, 2, true);
InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
}
+ Handle<Map> prototype_map(prototype->map());
+ Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate);
+
// Store the initial RegExp.prototype map. This is used in fast-path
// checks. Do not alter the prototype after this point.
- native_context()->set_regexp_prototype_map(prototype->map());
+ native_context()->set_regexp_prototype_map(*prototype_map);
}
{
// RegExp getters and setters.
- SimpleInstallGetter(regexp_fun,
- factory->InternalizeUtf8String("[Symbol.species]"),
- factory->species_symbol(),
- Builtins::kRegExpPrototypeSpeciesGetter, false);
+ InstallSpeciesGetter(regexp_fun);
// Static properties set by a successful match.
@@ -1963,10 +2163,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- DataDescriptor field(factory->lastIndex_string(),
- JSRegExp::kLastIndexFieldIndex, writable,
- Representation::Tagged());
- initial_map->AppendDescriptor(&field);
+ Descriptor d = Descriptor::DataField(factory->lastIndex_string(),
+ JSRegExp::kLastIndexFieldIndex,
+ writable, Representation::Tagged());
+ initial_map->AppendDescriptor(&d);
static const int num_fields = JSRegExp::kInObjectFieldCount;
initial_map->SetInObjectProperties(num_fields);
@@ -2043,6 +2243,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallError(isolate, dummy, factory->CompileError_string(),
Context::WASM_COMPILE_ERROR_FUNCTION_INDEX);
+ // -- L i n k E r r o r
+ InstallError(isolate, dummy, factory->LinkError_string(),
+ Context::WASM_LINK_ERROR_FUNCTION_INDEX);
+
// -- R u n t i m e E r r o r
InstallError(isolate, dummy, factory->RuntimeError_string(),
Context::WASM_RUNTIME_ERROR_FUNCTION_INDEX);
@@ -2121,41 +2325,103 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// Install math constants.
double const kE = base::ieee754::exp(1.0);
double const kPI = 3.1415926535897932;
+ InstallConstant(isolate, math, "E", factory->NewNumber(kE));
+ InstallConstant(isolate, math, "LN10",
+ factory->NewNumber(base::ieee754::log(10.0)));
+ InstallConstant(isolate, math, "LN2",
+ factory->NewNumber(base::ieee754::log(2.0)));
+ InstallConstant(isolate, math, "LOG10E",
+ factory->NewNumber(base::ieee754::log10(kE)));
+ InstallConstant(isolate, math, "LOG2E",
+ factory->NewNumber(base::ieee754::log2(kE)));
+ InstallConstant(isolate, math, "PI", factory->NewNumber(kPI));
+ InstallConstant(isolate, math, "SQRT1_2",
+ factory->NewNumber(std::sqrt(0.5)));
+ InstallConstant(isolate, math, "SQRT2", factory->NewNumber(std::sqrt(2.0)));
JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("E"), factory->NewNumber(kE),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("LN10"),
- factory->NewNumber(base::ieee754::log(10.0)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("LN2"),
- factory->NewNumber(base::ieee754::log(2.0)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("LOG10E"),
- factory->NewNumber(base::ieee754::log10(kE)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
- JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("LOG2E"),
- factory->NewNumber(base::ieee754::log2(kE)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ math, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("Math"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
+
+#ifdef V8_I18N_SUPPORT
+ { // -- I n t l
+ Handle<String> name = factory->InternalizeUtf8String("Intl");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ JSFunction::SetInstancePrototype(
+ cons,
+ Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ Handle<JSObject> intl = factory->NewJSObject(cons, TENURED);
+ DCHECK(intl->IsJSObject());
+ JSObject::AddProperty(global, name, intl, DONT_ENUM);
+
+ Handle<JSObject> date_time_format_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ // Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("PI"), factory->NewNumber(kPI),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ date_time_format_prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ Handle<JSFunction> date_time_format_constructor = InstallFunction(
+ intl, "DateTimeFormat", JS_OBJECT_TYPE, DateFormat::kSize,
+ date_time_format_prototype, Builtins::kIllegal);
+ JSObject::AddProperty(date_time_format_prototype,
+ factory->constructor_string(),
+ date_time_format_constructor, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(
+ isolate, date_time_format_constructor,
+ Context::INTL_DATE_TIME_FORMAT_FUNCTION_INDEX);
+
+ Handle<JSObject> number_format_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ // Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("SQRT1_2"),
- factory->NewNumber(std::sqrt(0.5)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ number_format_prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ Handle<JSFunction> number_format_constructor = InstallFunction(
+ intl, "NumberFormat", JS_OBJECT_TYPE, NumberFormat::kSize,
+ number_format_prototype, Builtins::kIllegal);
+ JSObject::AddProperty(number_format_prototype,
+ factory->constructor_string(),
+ number_format_constructor, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(
+ isolate, number_format_constructor,
+ Context::INTL_NUMBER_FORMAT_FUNCTION_INDEX);
+
+ Handle<JSObject> collator_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ // Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- math, factory->NewStringFromAsciiChecked("SQRT2"),
- factory->NewNumber(std::sqrt(2.0)),
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY));
+ collator_prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ Handle<JSFunction> collator_constructor =
+ InstallFunction(intl, "Collator", JS_OBJECT_TYPE, Collator::kSize,
+ collator_prototype, Builtins::kIllegal);
+ JSObject::AddProperty(collator_prototype, factory->constructor_string(),
+ collator_constructor, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(isolate, collator_constructor,
+ Context::INTL_COLLATOR_FUNCTION_INDEX);
+
+ Handle<JSObject> v8_break_iterator_prototype =
+ factory->NewJSObject(isolate->object_function(), TENURED);
+ // Install the @@toStringTag property on the {prototype}.
JSObject::AddProperty(
- math, factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("Math"),
+ v8_break_iterator_prototype, factory->to_string_tag_symbol(),
+ factory->Object_string(),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ Handle<JSFunction> v8_break_iterator_constructor = InstallFunction(
+ intl, "v8BreakIterator", JS_OBJECT_TYPE, V8BreakIterator::kSize,
+ v8_break_iterator_prototype, Builtins::kIllegal);
+ JSObject::AddProperty(v8_break_iterator_prototype,
+ factory->constructor_string(),
+ v8_break_iterator_constructor, DONT_ENUM);
+ InstallWithIntrinsicDefaultProto(
+ isolate, v8_break_iterator_constructor,
+ Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
}
+#endif // V8_I18N_SUPPORT
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun = InstallArrayBuffer(
@@ -2163,6 +2429,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
BuiltinFunctionId::kArrayBufferByteLength);
InstallWithIntrinsicDefaultProto(isolate, array_buffer_fun,
Context::ARRAY_BUFFER_FUN_INDEX);
+ InstallSpeciesGetter(array_buffer_fun);
}
{ // -- T y p e d A r r a y
@@ -2174,6 +2441,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
Builtins::kIllegal);
+ InstallSpeciesGetter(typed_array_fun);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(),
@@ -2301,6 +2569,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(), Builtins::kIllegal);
InstallWithIntrinsicDefaultProto(isolate, js_map_fun,
Context::JS_MAP_FUN_INDEX);
+ InstallSpeciesGetter(js_map_fun);
}
{ // -- S e t
@@ -2309,33 +2578,27 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate->initial_object_prototype(), Builtins::kIllegal);
InstallWithIntrinsicDefaultProto(isolate, js_set_fun,
Context::JS_SET_FUN_INDEX);
+ InstallSpeciesGetter(js_set_fun);
}
{ // -- J S M o d u l e N a m e s p a c e
Handle<Map> map =
factory->NewMap(JS_MODULE_NAMESPACE_TYPE, JSModuleNamespace::kSize);
Map::SetPrototype(map, isolate->factory()->null_value());
- Map::EnsureDescriptorSlack(map, 2);
+ Map::EnsureDescriptorSlack(map, 1);
native_context()->set_js_module_namespace_map(*map);
{ // Install @@toStringTag.
PropertyAttributes attribs =
- static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
- DataConstantDescriptor d(factory->to_string_tag_symbol(),
- factory->NewStringFromAsciiChecked("Module"),
- attribs);
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
+ Descriptor d =
+ Descriptor::DataField(factory->to_string_tag_symbol(),
+ JSModuleNamespace::kToStringTagFieldIndex,
+ attribs, Representation::Tagged());
map->AppendDescriptor(&d);
}
- { // Install @@iterator.
- Handle<JSFunction> iterator = SimpleCreateFunction(
- isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
- Builtins::kModuleNamespaceIterator, 0, true);
- iterator->shared()->set_native(true);
- // TODO(neis): Is this really supposed to be writable?
- DataConstantDescriptor d(factory->iterator_symbol(), iterator, DONT_ENUM);
- map->AppendDescriptor(&d);
- }
+ map->SetInObjectProperties(JSModuleNamespace::kInObjectFieldCount);
}
{ // -- I t e r a t o r R e s u l t
@@ -2345,14 +2608,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(map, 2);
{ // value
- DataDescriptor d(factory->value_string(), JSIteratorResult::kValueIndex,
- NONE, Representation::Tagged());
+ Descriptor d = Descriptor::DataField(factory->value_string(),
+ JSIteratorResult::kValueIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // done
- DataDescriptor d(factory->done_string(), JSIteratorResult::kDoneIndex,
- NONE, Representation::Tagged());
+ Descriptor d = Descriptor::DataField(factory->done_string(),
+ JSIteratorResult::kDoneIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
@@ -2458,15 +2723,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<AccessorInfo> bound_length =
Accessors::BoundFunctionLengthInfo(isolate, roc_attribs);
{ // length
- AccessorConstantDescriptor d(factory->length_string(), bound_length,
- roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory->length_string(),
+ bound_length, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> bound_name =
Accessors::BoundFunctionNameInfo(isolate, roc_attribs);
- { // length
- AccessorConstantDescriptor d(factory->name_string(), bound_name,
- roc_attribs);
+ { // name
+ Descriptor d = Descriptor::AccessorConstant(factory->name_string(),
+ bound_name, roc_attribs);
map->AppendDescriptor(&d);
}
map->SetInObjectProperties(0);
@@ -2493,15 +2758,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(map, 2);
{ // length
- DataDescriptor d(factory->length_string(),
- JSSloppyArgumentsObject::kLengthIndex, DONT_ENUM,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory->length_string(), JSSloppyArgumentsObject::kLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- DataDescriptor d(factory->callee_string(),
- JSSloppyArgumentsObject::kCalleeIndex, DONT_ENUM,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory->callee_string(), JSSloppyArgumentsObject::kCalleeIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -2550,14 +2815,14 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Map::EnsureDescriptorSlack(map, 2);
{ // length
- DataDescriptor d(factory->length_string(),
- JSStrictArgumentsObject::kLengthIndex, DONT_ENUM,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory->length_string(), JSStrictArgumentsObject::kLengthIndex,
+ DONT_ENUM, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // callee
- AccessorConstantDescriptor d(factory->callee_string(), callee,
- attributes);
+ Descriptor d = Descriptor::AccessorConstant(factory->callee_string(),
+ callee, attributes);
map->AppendDescriptor(&d);
}
// @@iterator method is added later.
@@ -2739,12 +3004,16 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
// For non-extension scripts, run script to get the function wrapper.
Handle<Object> wrapper;
- if (!Execution::Call(isolate, fun, receiver, 0, NULL).ToHandle(&wrapper)) {
+ if (!Execution::TryCall(isolate, fun, receiver, 0, nullptr,
+ Execution::MessageHandling::kKeepPending, nullptr)
+ .ToHandle(&wrapper)) {
return false;
}
// Then run the function wrapper.
- return !Execution::Call(isolate, Handle<JSFunction>::cast(wrapper), receiver,
- argc, argv).is_null();
+ return !Execution::TryCall(isolate, Handle<JSFunction>::cast(wrapper),
+ receiver, argc, argv,
+ Execution::MessageHandling::kKeepPending, nullptr)
+ .is_null();
}
@@ -2756,7 +3025,9 @@ bool Genesis::CallUtilsFunction(Isolate* isolate, const char* name) {
Handle<Object> fun = JSObject::GetDataProperty(utils, name_string);
Handle<Object> receiver = isolate->factory()->undefined_value();
Handle<Object> args[] = {utils};
- return !Execution::Call(isolate, fun, receiver, 1, args).is_null();
+ return !Execution::TryCall(isolate, fun, receiver, 1, args,
+ Execution::MessageHandling::kKeepPending, nullptr)
+ .is_null();
}
@@ -2798,7 +3069,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
// Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
Handle<Object> receiver = isolate->global_object();
- return !Execution::Call(isolate, fun, receiver, 0, NULL).is_null();
+ return !Execution::TryCall(isolate, fun, receiver, 0, nullptr,
+ Execution::MessageHandling::kKeepPending, nullptr)
+ .is_null();
}
@@ -2896,7 +3169,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
iterator_prototype, NONE);
{
- PrototypeIterator iter(native_context->sloppy_generator_function_map());
+ PrototypeIterator iter(native_context->generator_function_map());
Handle<JSObject> generator_function_prototype(iter.GetCurrent<JSObject>());
JSObject::AddProperty(
@@ -2909,7 +3182,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_prototype, Builtins::kGeneratorFunctionConstructor,
kUseStrictFunctionMap);
generator_function_function->set_prototype_or_initial_map(
- native_context->sloppy_generator_function_map());
+ native_context->generator_function_map());
generator_function_function->shared()->DontAdaptArguments();
generator_function_function->shared()->SetConstructStub(
*isolate->builtins()->GeneratorFunctionConstructor());
@@ -2925,29 +3198,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
generator_function_function,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
- native_context->sloppy_generator_function_map()->SetConstructor(
+ native_context->generator_function_map()->SetConstructor(
*generator_function_function);
- native_context->strict_generator_function_map()->SetConstructor(
- *generator_function_function);
- }
-
- { // -- F i x e d A r r a y I t e r a t o r
- int size = JSFixedArrayIterator::kHeaderSize +
- JSFixedArrayIterator::kInObjectPropertyCount * kPointerSize;
- Handle<Map> map = factory->NewMap(JS_FIXED_ARRAY_ITERATOR_TYPE, size);
- Map::SetPrototype(map, iterator_prototype);
- Map::EnsureDescriptorSlack(map,
- JSFixedArrayIterator::kInObjectPropertyCount);
- map->SetInObjectProperties(JSFixedArrayIterator::kInObjectPropertyCount);
- map->SetConstructor(native_context->object_function());
-
- { // next
- DataDescriptor d(factory->next_string(), JSFixedArrayIterator::kNextIndex,
- DONT_ENUM, Representation::Tagged());
- map->AppendDescriptor(&d);
- }
-
- native_context->set_fixed_array_iterator_map(*map);
}
{ // -- S e t I t e r a t o r
@@ -2989,7 +3241,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_column =
Accessors::ScriptColumnOffsetInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_column->name())), script_column,
attribs);
script_map->AppendDescriptor(&d);
@@ -2997,8 +3249,8 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_id = Accessors::ScriptIdInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(script_id->name())),
- script_id, attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(script_id->name())), script_id, attribs);
script_map->AppendDescriptor(&d);
}
@@ -3006,7 +3258,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_name =
Accessors::ScriptNameInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_name->name())), script_name, attribs);
script_map->AppendDescriptor(&d);
}
@@ -3014,7 +3266,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_line =
Accessors::ScriptLineOffsetInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_line->name())), script_line, attribs);
script_map->AppendDescriptor(&d);
}
@@ -3022,7 +3274,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_source =
Accessors::ScriptSourceInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_source->name())), script_source,
attribs);
script_map->AppendDescriptor(&d);
@@ -3031,7 +3283,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_type =
Accessors::ScriptTypeInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_type->name())), script_type, attribs);
script_map->AppendDescriptor(&d);
}
@@ -3039,7 +3291,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_compilation_type =
Accessors::ScriptCompilationTypeInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_compilation_type->name())),
script_compilation_type, attribs);
script_map->AppendDescriptor(&d);
@@ -3048,7 +3300,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_context_data =
Accessors::ScriptContextDataInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_context_data->name())),
script_context_data, attribs);
script_map->AppendDescriptor(&d);
@@ -3057,7 +3309,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_eval_from_script =
Accessors::ScriptEvalFromScriptInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_eval_from_script->name())),
script_eval_from_script, attribs);
script_map->AppendDescriptor(&d);
@@ -3066,7 +3318,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_eval_from_script_position =
Accessors::ScriptEvalFromScriptPositionInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_eval_from_script_position->name())),
script_eval_from_script_position, attribs);
script_map->AppendDescriptor(&d);
@@ -3075,7 +3327,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_eval_from_function_name =
Accessors::ScriptEvalFromFunctionNameInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_eval_from_function_name->name())),
script_eval_from_function_name, attribs);
script_map->AppendDescriptor(&d);
@@ -3084,7 +3336,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_source_url =
Accessors::ScriptSourceUrlInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_source_url->name())),
script_source_url, attribs);
script_map->AppendDescriptor(&d);
@@ -3093,23 +3345,14 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Handle<AccessorInfo> script_source_mapping_url =
Accessors::ScriptSourceMappingUrlInfo(isolate, attribs);
{
- AccessorConstantDescriptor d(
+ Descriptor d = Descriptor::AccessorConstant(
Handle<Name>(Name::cast(script_source_mapping_url->name())),
script_source_mapping_url, attribs);
script_map->AppendDescriptor(&d);
}
- Handle<AccessorInfo> script_is_embedder_debug_script =
- Accessors::ScriptIsEmbedderDebugScriptInfo(isolate, attribs);
- {
- AccessorConstantDescriptor d(
- Handle<Name>(Name::cast(script_is_embedder_debug_script->name())),
- script_is_embedder_debug_script, attribs);
- script_map->AppendDescriptor(&d);
- }
-
{
- PrototypeIterator iter(native_context->sloppy_async_function_map());
+ PrototypeIterator iter(native_context->async_function_map());
Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
static const bool kUseStrictFunctionMap = true;
@@ -3229,15 +3472,14 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_string_padding)
#ifdef V8_I18N_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
#endif
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_async_await)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_spread)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -3405,8 +3647,8 @@ Handle<JSFunction> Genesis::InstallInternalArray(Handle<JSObject> target,
Handle<AccessorInfo> array_length =
Accessors::ArrayLengthInfo(isolate(), attribs);
{ // Add length.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(array_length->name())),
- array_length, attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
initial_map->AppendDescriptor(&d);
}
@@ -3430,6 +3672,11 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
InstallInternalArray(extras_utils, "InternalPackedArray", FAST_ELEMENTS);
+ InstallFunction(extras_utils, isolate()->promise_internal_constructor(),
+ factory()->NewStringFromAsciiChecked("createPromise"));
+ InstallFunction(extras_utils, isolate()->promise_resolve(),
+ factory()->NewStringFromAsciiChecked("resolvePromise"));
+
int builtin_index = Natives::GetDebuggerCount();
// Only run prologue.js and runtime.js at this point.
DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
@@ -3586,46 +3833,6 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
concat->shared()->set_length(1);
}
- // Set up the Promise constructor.
- {
- Handle<String> key = factory()->Promise_string();
- Handle<JSFunction> function = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(global_object, key).ToHandleChecked());
- JSFunction::EnsureHasInitialMap(function);
- function->initial_map()->set_instance_type(JS_PROMISE_TYPE);
- function->shared()->SetConstructStub(
- *isolate()->builtins()->JSBuiltinsConstructStub());
- InstallWithIntrinsicDefaultProto(isolate(), function,
- Context::PROMISE_FUNCTION_INDEX);
-
- {
- Handle<Code> code = handle(
- isolate()->builtins()->builtin(Builtins::kPromiseResolveClosure),
- isolate());
- Handle<SharedFunctionInfo> info =
- isolate()->factory()->NewSharedFunctionInfo(factory()->empty_string(),
- code, false);
- info->set_internal_formal_parameter_count(1);
- info->set_length(1);
- native_context()->set_promise_resolve_shared_fun(*info);
-
- code = handle(
- isolate()->builtins()->builtin(Builtins::kPromiseRejectClosure),
- isolate());
- info = isolate()->factory()->NewSharedFunctionInfo(
- factory()->empty_string(), code, false);
- info->set_internal_formal_parameter_count(2);
- info->set_length(1);
- native_context()->set_promise_reject_shared_fun(*info);
- }
-
- Handle<JSFunction> create_resolving_functions =
- SimpleCreateFunction(isolate(), factory()->empty_string(),
- Builtins::kCreateResolvingFunctions, 2, false);
- native_context()->set_create_resolving_functions(
- *create_resolving_functions);
- }
-
InstallBuiltinFunctionIds();
// Create a map for accessor property descriptors (a variant of JSObject
@@ -3638,27 +3845,29 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::EnsureDescriptorSlack(map, 4);
{ // get
- DataDescriptor d(factory()->get_string(),
- JSAccessorPropertyDescriptor::kGetIndex, NONE,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory()->get_string(), JSAccessorPropertyDescriptor::kGetIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // set
- DataDescriptor d(factory()->set_string(),
- JSAccessorPropertyDescriptor::kSetIndex, NONE,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory()->set_string(), JSAccessorPropertyDescriptor::kSetIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // enumerable
- DataDescriptor d(factory()->enumerable_string(),
- JSAccessorPropertyDescriptor::kEnumerableIndex, NONE,
- Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(factory()->enumerable_string(),
+ JSAccessorPropertyDescriptor::kEnumerableIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // configurable
- DataDescriptor d(factory()->configurable_string(),
- JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory()->configurable_string(),
+ JSAccessorPropertyDescriptor::kConfigurableIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
@@ -3681,27 +3890,30 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Map::EnsureDescriptorSlack(map, 4);
{ // value
- DataDescriptor d(factory()->value_string(),
- JSDataPropertyDescriptor::kValueIndex, NONE,
- Representation::Tagged());
+ Descriptor d = Descriptor::DataField(
+ factory()->value_string(), JSDataPropertyDescriptor::kValueIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // writable
- DataDescriptor d(factory()->writable_string(),
- JSDataPropertyDescriptor::kWritableIndex, NONE,
- Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(factory()->writable_string(),
+ JSDataPropertyDescriptor::kWritableIndex, NONE,
+ Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // enumerable
- DataDescriptor d(factory()->enumerable_string(),
- JSDataPropertyDescriptor::kEnumerableIndex, NONE,
- Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(factory()->enumerable_string(),
+ JSDataPropertyDescriptor::kEnumerableIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
{ // configurable
- DataDescriptor d(factory()->configurable_string(),
- JSDataPropertyDescriptor::kConfigurableIndex, NONE,
- Representation::Tagged());
+ Descriptor d =
+ Descriptor::DataField(factory()->configurable_string(),
+ JSDataPropertyDescriptor::kConfigurableIndex,
+ NONE, Representation::Tagged());
map->AppendDescriptor(&d);
}
@@ -3743,23 +3955,23 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
int old = array_descriptors->SearchWithCache(
isolate(), *length, array_function->initial_map());
DCHECK(old != DescriptorArray::kNotFound);
- AccessorConstantDescriptor desc(
+ Descriptor d = Descriptor::AccessorConstant(
length, handle(array_descriptors->GetValue(old), isolate()),
array_descriptors->GetDetails(old).attributes());
- initial_map->AppendDescriptor(&desc);
+ initial_map->AppendDescriptor(&d);
}
{
- DataDescriptor index_field(factory()->index_string(),
- JSRegExpResult::kIndexIndex, NONE,
- Representation::Tagged());
- initial_map->AppendDescriptor(&index_field);
+ Descriptor d = Descriptor::DataField(factory()->index_string(),
+ JSRegExpResult::kIndexIndex, NONE,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&d);
}
{
- DataDescriptor input_field(factory()->input_string(),
- JSRegExpResult::kInputIndex, NONE,
- Representation::Tagged());
- initial_map->AppendDescriptor(&input_field);
+ Descriptor d = Descriptor::DataField(factory()->input_string(),
+ JSRegExpResult::kInputIndex, NONE,
+ Representation::Tagged());
+ initial_map->AppendDescriptor(&d);
}
initial_map->SetInObjectProperties(2);
@@ -3774,29 +3986,29 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
Handle<AccessorInfo> arguments_iterator =
Accessors::ArgumentsIteratorInfo(isolate(), attribs);
{
- AccessorConstantDescriptor d(factory()->iterator_symbol(),
- arguments_iterator, attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->sloppy_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
- AccessorConstantDescriptor d(factory()->iterator_symbol(),
- arguments_iterator, attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->fast_aliased_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
- AccessorConstantDescriptor d(factory()->iterator_symbol(),
- arguments_iterator, attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->slow_aliased_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
}
{
- AccessorConstantDescriptor d(factory()->iterator_symbol(),
- arguments_iterator, attribs);
+ Descriptor d = Descriptor::AccessorConstant(factory()->iterator_symbol(),
+ arguments_iterator, attribs);
Handle<Map> map(native_context()->strict_arguments_map());
Map::EnsureDescriptorSlack(map, 1);
map->AppendDescriptor(&d);
@@ -3819,18 +4031,16 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_regexp_property_natives[] = {nullptr};
static const char* harmony_function_sent_natives[] = {nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
- static const char* harmony_string_padding_natives[] = {
- "native harmony-string-padding.js", nullptr};
#ifdef V8_I18N_SUPPORT
static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
nullptr};
static const char* datetime_format_to_parts_natives[] = {
"native datetime-format-to-parts.js", nullptr};
#endif
- static const char* harmony_async_await_natives[] = {nullptr};
static const char* harmony_restrictive_generators_natives[] = {nullptr};
static const char* harmony_trailing_commas_natives[] = {nullptr};
static const char* harmony_class_fields_natives[] = {nullptr};
+ static const char* harmony_object_spread_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -3975,8 +4185,6 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Factory* factory = isolate->factory();
HandleScope scope(isolate);
- Handle<JSGlobalObject> global(JSGlobalObject::cast(
- native_context->global_object()));
Handle<JSObject> Error = isolate->error_function();
Handle<String> name =
@@ -3984,6 +4192,10 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
+ if (FLAG_expose_wasm || FLAG_validate_asm) {
+ WasmJs::Install(isolate);
+ }
+
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@@ -4000,11 +4212,10 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
uint32_t index;
if (debug_string->AsArrayIndex(&index)) return true;
Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
- JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
+ JSObject::AddProperty(handle(native_context->global_proxy()), debug_string,
+ global_proxy, DONT_ENUM);
}
- WasmJs::Install(isolate, global);
-
return true;
}
@@ -4133,7 +4344,6 @@ bool Genesis::InstallExtension(Isolate* isolate,
isolate->clear_pending_exception();
}
extension_states->set_state(current, INSTALLED);
- isolate->NotifyExtensionInstalled();
return result;
}
@@ -4207,27 +4417,29 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
Handle<DescriptorArray>(from->map()->instance_descriptors());
for (int i = 0; i < from->map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
- switch (details.type()) {
- case DATA: {
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
DCHECK(!descs->GetDetails(i).representation().IsDouble());
- Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
- isolate());
+ Handle<Object> value(from->RawFastPropertyAt(index), isolate());
JSObject::AddProperty(to, key, value, details.attributes());
- break;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ UNREACHABLE();
}
- case DATA_CONSTANT: {
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
- Handle<Object> constant(descs->GetConstant(i), isolate());
- JSObject::AddProperty(to, key, constant, details.attributes());
- break;
- }
- case ACCESSOR:
- UNREACHABLE();
- case ACCESSOR_CONSTANT: {
+ Handle<Object> value(descs->GetValue(i), isolate());
+ JSObject::AddProperty(to, key, value, details.attributes());
+
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
Handle<Name> key(descs->GetKey(i));
LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
@@ -4236,59 +4448,63 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
HandleScope inner(isolate());
DCHECK(!to->HasFastProperties());
// Add to dictionary.
- Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ Handle<Object> value(descs->GetValue(i), isolate());
+ PropertyDetails d(kAccessor, details.attributes(), i + 1,
PropertyCellType::kMutable);
- JSObject::SetNormalizedProperty(to, key, callbacks, d);
- break;
+ JSObject::SetNormalizedProperty(to, key, value, d);
}
}
}
} else if (from->IsJSGlobalObject()) {
+ // Copy all keys and values in enumeration order.
Handle<GlobalDictionary> properties =
Handle<GlobalDictionary>(from->global_dictionary());
- int capacity = properties->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(isolate(), raw_key)) {
- DCHECK(raw_key->IsName());
- // If the property is already there we skip it.
- Handle<Name> key(Name::cast(raw_key));
- LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- if (it.IsFound()) continue;
- // Set the property.
- DCHECK(properties->ValueAt(i)->IsPropertyCell());
- Handle<PropertyCell> cell(PropertyCell::cast(properties->ValueAt(i)));
- Handle<Object> value(cell->value(), isolate());
- if (value->IsTheHole(isolate())) continue;
- PropertyDetails details = cell->property_details();
- DCHECK_EQ(kData, details.kind());
- JSObject::AddProperty(to, key, value, details.attributes());
- }
+ Handle<FixedArray> key_indices =
+ GlobalDictionary::IterationIndices(properties);
+ for (int i = 0; i < key_indices->length(); i++) {
+ int key_index = Smi::cast(key_indices->get(i))->value();
+ Object* raw_key = properties->KeyAt(key_index);
+ DCHECK(properties->IsKey(isolate(), raw_key));
+ DCHECK(raw_key->IsName());
+ // If the property is already there we skip it.
+ Handle<Name> key(Name::cast(raw_key), isolate());
+ LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ if (it.IsFound()) continue;
+ // Set the property.
+ DCHECK(properties->ValueAt(key_index)->IsPropertyCell());
+ Handle<PropertyCell> cell(
+ PropertyCell::cast(properties->ValueAt(key_index)), isolate());
+ Handle<Object> value(cell->value(), isolate());
+ if (value->IsTheHole(isolate())) continue;
+ PropertyDetails details = cell->property_details();
+ if (details.kind() != kData) continue;
+ JSObject::AddProperty(to, key, value, details.attributes());
}
} else {
+ // Copy all keys and values in enumeration order.
Handle<NameDictionary> properties =
Handle<NameDictionary>(from->property_dictionary());
- int capacity = properties->Capacity();
- for (int i = 0; i < capacity; i++) {
- Object* raw_key(properties->KeyAt(i));
- if (properties->IsKey(isolate(), raw_key)) {
- DCHECK(raw_key->IsName());
- // If the property is already there we skip it.
- Handle<Name> key(Name::cast(raw_key));
- LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
- CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
- if (it.IsFound()) continue;
- // Set the property.
- Handle<Object> value = Handle<Object>(properties->ValueAt(i),
- isolate());
- DCHECK(!value->IsCell());
- DCHECK(!value->IsTheHole(isolate()));
- PropertyDetails details = properties->DetailsAt(i);
- DCHECK_EQ(kData, details.kind());
- JSObject::AddProperty(to, key, value, details.attributes());
- }
+ Handle<FixedArray> key_indices =
+ NameDictionary::IterationIndices(properties);
+ for (int i = 0; i < key_indices->length(); i++) {
+ int key_index = Smi::cast(key_indices->get(i))->value();
+ Object* raw_key = properties->KeyAt(key_index);
+ DCHECK(properties->IsKey(isolate(), raw_key));
+ DCHECK(raw_key->IsName());
+ // If the property is already there we skip it.
+ Handle<Name> key(Name::cast(raw_key), isolate());
+ LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+ CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+ if (it.IsFound()) continue;
+ // Set the property.
+ Handle<Object> value =
+ Handle<Object>(properties->ValueAt(key_index), isolate());
+ DCHECK(!value->IsCell());
+ DCHECK(!value->IsTheHole(isolate()));
+ PropertyDetails details = properties->DetailsAt(key_index);
+ DCHECK_EQ(kData, details.kind());
+ JSObject::AddProperty(to, key, value, details.attributes());
}
}
}
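
The rewritten TransferNamedProperties stops switching over the legacy four-value PropertyType (DATA, DATA_CONSTANT, ACCESSOR, ACCESSOR_CONSTANT) and instead classifies each descriptor along two independent axes: location (kField vs. kDescriptor) and kind (kData vs. kAccessor). The dictionary branches likewise switch from raw capacity scans to IterationIndices, so properties are copied in enumeration order. A sketch of the new dispatch, with the copy bodies elided:

    PropertyDetails details = descs->GetDetails(i);
    if (details.location() == kField) {
      if (details.kind() == kData) {
        // Was DATA: the value lives in an in-object field.
      } else {
        UNREACHABLE();  // kField accessors do not occur here.
      }
    } else {  // kDescriptor: the value lives in the descriptor array itself.
      if (details.kind() == kData) {
        // Was DATA_CONSTANT: a constant data value.
      } else {
        // Was ACCESSOR_CONSTANT: an AccessorInfo / AccessorPair.
      }
    }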
@@ -4357,11 +4573,12 @@ class NoTrackDoubleFieldsForSerializerScope {
bool enabled_;
};
-Genesis::Genesis(Isolate* isolate,
- MaybeHandle<JSGlobalProxy> maybe_global_proxy,
- v8::Local<v8::ObjectTemplate> global_proxy_template,
- v8::ExtensionConfiguration* extensions,
- size_t context_snapshot_index, GlobalContextType context_type)
+Genesis::Genesis(
+ Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
+ size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
+ GlobalContextType context_type)
: isolate_(isolate), active_(isolate->bootstrapper()) {
NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
@@ -4385,12 +4602,22 @@ Genesis::Genesis(Isolate* isolate,
// and initialize it later in CreateNewGlobals.
Handle<JSGlobalProxy> global_proxy;
if (!maybe_global_proxy.ToHandle(&global_proxy)) {
- const int internal_field_count =
- !global_proxy_template.IsEmpty()
- ? global_proxy_template->InternalFieldCount()
- : 0;
- global_proxy = isolate->factory()->NewUninitializedJSGlobalProxy(
- JSGlobalProxy::SizeWithInternalFields(internal_field_count));
+ int instance_size = 0;
+ if (context_snapshot_index > 0) {
+      // The global proxy function used to reinitialize this global proxy
+      // lives in a context that has yet to be deserialized, so we prepare
+      // a global proxy of the correct size up front.
+ Object* size = isolate->heap()->serialized_global_proxy_sizes()->get(
+ static_cast<int>(context_snapshot_index) - 1);
+ instance_size = Smi::cast(size)->value();
+ } else {
+ instance_size = JSGlobalProxy::SizeWithInternalFields(
+ global_proxy_template.IsEmpty()
+ ? 0
+ : global_proxy_template->InternalFieldCount());
+ }
+ global_proxy =
+ isolate->factory()->NewUninitializedJSGlobalProxy(instance_size);
}
// We can only de-serialize a context if the isolate was initialized from
@@ -4398,7 +4625,8 @@ Genesis::Genesis(Isolate* isolate,
// Also create a context from scratch to expose natives, if required by flag.
if (!isolate->initialized_from_snapshot() ||
!Snapshot::NewContextFromSnapshot(isolate, global_proxy,
- context_snapshot_index)
+ context_snapshot_index,
+ internal_fields_deserializer)
.ToHandle(&native_context_)) {
native_context_ = Handle<Context>();
}
@@ -4416,14 +4644,20 @@ Genesis::Genesis(Isolate* isolate,
Map::TraceAllTransitions(object_fun->initial_map());
}
#endif
- Handle<JSGlobalObject> global_object =
- CreateNewGlobals(global_proxy_template, global_proxy);
- HookUpGlobalProxy(global_object, global_proxy);
- HookUpGlobalObject(global_object);
+ if (context_snapshot_index == 0) {
+ Handle<JSGlobalObject> global_object =
+ CreateNewGlobals(global_proxy_template, global_proxy);
+ HookUpGlobalObject(global_object);
- if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ if (!ConfigureGlobalObjects(global_proxy_template)) return;
+ } else {
+ // The global proxy needs to be integrated into the native context.
+ HookUpGlobalProxy(global_proxy);
+ }
+ DCHECK(!global_proxy->IsDetachedFrom(native_context()->global_object()));
} else {
+ DCHECK_EQ(0u, context_snapshot_index);
// We get here if there was no context snapshot.
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
@@ -4432,7 +4666,6 @@ Genesis::Genesis(Isolate* isolate,
CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
- HookUpGlobalProxy(global_object, global_proxy);
InitializeGlobal(global_object, empty_function, context_type);
InitializeNormalizedMapCaches();
@@ -4444,9 +4677,6 @@ Genesis::Genesis(Isolate* isolate,
if (!ConfigureGlobalObjects(global_proxy_template)) return;
isolate->counters()->contexts_created_from_scratch()->Increment();
- // Re-initialize the counter because it got incremented during snapshot
- // creation.
- isolate->native_context()->set_errors_thrown(Smi::kZero);
}
// Install experimental natives. Do not include them into the
@@ -4475,6 +4705,7 @@ Genesis::Genesis(Isolate* isolate,
// We do not need script contexts for native scripts.
DCHECK_EQ(1, native_context()->script_context_table()->used());
+ native_context()->ResetErrorsThrown();
result_ = native_context();
}
@@ -4535,7 +4766,7 @@ Genesis::Genesis(Isolate* isolate,
global_proxy_function->shared()->set_instance_class_name(*global_name);
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
- // HookUpGlobalProxy.
+ // GlobalProxy.
global_proxy->set_native_context(heap()->null_value());
// DetachGlobal.
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index 51022fd608..a1ba9dd713 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -80,6 +80,7 @@ class Bootstrapper final {
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_object_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer,
GlobalContextType context_type = FULL_CONTEXT);
Handle<JSGlobalProxy> NewRemoteContext(
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 6103971787..240d271b2b 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -326,11 +326,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
__ SmiUntag(r6);
@@ -474,11 +474,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r6);
__ EnterBuiltinFrame(cp, r1, r6);
__ Push(r2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r2);
__ LeaveBuiltinFrame(cp, r1, r6);
__ SmiUntag(r6);
@@ -574,8 +574,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(r1, r3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(r4, r0);
__ Pop(r1, r3);
@@ -737,19 +737,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ str(r2, FieldMemOperand(r1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ __ ldr(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ ldrsb(ip, MemOperand(ip));
- __ cmp(ip, Operand(StepIn));
- __ b(ge, &prepare_step_in_if_stepping);
+ __ cmp(ip, Operand(0));
+ __ b(ne, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
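
The stepping check above is reworked the same way on every architecture: instead of loading the debugger's last step action and comparing it against the StepIn threshold, the trampoline reads a dedicated one-byte hook flag and calls Runtime::kDebugOnFunctionCall whenever it is non-zero. The condition, restated as a sketch in C++ (pointer names illustrative):

    // Old: threshold comparison against the step action.
    if (*last_step_action >= StepIn) { /* prepare step-in */ }

    // New: any non-zero value in the hook byte requests a debugger callback.
    if (*debug_hook_on_function_call != 0) {
      // ... call Runtime::kDebugOnFunctionCall with the generator state.
    }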
@@ -790,14 +789,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
- __ b(ne, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r3, r3, r3, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
@@ -812,54 +812,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(r5);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp);
- __ Move(fp, sp);
- __ Push(cp, r4);
-
- // Restore the operand stack.
- __ ldr(r0, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
- __ ldr(r3, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ add(r0, r0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(r3, r0, Operand(r3, LSL, kPointerSizeLog2 - 1));
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ cmp(r0, r3);
- __ b(eq, &done_loop);
- __ ldr(ip, MemOperand(r0, kPointerSize, PostIndex));
- __ Push(ip);
- __ b(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ str(ip, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
- __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ add(r3, r3, Operand(r2, ASR, 1));
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Move(r0, r1); // Continuation expects generator object in r0.
- __ Jump(r3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r2, r4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r1, r2);
__ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
}
@@ -1078,6 +1035,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r9, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ strb(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1407,12 +1369,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ b(ne, &loop_bottom);
- // OSR id set to none?
- __ ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
- __ b(ne, &loop_bottom);
// Literals available?
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1485,14 +1441,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ b(ne, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ ldr(r5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ and_(r5, r5, Operand(Code::KindField::kMask));
- __ mov(r5, Operand(r5, LSR, Code::KindField::kShift));
- __ cmp(r5, Operand(Code::BUILTIN));
+ __ Move(r5, masm->CodeObject());
+ __ cmp(entry, r5);
__ b(eq, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, r5);
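
The tier-up check also gets simpler: rather than decoding Code::KindField to ask "is this still a builtin?", the new code compares the SharedFunctionInfo's code entry directly against the currently executing CompileLazy builtin (masm->CodeObject()). Roughly, as a sketch (simplified accessors, not exact V8 calls):

    Code* code = shared->code();
    if (code == isolate->builtins()->builtin(Builtins::kCompileLazy)) {
      // Still lazy: fall through to the runtime and compile.
    } else {
      // The SFI already has real code: install its entry on the closure,
      // followed by a write barrier for the code entry field.
      closure->set_code_entry(code->entry());
    }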
@@ -1609,14 +1565,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ mov(pc, r0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2158,7 +2109,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r0, &create_runtime);
// Load the map of argumentsList into r2.
@@ -2202,17 +2154,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(r0, r4);
__ b(&done_create);
+    // For holey JSArrays we need to check that the array prototype chain
+    // protector is intact and that the prototype is Array.prototype.
+ __ bind(&create_holey_array);
+ __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ __ ldr(r4, ContextMemOperand(r4, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(r2, r4);
+ __ b(ne, &create_runtime);
+ __ LoadRoot(r4, Heap::kArrayProtectorRootIndex);
+ __ ldr(r2, FieldMemOperand(r4, PropertyCell::kValueOffset));
+ __ cmp(r2, Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ b(ne, &create_runtime);
+ __ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ SmiUntag(r2);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+    // -- r2 and r4 must be preserved until the branch to create_holey_array.
__ bind(&create_array);
- __ ldr(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r2);
+ __ ldr(r5, FieldMemOperand(r2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r5);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(r2, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ cmp(r5, Operand(FAST_HOLEY_ELEMENTS));
__ b(hi, &create_runtime);
- __ cmp(r2, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ b(eq, &create_runtime);
+    // Only FAST_XXX kinds remain here; FAST_HOLEY_XXX are the odd values.
+ __ tst(r5, Operand(1));
+ __ b(ne, &create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ ldr(r2, FieldMemOperand(r0, JSArray::kLengthOffset));
__ ldr(r0, FieldMemOperand(r0, JSArray::kElementsOffset));
__ SmiUntag(r2);
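
The holey fast path is only sound under the invariant that the two new checks enforce: the array's prototype must still be the initial Array.prototype, and the array-protector cell must still read kProtectorValid. Together these guarantee that no indexed accessor on the prototype chain can intercept a hole. As a sketch (helper name hypothetical):

    bool CanTreatHoleAsUndefined(Isolate* isolate, JSArray* array) {
      // The prototype must be the unmodified initial Array.prototype ...
      if (array->map()->prototype() !=
          isolate->native_context()->initial_array_prototype()) {
        return false;
      }
      // ... and the array protector cell must still be valid, i.e. no
      // element accessor was ever installed on an Array prototype.
      PropertyCell* cell = PropertyCell::cast(
          isolate->heap()->root(Heap::kArrayProtectorRootIndex));
      return cell->value() == Smi::FromInt(Isolate::kProtectorValid);
    }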
@@ -2247,12 +2219,16 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(r4, Operand(0));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
Label done, loop;
__ bind(&loop);
__ cmp(r4, r2);
__ b(eq, &done);
__ add(ip, r0, Operand(r4, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(ip, FixedArray::kHeaderSize));
+ __ cmp(r5, ip);
+ __ mov(ip, r6, LeaveCC, eq);
__ Push(ip);
__ add(r4, r4, Operand(1));
__ b(&loop);
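
The push loop itself is what makes the holey path observably correct: Function.prototype.apply must present a hole as undefined, so each element is compared against the_hole and conditionally replaced. The `mov(ip, r6, LeaveCC, eq)` above is a predicated move; the arm64 port below uses Csel for the same purpose. In plain C++ the loop is roughly:

    for (int i = 0; i < length; i++) {
      Object* element = elements->get(i);
      if (element == heap->the_hole_value()) {
        element = heap->undefined_value();  // holes become undefined
      }
      PushArgument(element);  // illustrative stand-in for the stack store
    }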
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index aeb0508a20..08cf664724 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -315,11 +315,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
__ SmiUntag(x6);
@@ -467,11 +467,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(x6);
__ EnterBuiltinFrame(cp, x1, x6);
__ Push(x2); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(x2);
__ LeaveBuiltinFrame(cp, x1, x6);
__ SmiUntag(x6);
@@ -569,8 +569,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(constructor, new_target);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Mov(x4, x0);
__ Pop(new_target, constructor);
@@ -744,18 +744,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Str(x2, FieldMemOperand(x1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ Ldr(cp, FieldMemOperand(x1, JSGeneratorObject::kContextOffset));
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+ __ Ldr(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ Mov(x10, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ Mov(x10, Operand(debug_hook));
__ Ldrsb(x10, MemOperand(x10));
- __ CompareAndBranch(x10, Operand(StepIn), ge, &prepare_step_in_if_stepping);
+ __ CompareAndBranch(x10, Operand(0), ne, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -789,14 +788,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
__ PushMultipleTimes(x11, w10);
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
- __ B(ne, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ Ldr(x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w0, FieldMemOperand(
@@ -810,54 +810,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(x5);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(lr, fp);
- __ Move(fp, jssp);
- __ Push(cp, x4);
-
- // Restore the operand stack.
- __ Ldr(x0, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
- __ Ldr(w3, UntagSmiFieldMemOperand(x0, FixedArray::kLengthOffset));
- __ Add(x0, x0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Add(x3, x0, Operand(x3, LSL, kPointerSizeLog2));
- {
- Label done_loop, loop;
- __ Bind(&loop);
- __ Cmp(x0, x3);
- __ B(eq, &done_loop);
- __ Ldr(x10, MemOperand(x0, kPointerSize, PostIndex));
- __ Push(x10);
- __ B(&loop);
- __ Bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
- __ Str(x10, FieldMemOperand(x1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(x10, FieldMemOperand(x10, SharedFunctionInfo::kCodeOffset));
- __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
- __ Ldrsw(x11, UntagSmiFieldMemOperand(
- x1, JSGeneratorObject::kContinuationOffset));
- __ Add(x10, x10, x11);
- __ Mov(x12, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ Str(x12, FieldMemOperand(x1, JSGeneratorObject::kContinuationOffset));
- __ Move(x0, x1); // Continuation expects generator object in x0.
- __ Br(x10);
- }
-
__ Bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x1, x2, x4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(x2, x1);
__ Ldr(x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
}
@@ -1082,6 +1039,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ Mov(x10, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ Strb(x10, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1411,12 +1373,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Cmp(temp, native_context);
__ B(ne, &loop_bottom);
- // OSR id set to none?
- __ Ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Cmp(temp, Operand(Smi::FromInt(bailout_id)));
- __ B(ne, &loop_bottom);
// Literals available?
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1478,14 +1434,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ TestAndBranchIfAnySet(
temp, 1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte,
&gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ Ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ Ldr(x5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ and_(x5, x5, Operand(Code::KindField::kMask));
- __ Mov(x5, Operand(x5, LSR, Code::KindField::kShift));
- __ Cmp(x5, Operand(Code::BUILTIN));
+ __ Move(temp, masm->CodeObject());
+ __ Cmp(entry, temp);
__ B(eq, &gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@@ -1599,14 +1555,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Br(x0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2218,7 +2169,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(arguments_list, &create_runtime);
// Load native context.
@@ -2240,7 +2192,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ B(eq, &create_arguments);
// Check if argumentsList is a fast JSArray.
- __ CompareInstanceType(arguments_list_map, native_context, JS_ARRAY_TYPE);
+ __ CompareInstanceType(arguments_list_map, x10, JS_ARRAY_TYPE);
__ B(eq, &create_array);
// Ask the runtime to create the list (actually a FixedArray).
@@ -2265,14 +2217,42 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Mov(args, x10);
__ B(&done_create);
+  // For holey JSArrays we need to check that the array prototype chain
+  // protector is intact and that the prototype is Array.prototype.
+ __ Bind(&create_holey_array);
+ // -- x2 : arguments_list_map
+ // -- x4 : native_context
+ Register arguments_list_prototype = x2;
+ __ Ldr(arguments_list_prototype,
+ FieldMemOperand(arguments_list_map, Map::kPrototypeOffset));
+ __ Ldr(x10, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Cmp(arguments_list_prototype, x10);
+ __ B(ne, &create_runtime);
+ __ LoadRoot(x10, Heap::kArrayProtectorRootIndex);
+ __ Ldrsw(x11, UntagSmiFieldMemOperand(x10, PropertyCell::kValueOffset));
+ __ Cmp(x11, Isolate::kProtectorValid);
+ __ B(ne, &create_runtime);
+ __ Ldrsw(len,
+ UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+ __ B(&done_create);
+
// Try to create the list from a JSArray object.
__ Bind(&create_array);
__ Ldr(x10, FieldMemOperand(arguments_list_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(x10);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- // Branch for anything that's not FAST_{SMI_}ELEMENTS.
- __ TestAndBranchIfAnySet(x10, ~FAST_ELEMENTS, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  // Check if it is a holey array; the order of the cmp is important, as
+  // anything higher than FAST_HOLEY_ELEMENTS falls back to the runtime.
+ __ Cmp(x10, FAST_HOLEY_ELEMENTS);
+ __ B(hi, &create_runtime);
+  // Only FAST_XXX kinds remain here; FAST_HOLEY_XXX are the odd values.
+ __ Tbnz(x10, 0, &create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ Ldrsw(len,
UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
__ Ldr(args, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
@@ -2306,16 +2286,24 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label done, loop;
+ Label done, push, loop;
Register src = x4;
__ Add(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
__ Mov(x0, len); // The 'len' argument for Call() or Construct().
__ Cbz(len, &done);
+ Register the_hole_value = x11;
+ Register undefined_value = x12;
+    // We do not use the CompareRoot macro because it does a LoadRoot behind
+    // the scenes, which we want to avoid inside the loop.
+ __ LoadRoot(the_hole_value, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
__ Claim(len);
__ Bind(&loop);
__ Sub(len, len, 1);
__ Ldr(x10, MemOperand(src, kPointerSize, PostIndex));
+ __ Cmp(x10, the_hole_value);
+ __ Csel(x10, x10, undefined_value, ne);
__ Poke(x10, Operand(len, LSL, kPointerSizeLog2));
__ Cbnz(len, &loop);
__ Bind(&done);
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index defc4dcf62..d3798c3857 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -77,6 +77,7 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
!isolate->MayAccess(handle(isolate->context()), js_receiver)) {
isolate->ReportFailedAccessCheck(js_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return isolate->factory()->undefined_value();
}
raw_holder = GetCompatibleReceiver(isolate, *fun_data, *js_receiver);
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index c09f11b2e8..047d88ecea 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -6,6 +6,7 @@
#include "src/builtins/builtins-utils.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/contexts.h"
#include "src/elements.h"
@@ -32,7 +33,7 @@ inline bool ClampedToInteger(Isolate* isolate, Object* object, int* out) {
*out = static_cast<int>(value);
}
return true;
- } else if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+ } else if (object->IsNullOrUndefined(isolate)) {
*out = 0;
return true;
} else if (object->IsBoolean()) {
@@ -55,7 +56,13 @@ inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
Object* len_obj = object->InObjectPropertyAt(JSArgumentsObject::kLengthIndex);
if (!len_obj->IsSmi()) return false;
*out = Max(0, Smi::cast(len_obj)->value());
- return *out <= object->elements()->length();
+
+ FixedArray* parameters = FixedArray::cast(object->elements());
+ if (object->HasSloppyArgumentsElements()) {
+ FixedArray* arguments = FixedArray::cast(parameters->get(1));
+ return *out <= arguments->length();
+ }
+ return *out <= parameters->length();
}
inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
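
The fix reflects the layout of sloppy-arguments backing stores: when an object has sloppy arguments elements, elements() is a small header array whose slot 1 holds the real arguments store, so the length bound must be checked against that inner array rather than the header. Sketched from the hunk above (slot layout as assumed there):

    FixedArray* parameters = FixedArray::cast(object->elements());
    int capacity;
    if (object->HasSloppyArgumentsElements()) {
      // Layout: [context, arguments_store, mapped_entries...]; slot 1 is
      // the FixedArray that actually backs the arguments object.
      capacity = FixedArray::cast(parameters->get(1))->length();
    } else {
      capacity = parameters->length();
    }
    return *out <= capacity;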
@@ -144,14 +151,15 @@ MUST_USE_RESULT static Object* CallJsIntrinsic(Isolate* isolate,
int argc = args.length() - 1;
ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(i + 1);
+ argv[i] = args.at(i + 1);
}
RETURN_RESULT_OR_FAILURE(
isolate,
Execution::Call(isolate, function, args.receiver(), argc, argv.start()));
}
+} // namespace
-Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
+BUILTIN(ArrayPush) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1)) {
@@ -174,19 +182,158 @@ Object* DoArrayPush(Isolate* isolate, BuiltinArguments args) {
int new_length = accessor->Push(array, &args, to_add);
return Smi::FromInt(new_length);
}
-} // namespace
-BUILTIN(ArrayPush) { return DoArrayPush(isolate, args); }
-
-// TODO(verwaest): This is a temporary helper until the FastArrayPush stub can
-// tailcall to the builtin directly.
-RUNTIME_FUNCTION(Runtime_ArrayPush) {
- DCHECK_EQ(2, args.length());
- Arguments* incoming = reinterpret_cast<Arguments*>(args[0]);
- // Rewrap the arguments as builtins arguments.
- int argc = incoming->length() + BuiltinArguments::kNumExtraArgsWithReceiver;
- BuiltinArguments caller_args(argc, incoming->arguments() + 1);
- return DoArrayPush(isolate, caller_args);
+void Builtins::Generate_FastArrayPush(compiler::CodeAssemblerState* state) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
+ Variable arg_index(&assembler, MachineType::PointerRepresentation());
+ Label default_label(&assembler, &arg_index);
+ Label smi_transition(&assembler);
+ Label object_push_pre(&assembler);
+ Label object_push(&assembler, &arg_index);
+ Label double_push(&assembler, &arg_index);
+ Label double_transition(&assembler);
+ Label runtime(&assembler, Label::kDeferred);
+
+ Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+ CodeStubArguments args(&assembler, argc);
+ Node* receiver = args.GetReceiver();
+ Node* kind = nullptr;
+
+ Label fast(&assembler);
+ {
+ assembler.BranchIfFastJSArray(
+ receiver, context, CodeStubAssembler::FastJSArrayAccessMode::ANY_ACCESS,
+ &fast, &runtime);
+ }
+
+ assembler.Bind(&fast);
+ {
+ // Disallow pushing onto prototypes. It might be the JSArray prototype.
+ // Disallow pushing onto non-extensible objects.
+ assembler.Comment("Disallow pushing onto prototypes");
+ Node* map = assembler.LoadMap(receiver);
+ Node* bit_field2 = assembler.LoadMapBitField2(map);
+ int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+ (1 << Map::kIsExtensible);
+ Node* test = assembler.Word32And(bit_field2, assembler.Int32Constant(mask));
+ assembler.GotoIf(
+ assembler.Word32NotEqual(
+ test, assembler.Int32Constant(1 << Map::kIsExtensible)),
+ &runtime);
+
+ // Disallow pushing onto arrays in dictionary named property mode. We need
+ // to figure out whether the length property is still writable.
+ assembler.Comment(
+ "Disallow pushing onto arrays in dictionary named property mode");
+ assembler.GotoIf(assembler.IsDictionaryMap(map), &runtime);
+
+ // Check whether the length property is writable. The length property is the
+ // only default named property on arrays. It's nonconfigurable, hence is
+ // guaranteed to stay the first property.
+ Node* descriptors = assembler.LoadMapDescriptors(map);
+ Node* details = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToDetailsIndex(0));
+ assembler.GotoIf(
+ assembler.IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask),
+ &runtime);
+
+ arg_index.Bind(assembler.IntPtrConstant(0));
+ kind = assembler.DecodeWord32<Map::ElementsKindBits>(bit_field2);
+
+ assembler.GotoIf(
+ assembler.Int32GreaterThan(
+ kind, assembler.Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ &object_push_pre);
+
+ Node* new_length = assembler.BuildAppendJSArray(
+ FAST_SMI_ELEMENTS, context, receiver, args, arg_index, &smi_transition);
+ args.PopAndReturn(new_length);
+ }
+
+ // If the argument is not a smi, then use a heavyweight SetProperty to
+ // transition the array for only the single next element. If the argument is
+ // a smi, the failure is due to some other reason and we should fall back on
+ // the most generic implementation for the rest of the array.
+ assembler.Bind(&smi_transition);
+ {
+ Node* arg = args.AtIndex(arg_index.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(arg), &default_label);
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
+ // calling into the runtime to do the elements transition is overkill.
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ assembler.SmiConstant(STRICT));
+ assembler.Increment(arg_index);
+ assembler.GotoIfNotNumber(arg, &object_push);
+ assembler.Goto(&double_push);
+ }
+
+ assembler.Bind(&object_push_pre);
+ {
+ assembler.Branch(assembler.Int32GreaterThan(
+ kind, assembler.Int32Constant(FAST_HOLEY_ELEMENTS)),
+ &double_push, &object_push);
+ }
+
+ assembler.Bind(&object_push);
+ {
+ Node* new_length = assembler.BuildAppendJSArray(
+ FAST_ELEMENTS, context, receiver, args, arg_index, &default_label);
+ args.PopAndReturn(new_length);
+ }
+
+ assembler.Bind(&double_push);
+ {
+ Node* new_length =
+ assembler.BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, context, receiver,
+ args, arg_index, &double_transition);
+ args.PopAndReturn(new_length);
+ }
+
+ // If the argument is not a double, then use a heavyweight SetProperty to
+ // transition the array for only the single next element. If the argument is
+ // a double, the failure is due to some other reason and we should fall back
+ // on the most generic implementation for the rest of the array.
+ assembler.Bind(&double_transition);
+ {
+ Node* arg = args.AtIndex(arg_index.value());
+ assembler.GotoIfNumber(arg, &default_label);
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ // TODO(danno): Use the KeyedStoreGeneric stub here when possible,
+ // calling into the runtime to do the elements transition is overkill.
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver, length, arg,
+ assembler.SmiConstant(STRICT));
+ assembler.Increment(arg_index);
+ assembler.Goto(&object_push);
+ }
+
+  // Fallback that stores unprocessed arguments using the full, heavyweight
+ // SetProperty machinery.
+ assembler.Bind(&default_label);
+ {
+ args.ForEach(
+ [&assembler, receiver, context, &arg_index](Node* arg) {
+ Node* length = assembler.LoadJSArrayLength(receiver);
+ assembler.CallRuntime(Runtime::kSetProperty, context, receiver,
+ length, arg, assembler.SmiConstant(STRICT));
+ },
+ arg_index.value());
+ args.PopAndReturn(assembler.LoadJSArrayLength(receiver));
+ }
+
+ assembler.Bind(&runtime);
+ {
+ Node* target = assembler.LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+ assembler.TailCallStub(CodeFactory::ArrayPush(assembler.isolate()), context,
+ target, new_target, argc);
+ }
}
BUILTIN(ArrayPop) {
@@ -461,8 +608,9 @@ class ArrayConcatVisitor {
SeededNumberDictionary::cast(*storage_));
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+ Handle<JSObject> not_a_prototype_holder;
+ Handle<SeededNumberDictionary> result = SeededNumberDictionary::AtNumberPut(
+ dict, index, elm, not_a_prototype_holder);
if (!result.is_identical_to(dict)) {
// Dictionary needed to grow.
clear_storage();
@@ -533,9 +681,10 @@ class ArrayConcatVisitor {
if (!element->IsTheHole(isolate_)) {
// The object holding this backing store has just been allocated, so
// it cannot yet be used as a prototype.
+ Handle<JSObject> not_a_prototype_holder;
Handle<SeededNumberDictionary> new_storage =
SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- false);
+ not_a_prototype_holder);
if (!new_storage.is_identical_to(slow_storage)) {
slow_storage = loop_scope.CloseAndEscape(new_storage);
}
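
Both AtNumberPut call sites in the hunks above replace what was a boolean
flag with an empty Handle<JSObject>: passing no holder signals that there
is no prototype validity cell the store could need to invalidate, because
the object owning the backing store was just allocated. A rough analogue
of the idiom using std::optional (the names are hypothetical, not the V8
API):

#include <iostream>
#include <optional>

struct ObjectStub { bool prototype_cell_invalidated = false; };

// A dictionary put only touches prototype bookkeeping when a holder
// object is actually supplied.
void AtNumberPut(std::optional<ObjectStub*> holder) {
  if (holder.has_value() && *holder != nullptr) {
    (*holder)->prototype_cell_invalidated = true;
  }
}

int main() {
  std::optional<ObjectStub*> not_a_prototype_holder;  // empty: skip the work
  AtNumberPut(not_a_prototype_holder);

  ObjectStub obj;
  AtNumberPut(&obj);  // a real holder gets its cell invalidated
  std::cout << obj.prototype_cell_invalidated << "\n";  // 1
}
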
@@ -1001,8 +1150,9 @@ Object* Slow_ArrayConcat(BuiltinArguments* args, Handle<Object> species,
  // If the estimated number of elements is more than half of the length, a
  // fixed array (fast case) is more time- and space-efficient than a
  // dictionary.
- bool fast_case =
- is_array_species && (estimate_nof_elements * 2) >= estimate_result_length;
+ bool fast_case = is_array_species &&
+ (estimate_nof_elements * 2) >= estimate_result_length &&
+ isolate->IsIsConcatSpreadableLookupChainIntact();
if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
Handle<FixedArrayBase> storage =
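
The hunk above tightens the fast-case test in two ways: the result must be
expected to be at least 50% dense, and the Symbol.isConcatSpreadable
lookup chain must still be intact, so user code that has tampered with
spreadability is routed to the slow path. The density arithmetic as a
standalone sketch:

#include <cstdint>
#include <iostream>

// A flat fixed-array result pays off once at least half of the result
// indices are expected to hold elements; below that a dictionary wins.
bool UseFastCase(bool is_array_species, bool spreadable_chain_intact,
                 uint32_t estimate_nof_elements,
                 uint32_t estimate_result_length) {
  return is_array_species && spreadable_chain_intact &&
         estimate_nof_elements * 2 >= estimate_result_length;
}

int main() {
  std::cout << UseFastCase(true, true, 500, 1000) << "\n";   // 1: 50% dense
  std::cout << UseFastCase(true, true, 10, 1000) << "\n";    // 0: too sparse
  std::cout << UseFastCase(true, false, 500, 1000) << "\n";  // 0: protector gone
}
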
@@ -1202,7 +1352,7 @@ BUILTIN(ArrayConcat) {
Handle<Object> receiver = args.receiver();
// TODO(bmeurer): Do we really care about the exact exception message here?
- if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -1237,146 +1387,149 @@ BUILTIN(ArrayConcat) {
return Slow_ArrayConcat(&args, species, isolate);
}
-void Builtins::Generate_ArrayIsArray(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIsArray(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+ Node* object = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
- Label call_runtime(assembler), return_true(assembler),
- return_false(assembler);
+ Label call_runtime(&assembler), return_true(&assembler),
+ return_false(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(object), &return_false);
- Node* instance_type = assembler->LoadInstanceType(object);
+ assembler.GotoIf(assembler.TaggedIsSmi(object), &return_false);
+ Node* instance_type = assembler.LoadInstanceType(object);
- assembler->GotoIf(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(JS_ARRAY_TYPE)),
- &return_true);
+ assembler.GotoIf(assembler.Word32Equal(
+ instance_type, assembler.Int32Constant(JS_ARRAY_TYPE)),
+ &return_true);
// TODO(verwaest): Handle proxies in-place.
- assembler->Branch(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(JS_PROXY_TYPE)),
- &call_runtime, &return_false);
+ assembler.Branch(assembler.Word32Equal(
+ instance_type, assembler.Int32Constant(JS_PROXY_TYPE)),
+ &call_runtime, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
- assembler->Bind(&call_runtime);
- assembler->Return(
- assembler->CallRuntime(Runtime::kArrayIsArray, context, object));
+ assembler.Bind(&call_runtime);
+ assembler.Return(
+ assembler.CallRuntime(Runtime::kArrayIsArray, context, object));
}
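
Generate_ArrayIsArray answers immediately for smis and plain JS arrays but
defers proxies to Runtime::kArrayIsArray, since the spec's IsArray must
follow a proxy to its target (and throw for a revoked proxy). A standalone
sketch of that dispatch, with toy polymorphic types standing in for the
instance-type checks:

#include <iostream>
#include <memory>

struct HeapObject { virtual ~HeapObject() = default; };
struct JSArray : HeapObject {};
struct JSProxy : HeapObject {
  std::shared_ptr<HeapObject> target;  // the proxy's [[ProxyTarget]]
};

bool IsArray(const std::shared_ptr<HeapObject>& o) {
  if (dynamic_cast<JSArray*>(o.get())) return true;   // return_true
  if (auto* p = dynamic_cast<JSProxy*>(o.get()))      // call_runtime:
    return p->target && IsArray(p->target);           // recurse into target
  return false;                                       // return_false
}

int main() {
  auto array = std::make_shared<JSArray>();
  auto proxy = std::make_shared<JSProxy>();
  proxy->target = array;
  std::cout << IsArray(array) << IsArray(proxy) << "\n";  // 11
}
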
-void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIncludes(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* array = assembler->Parameter(0);
- Node* search_element = assembler->Parameter(1);
- Node* start_from = assembler->Parameter(2);
- Node* context = assembler->Parameter(3 + 2);
+ Node* array = assembler.Parameter(0);
+ Node* search_element = assembler.Parameter(1);
+ Node* start_from = assembler.Parameter(2);
+ Node* context = assembler.Parameter(3 + 2);
- Node* intptr_zero = assembler->IntPtrConstant(0);
- Node* intptr_one = assembler->IntPtrConstant(1);
+ Node* intptr_zero = assembler.IntPtrConstant(0);
+ Node* intptr_one = assembler.IntPtrConstant(1);
- Node* the_hole = assembler->TheHoleConstant();
- Node* undefined = assembler->UndefinedConstant();
- Node* heap_number_map = assembler->HeapNumberMapConstant();
+ Node* the_hole = assembler.TheHoleConstant();
+ Node* undefined = assembler.UndefinedConstant();
- Variable len_var(assembler, MachineType::PointerRepresentation()),
- index_var(assembler, MachineType::PointerRepresentation()),
- start_from_var(assembler, MachineType::PointerRepresentation());
+ Variable len_var(&assembler, MachineType::PointerRepresentation()),
+ index_var(&assembler, MachineType::PointerRepresentation()),
+ start_from_var(&assembler, MachineType::PointerRepresentation());
- Label init_k(assembler), return_true(assembler), return_false(assembler),
- call_runtime(assembler);
+ Label init_k(&assembler), return_true(&assembler), return_false(&assembler),
+ call_runtime(&assembler);
- Label init_len(assembler);
+ Label init_len(&assembler);
index_var.Bind(intptr_zero);
len_var.Bind(intptr_zero);
  // Take the slow path if not a JSArray, if retrieving elements requires
  // traversing the prototype chain, or if access checks are required.
- assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+ assembler.BranchIfFastJSArray(
+ array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &init_len, &call_runtime);
- assembler->Bind(&init_len);
+ assembler.Bind(&init_len);
{
    // Handle the case where the JSArray length is not a Smi in the runtime
- Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+ Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
+ assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
- len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
- &return_false, &init_k);
+ len_var.Bind(assembler.SmiToWord(len));
+ assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
+ &return_false, &init_k);
}
- assembler->Bind(&init_k);
+ assembler.Bind(&init_k);
{
- Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
- init_k_zero(assembler), init_k_n(assembler);
- Node* tagged_n = assembler->ToInteger(context, start_from);
+ Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
+ init_k_zero(&assembler), init_k_n(&assembler);
+ Node* tagged_n = assembler.ToInteger(context, start_from);
- assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
- assembler->Bind(&init_k_smi);
+ assembler.Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiUntag(tagged_n));
- assembler->Goto(&init_k_n);
+ start_from_var.Bind(assembler.SmiUntag(tagged_n));
+ assembler.Goto(&init_k_n);
}
- assembler->Bind(&init_k_heap_num);
+ assembler.Bind(&init_k_heap_num);
{
- Label do_return_false(assembler);
+ Label do_return_false(&assembler);
      // This rounding is lossless for all valid lengths.
- Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
- assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_false);
- start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
- assembler->TruncateFloat64ToWord32(fp_n)));
- assembler->Goto(&init_k_n);
-
- assembler->Bind(&do_return_false);
+ Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
+ Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
+ assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_false);
+ start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
+ assembler.TruncateFloat64ToWord32(fp_n)));
+ assembler.Goto(&init_k_n);
+
+ assembler.Bind(&do_return_false);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&return_false);
+ assembler.Goto(&return_false);
}
}
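
The "lossless" comment above holds because the fast path has already
required a Smi length, and every JSArray length is below 2^32, far inside
the range where float64 represents integers exactly (up to 2^53); the
fp_n >= fp_len comparison therefore cannot be fooled by rounding. A quick
check of that invariant in plain C++:

#include <cassert>
#include <cstdint>

int main() {
  // Every possible JSArray length round-trips exactly through double.
  for (uint64_t len : {0ull, 1ull, (1ull << 31), (1ull << 32) - 1}) {
    double fp = static_cast<double>(len);
    assert(static_cast<uint64_t>(fp) == len);
  }
  return 0;
}
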
- assembler->Bind(&init_k_n);
+ assembler.Bind(&init_k_n);
{
- Label if_positive(assembler), if_negative(assembler), done(assembler);
- assembler->Branch(
- assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
+ Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
+ assembler.Branch(
+ assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
- assembler->Bind(&if_positive);
+ assembler.Bind(&if_positive);
{
index_var.Bind(start_from_var.value());
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&if_negative);
+ assembler.Bind(&if_negative);
{
index_var.Bind(
- assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
- assembler->Branch(
- assembler->IntPtrLessThan(index_var.value(), intptr_zero),
+ assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
+ assembler.Branch(
+ assembler.IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
- assembler->Bind(&init_k_zero);
+ assembler.Bind(&init_k_zero);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
}
}
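
The init_k block above is the spec's start-index computation for
Array.prototype.includes: ToInteger the fromIndex, count a negative value
back from the end, clamp at zero (init_k_zero), and let an index at or
past the length simply fall out of the search loop. The equivalent
standalone logic:

#include <algorithm>
#include <cstdint>
#include <iostream>

intptr_t NormalizeStartIndex(intptr_t n, intptr_t len) {
  if (n < 0) return std::max<intptr_t>(len + n, 0);  // init_k_zero clamp
  return n;  // n >= len just makes the search loop exit immediately
}

int main() {
  std::cout << NormalizeStartIndex(-2, 5) << "\n";   // 3
  std::cout << NormalizeStartIndex(-10, 5) << "\n";  // 0
  std::cout << NormalizeStartIndex(7, 5) << "\n";    // 7: nothing found
}
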
@@ -1385,443 +1538,435 @@ void Builtins::Generate_ArrayIncludes(CodeStubAssembler* assembler) {
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};
- Label if_smiorobjects(assembler), if_packed_doubles(assembler),
- if_holey_doubles(assembler);
+ Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
+ if_holey_doubles(&assembler);
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
&if_smiorobjects, &if_smiorobjects,
&if_packed_doubles, &if_holey_doubles};
- Node* map = assembler->LoadMap(array);
- Node* elements_kind = assembler->LoadMapElementsKind(map);
- Node* elements = assembler->LoadElements(array);
- assembler->Switch(elements_kind, &return_false, kElementsKind,
- element_kind_handlers, arraysize(kElementsKind));
+ Node* map = assembler.LoadMap(array);
+ Node* elements_kind = assembler.LoadMapElementsKind(map);
+ Node* elements = assembler.LoadElements(array);
+ assembler.Switch(elements_kind, &return_false, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
- assembler->Bind(&if_smiorobjects);
+ assembler.Bind(&if_smiorobjects);
{
- Variable search_num(assembler, MachineRepresentation::kFloat64);
- Label ident_loop(assembler, &index_var),
- heap_num_loop(assembler, &search_num),
- string_loop(assembler, &index_var), simd_loop(assembler),
- undef_loop(assembler, &index_var), not_smi(assembler),
- not_heap_num(assembler);
-
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &undef_loop);
- Node* map = assembler->LoadMap(search_element);
- assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
- &not_heap_num);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_heap_num);
- Node* search_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsStringInstanceType(search_type),
- &string_loop);
- assembler->GotoIf(
- assembler->Word32Equal(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(&assembler, &index_var),
+ heap_num_loop(&assembler, &search_num),
+ string_loop(&assembler, &index_var), simd_loop(&assembler),
+ undef_loop(&assembler, &index_var), not_smi(&assembler),
+ not_heap_num(&assembler);
+
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_smi);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler.LoadMap(search_element);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_heap_num);
+ Node* search_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
+ assembler.GotoIf(
+ assembler.Word32Equal(search_type,
+ assembler.Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
- assembler->Goto(&ident_loop);
+ assembler.Goto(&ident_loop);
- assembler->Bind(&ident_loop);
+ assembler.Bind(&ident_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, search_element),
- &return_true);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, search_element),
+ &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&ident_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&ident_loop);
}
- assembler->Bind(&undef_loop);
+ assembler.Bind(&undef_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, undefined),
- &return_true);
- assembler->GotoIf(assembler->WordEqual(element_k, the_hole),
- &return_true);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, undefined), &return_true);
+ assembler.GotoIf(assembler.WordEqual(element_k, the_hole), &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&undef_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&undef_loop);
}
- assembler->Bind(&heap_num_loop);
+ assembler.Bind(&heap_num_loop);
{
- Label nan_loop(assembler, &index_var),
- not_nan_loop(assembler, &index_var);
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler), not_smi(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler), not_smi(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->SmiToFloat64(element_k)),
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.SmiToFloat64(element_k)),
&return_true, &continue_loop);
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->LoadHeapNumberValue(element_k)),
+ assembler.Bind(&not_smi);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
+ &continue_loop);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.LoadHeapNumberValue(element_k)),
&return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->BranchIfFloat64IsNaN(
- assembler->LoadHeapNumberValue(element_k), &return_true,
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
&continue_loop);
+ assembler.BranchIfFloat64IsNaN(assembler.LoadHeapNumberValue(element_k),
+ &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
}
- assembler->Bind(&string_loop);
+ assembler.Bind(&string_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->IsStringInstanceType(
- assembler->LoadInstanceType(element_k)),
- &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
+ &continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Callable callable = CodeFactory::StringEqual(assembler.isolate());
Node* result =
- assembler->CallStub(callable, context, search_element, element_k);
- assembler->Branch(
- assembler->WordEqual(assembler->BooleanConstant(true), result),
+ assembler.CallStub(callable, context, search_element, element_k);
+ assembler.Branch(
+ assembler.WordEqual(assembler.BooleanConstant(true), result),
&return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&string_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&string_loop);
}
- assembler->Bind(&simd_loop);
+ assembler.Bind(&simd_loop);
{
- Label continue_loop(assembler, &index_var),
- loop_body(assembler, &index_var);
- Node* map = assembler->LoadMap(search_element);
-
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler, &index_var),
+ loop_body(&assembler, &index_var);
+ Node* map = assembler.LoadMap(search_element);
+
+ assembler.Goto(&loop_body);
+ assembler.Bind(&loop_body);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- Node* map_k = assembler->LoadMap(element_k);
- assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_true, &continue_loop);
+ Node* map_k = assembler.LoadMap(element_k);
+ assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&loop_body);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&loop_body);
}
}
- assembler->Bind(&if_packed_doubles);
+ assembler.Bind(&if_packed_doubles);
{
- Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
- hole_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
+ search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_false);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_false);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
// Search for NaN
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
}
- assembler->Bind(&if_holey_doubles);
+ assembler.Bind(&if_holey_doubles);
{
- Label nan_loop(assembler, &index_var), not_nan_loop(assembler, &index_var),
- hole_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label nan_loop(&assembler, &index_var),
+ not_nan_loop(&assembler, &index_var), hole_loop(&assembler, &index_var),
+ search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &hole_loop);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_false);
+ assembler.Bind(&search_notnan);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &hole_loop);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_false);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &nan_loop,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
// Search for NaN
- assembler->Bind(&nan_loop);
+ assembler.Bind(&nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&nan_loop);
+ assembler.BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&nan_loop);
}
// Search for the Hole
- assembler->Bind(&hole_loop);
+ assembler.Bind(&hole_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_false);
// Check if the element is a double hole, but don't load it.
- assembler->LoadFixedDoubleArrayElement(
+ assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::None(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &return_true);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&hole_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&hole_loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kArrayIncludes_Slow,
- context, array, search_element,
- start_from));
+ assembler.Bind(&call_runtime);
+ assembler.Return(assembler.CallRuntime(Runtime::kArrayIncludes_Slow, context,
+ array, search_element, start_from));
}
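
Note the semantic asymmetry with Generate_ArrayIndexOf below: includes
compares with SameValueZero, so the nan_loop lets NaN find NaN and holes
read as undefined, while indexOf uses strict equality, which is why its
heap-number path jumps straight to return_not_found for a NaN search
value. A sketch of the two comparison semantics side by side:

#include <cmath>
#include <iostream>
#include <vector>

// SameValueZero: like strict equality, except NaN matches NaN.
bool SameValueZero(double a, double b) {
  return (std::isnan(a) && std::isnan(b)) || a == b;
}

int main() {
  std::vector<double> elements = {1.0, NAN, 3.0};
  double search = NAN;
  bool includes = false;
  int index_of = -1;
  for (size_t i = 0; i < elements.size(); ++i) {
    if (SameValueZero(elements[i], search)) includes = true;
    if (index_of < 0 && elements[i] == search) index_of = (int)i;
  }
  std::cout << includes << " " << index_of << "\n";  // 1 -1
}
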
-void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_ArrayIndexOf(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* array = assembler->Parameter(0);
- Node* search_element = assembler->Parameter(1);
- Node* start_from = assembler->Parameter(2);
- Node* context = assembler->Parameter(3 + 2);
+ Node* array = assembler.Parameter(0);
+ Node* search_element = assembler.Parameter(1);
+ Node* start_from = assembler.Parameter(2);
+ Node* context = assembler.Parameter(3 + 2);
- Node* intptr_zero = assembler->IntPtrConstant(0);
- Node* intptr_one = assembler->IntPtrConstant(1);
+ Node* intptr_zero = assembler.IntPtrConstant(0);
+ Node* intptr_one = assembler.IntPtrConstant(1);
- Node* undefined = assembler->UndefinedConstant();
- Node* heap_number_map = assembler->HeapNumberMapConstant();
+ Node* undefined = assembler.UndefinedConstant();
- Variable len_var(assembler, MachineType::PointerRepresentation()),
- index_var(assembler, MachineType::PointerRepresentation()),
- start_from_var(assembler, MachineType::PointerRepresentation());
+ Variable len_var(&assembler, MachineType::PointerRepresentation()),
+ index_var(&assembler, MachineType::PointerRepresentation()),
+ start_from_var(&assembler, MachineType::PointerRepresentation());
- Label init_k(assembler), return_found(assembler), return_not_found(assembler),
- call_runtime(assembler);
+ Label init_k(&assembler), return_found(&assembler),
+ return_not_found(&assembler), call_runtime(&assembler);
- Label init_len(assembler);
+ Label init_len(&assembler);
index_var.Bind(intptr_zero);
len_var.Bind(intptr_zero);
  // Take the slow path if not a JSArray, if retrieving elements requires
  // traversing the prototype chain, or if access checks are required.
- assembler->BranchIfFastJSArray(array, context, &init_len, &call_runtime);
+ assembler.BranchIfFastJSArray(
+ array, context, CodeStubAssembler::FastJSArrayAccessMode::INBOUNDS_READ,
+ &init_len, &call_runtime);
- assembler->Bind(&init_len);
+ assembler.Bind(&init_len);
{
    // Handle the case where the JSArray length is not a Smi in the runtime
- Node* len = assembler->LoadObjectField(array, JSArray::kLengthOffset);
- assembler->GotoUnless(assembler->TaggedIsSmi(len), &call_runtime);
+ Node* len = assembler.LoadObjectField(array, JSArray::kLengthOffset);
+ assembler.GotoUnless(assembler.TaggedIsSmi(len), &call_runtime);
- len_var.Bind(assembler->SmiToWord(len));
- assembler->Branch(assembler->WordEqual(len_var.value(), intptr_zero),
- &return_not_found, &init_k);
+ len_var.Bind(assembler.SmiToWord(len));
+ assembler.Branch(assembler.WordEqual(len_var.value(), intptr_zero),
+ &return_not_found, &init_k);
}
- assembler->Bind(&init_k);
+ assembler.Bind(&init_k);
{
- Label done(assembler), init_k_smi(assembler), init_k_heap_num(assembler),
- init_k_zero(assembler), init_k_n(assembler);
- Node* tagged_n = assembler->ToInteger(context, start_from);
+ Label done(&assembler), init_k_smi(&assembler), init_k_heap_num(&assembler),
+ init_k_zero(&assembler), init_k_n(&assembler);
+ Node* tagged_n = assembler.ToInteger(context, start_from);
- assembler->Branch(assembler->TaggedIsSmi(tagged_n), &init_k_smi,
- &init_k_heap_num);
+ assembler.Branch(assembler.TaggedIsSmi(tagged_n), &init_k_smi,
+ &init_k_heap_num);
- assembler->Bind(&init_k_smi);
+ assembler.Bind(&init_k_smi);
{
- start_from_var.Bind(assembler->SmiUntag(tagged_n));
- assembler->Goto(&init_k_n);
+ start_from_var.Bind(assembler.SmiUntag(tagged_n));
+ assembler.Goto(&init_k_n);
}
- assembler->Bind(&init_k_heap_num);
+ assembler.Bind(&init_k_heap_num);
{
- Label do_return_not_found(assembler);
+ Label do_return_not_found(&assembler);
      // This rounding is lossless for all valid lengths.
- Node* fp_len = assembler->RoundIntPtrToFloat64(len_var.value());
- Node* fp_n = assembler->LoadHeapNumberValue(tagged_n);
- assembler->GotoIf(assembler->Float64GreaterThanOrEqual(fp_n, fp_len),
- &do_return_not_found);
- start_from_var.Bind(assembler->ChangeInt32ToIntPtr(
- assembler->TruncateFloat64ToWord32(fp_n)));
- assembler->Goto(&init_k_n);
-
- assembler->Bind(&do_return_not_found);
+ Node* fp_len = assembler.RoundIntPtrToFloat64(len_var.value());
+ Node* fp_n = assembler.LoadHeapNumberValue(tagged_n);
+ assembler.GotoIf(assembler.Float64GreaterThanOrEqual(fp_n, fp_len),
+ &do_return_not_found);
+ start_from_var.Bind(assembler.ChangeInt32ToIntPtr(
+ assembler.TruncateFloat64ToWord32(fp_n)));
+ assembler.Goto(&init_k_n);
+
+ assembler.Bind(&do_return_not_found);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&return_not_found);
+ assembler.Goto(&return_not_found);
}
}
- assembler->Bind(&init_k_n);
+ assembler.Bind(&init_k_n);
{
- Label if_positive(assembler), if_negative(assembler), done(assembler);
- assembler->Branch(
- assembler->IntPtrLessThan(start_from_var.value(), intptr_zero),
+ Label if_positive(&assembler), if_negative(&assembler), done(&assembler);
+ assembler.Branch(
+ assembler.IntPtrLessThan(start_from_var.value(), intptr_zero),
&if_negative, &if_positive);
- assembler->Bind(&if_positive);
+ assembler.Bind(&if_positive);
{
index_var.Bind(start_from_var.value());
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&if_negative);
+ assembler.Bind(&if_negative);
{
index_var.Bind(
- assembler->IntPtrAdd(len_var.value(), start_from_var.value()));
- assembler->Branch(
- assembler->IntPtrLessThan(index_var.value(), intptr_zero),
+ assembler.IntPtrAdd(len_var.value(), start_from_var.value()));
+ assembler.Branch(
+ assembler.IntPtrLessThan(index_var.value(), intptr_zero),
&init_k_zero, &done);
}
- assembler->Bind(&init_k_zero);
+ assembler.Bind(&init_k_zero);
{
index_var.Bind(intptr_zero);
- assembler->Goto(&done);
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
}
}
@@ -1830,384 +1975,387 @@ void Builtins::Generate_ArrayIndexOf(CodeStubAssembler* assembler) {
FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
};
- Label if_smiorobjects(assembler), if_packed_doubles(assembler),
- if_holey_doubles(assembler);
+ Label if_smiorobjects(&assembler), if_packed_doubles(&assembler),
+ if_holey_doubles(&assembler);
Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
&if_smiorobjects, &if_smiorobjects,
&if_packed_doubles, &if_holey_doubles};
- Node* map = assembler->LoadMap(array);
- Node* elements_kind = assembler->LoadMapElementsKind(map);
- Node* elements = assembler->LoadElements(array);
- assembler->Switch(elements_kind, &return_not_found, kElementsKind,
- element_kind_handlers, arraysize(kElementsKind));
+ Node* map = assembler.LoadMap(array);
+ Node* elements_kind = assembler.LoadMapElementsKind(map);
+ Node* elements = assembler.LoadElements(array);
+ assembler.Switch(elements_kind, &return_not_found, kElementsKind,
+ element_kind_handlers, arraysize(kElementsKind));
- assembler->Bind(&if_smiorobjects);
+ assembler.Bind(&if_smiorobjects);
{
- Variable search_num(assembler, MachineRepresentation::kFloat64);
- Label ident_loop(assembler, &index_var),
- heap_num_loop(assembler, &search_num),
- string_loop(assembler, &index_var), simd_loop(assembler),
- undef_loop(assembler, &index_var), not_smi(assembler),
- not_heap_num(assembler);
-
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordEqual(search_element, undefined),
- &undef_loop);
- Node* map = assembler->LoadMap(search_element);
- assembler->GotoIf(assembler->WordNotEqual(map, heap_number_map),
- &not_heap_num);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
- assembler->Goto(&heap_num_loop);
-
- assembler->Bind(&not_heap_num);
- Node* search_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsStringInstanceType(search_type),
- &string_loop);
- assembler->GotoIf(
- assembler->Word32Equal(search_type,
- assembler->Int32Constant(SIMD128_VALUE_TYPE)),
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
+ Label ident_loop(&assembler, &index_var),
+ heap_num_loop(&assembler, &search_num),
+ string_loop(&assembler, &index_var), simd_loop(&assembler),
+ undef_loop(&assembler, &index_var), not_smi(&assembler),
+ not_heap_num(&assembler);
+
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &not_smi);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_smi);
+ assembler.GotoIf(assembler.WordEqual(search_element, undefined),
+ &undef_loop);
+ Node* map = assembler.LoadMap(search_element);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(map), &not_heap_num);
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
+ assembler.Goto(&heap_num_loop);
+
+ assembler.Bind(&not_heap_num);
+ Node* search_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsStringInstanceType(search_type), &string_loop);
+ assembler.GotoIf(
+ assembler.Word32Equal(search_type,
+ assembler.Int32Constant(SIMD128_VALUE_TYPE)),
&simd_loop);
- assembler->Goto(&ident_loop);
+ assembler.Goto(&ident_loop);
- assembler->Bind(&ident_loop);
+ assembler.Bind(&ident_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, search_element),
- &return_found);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, search_element),
+ &return_found);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&ident_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&ident_loop);
}
- assembler->Bind(&undef_loop);
+ assembler.Bind(&undef_loop);
{
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->WordEqual(element_k, undefined),
- &return_found);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.WordEqual(element_k, undefined),
+ &return_found);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&undef_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&undef_loop);
}
- assembler->Bind(&heap_num_loop);
+ assembler.Bind(&heap_num_loop);
{
- Label not_nan_loop(assembler, &index_var);
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ Label not_nan_loop(&assembler, &index_var);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler), not_smi(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler), not_smi(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoUnless(assembler->TaggedIsSmi(element_k), &not_smi);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->SmiToFloat64(element_k)),
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoUnless(assembler.TaggedIsSmi(element_k), &not_smi);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.SmiToFloat64(element_k)),
&return_found, &continue_loop);
- assembler->Bind(&not_smi);
- assembler->GotoIf(assembler->WordNotEqual(assembler->LoadMap(element_k),
- heap_number_map),
- &continue_loop);
- assembler->Branch(
- assembler->Float64Equal(search_num.value(),
- assembler->LoadHeapNumberValue(element_k)),
+ assembler.Bind(&not_smi);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(element_k)),
+ &continue_loop);
+ assembler.Branch(
+ assembler.Float64Equal(search_num.value(),
+ assembler.LoadHeapNumberValue(element_k)),
&return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&string_loop);
+ assembler.Bind(&string_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
- assembler->GotoUnless(assembler->IsStringInstanceType(
- assembler->LoadInstanceType(element_k)),
- &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
+ assembler.GotoUnless(
+ assembler.IsStringInstanceType(assembler.LoadInstanceType(element_k)),
+ &continue_loop);
// TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(assembler->isolate());
+ Callable callable = CodeFactory::StringEqual(assembler.isolate());
Node* result =
- assembler->CallStub(callable, context, search_element, element_k);
- assembler->Branch(
- assembler->WordEqual(assembler->BooleanConstant(true), result),
+ assembler.CallStub(callable, context, search_element, element_k);
+ assembler.Branch(
+ assembler.WordEqual(assembler.BooleanConstant(true), result),
&return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&string_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&string_loop);
}
- assembler->Bind(&simd_loop);
+ assembler.Bind(&simd_loop);
{
- Label continue_loop(assembler, &index_var),
- loop_body(assembler, &index_var);
- Node* map = assembler->LoadMap(search_element);
-
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler, &index_var),
+ loop_body(&assembler, &index_var);
+ Node* map = assembler.LoadMap(search_element);
+
+ assembler.Goto(&loop_body);
+ assembler.Bind(&loop_body);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedArrayElement(
- elements, index_var.value(), 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->GotoIf(assembler->TaggedIsSmi(element_k), &continue_loop);
+ Node* element_k =
+ assembler.LoadFixedArrayElement(elements, index_var.value());
+ assembler.GotoIf(assembler.TaggedIsSmi(element_k), &continue_loop);
- Node* map_k = assembler->LoadMap(element_k);
- assembler->BranchIfSimd128Equal(search_element, map, element_k, map_k,
- &return_found, &continue_loop);
+ Node* map_k = assembler.LoadMap(element_k);
+ assembler.BranchIfSimd128Equal(search_element, map, element_k, map_k,
+ &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&loop_body);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&loop_body);
}
}
- assembler->Bind(&if_packed_doubles);
+ assembler.Bind(&if_packed_doubles);
{
- Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_not_found);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_not_found);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
+ elements, index_var.value(), MachineType::Float64());
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&if_holey_doubles);
+ assembler.Bind(&if_holey_doubles);
{
- Label not_nan_loop(assembler, &index_var), search_notnan(assembler);
- Variable search_num(assembler, MachineRepresentation::kFloat64);
+ Label not_nan_loop(&assembler, &index_var), search_notnan(&assembler);
+ Variable search_num(&assembler, MachineRepresentation::kFloat64);
- assembler->GotoUnless(assembler->TaggedIsSmi(search_element),
- &search_notnan);
- search_num.Bind(assembler->SmiToFloat64(search_element));
- assembler->Goto(&not_nan_loop);
+ assembler.GotoUnless(assembler.TaggedIsSmi(search_element), &search_notnan);
+ search_num.Bind(assembler.SmiToFloat64(search_element));
+ assembler.Goto(&not_nan_loop);
- assembler->Bind(&search_notnan);
- assembler->GotoIf(assembler->WordNotEqual(
- assembler->LoadMap(search_element), heap_number_map),
- &return_not_found);
+ assembler.Bind(&search_notnan);
+ assembler.GotoUnless(
+ assembler.IsHeapNumberMap(assembler.LoadMap(search_element)),
+ &return_not_found);
- search_num.Bind(assembler->LoadHeapNumberValue(search_element));
+ search_num.Bind(assembler.LoadHeapNumberValue(search_element));
- assembler->BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
+ assembler.BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
+ &not_nan_loop);
// Search for HeapNumber
- assembler->Bind(&not_nan_loop);
+ assembler.Bind(&not_nan_loop);
{
- Label continue_loop(assembler);
- assembler->GotoUnless(
- assembler->UintPtrLessThan(index_var.value(), len_var.value()),
+ Label continue_loop(&assembler);
+ assembler.GotoUnless(
+ assembler.UintPtrLessThan(index_var.value(), len_var.value()),
&return_not_found);
// Load double value or continue if it contains a double hole.
- Node* element_k = assembler->LoadFixedDoubleArrayElement(
+ Node* element_k = assembler.LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
CodeStubAssembler::INTPTR_PARAMETERS, &continue_loop);
- assembler->Branch(assembler->Float64Equal(element_k, search_num.value()),
- &return_found, &continue_loop);
- assembler->Bind(&continue_loop);
- index_var.Bind(assembler->IntPtrAdd(index_var.value(), intptr_one));
- assembler->Goto(&not_nan_loop);
+ assembler.Branch(assembler.Float64Equal(element_k, search_num.value()),
+ &return_found, &continue_loop);
+ assembler.Bind(&continue_loop);
+ index_var.Bind(assembler.IntPtrAdd(index_var.value(), intptr_one));
+ assembler.Goto(&not_nan_loop);
}
}
- assembler->Bind(&return_found);
- assembler->Return(assembler->ChangeInt32ToTagged(index_var.value()));
+ assembler.Bind(&return_found);
+ assembler.Return(assembler.SmiTag(index_var.value()));
- assembler->Bind(&return_not_found);
- assembler->Return(assembler->NumberConstant(-1));
+ assembler.Bind(&return_not_found);
+ assembler.Return(assembler.NumberConstant(-1));
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kArrayIndexOf, context,
- array, search_element, start_from));
+ assembler.Bind(&call_runtime);
+ assembler.Return(assembler.CallRuntime(Runtime::kArrayIndexOf, context, array,
+ search_element, start_from));
}
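
One behavioral change above: return_found now tags the result with SmiTag
instead of ChangeInt32ToTagged, which is safe because an index found on
the fast path always fits in Smi range. On a 32-bit-style Smi encoding the
tag is a single left shift with a zero tag bit (64-bit V8 shifts by 32
instead); a sketch of the round trip:

#include <cassert>
#include <cstdint>

// Small non-negative integers only; real Smis also cover negatives.
intptr_t SmiTag(intptr_t value) { return value << 1; }
intptr_t SmiUntag(intptr_t tagged) { return tagged >> 1; }

int main() {
  assert(SmiUntag(SmiTag(42)) == 42);
  assert((SmiTag(42) & 1) == 0);  // low bit clear marks a Smi
  return 0;
}
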
namespace {
template <IterationKind kIterationKind>
-void Generate_ArrayPrototypeIterationMethod(CodeStubAssembler* assembler) {
+void Generate_ArrayPrototypeIterationMethod(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Variable var_array(assembler, MachineRepresentation::kTagged);
- Variable var_map(assembler, MachineRepresentation::kTagged);
- Variable var_type(assembler, MachineRepresentation::kWord32);
+ Variable var_array(&assembler, MachineRepresentation::kTagged);
+ Variable var_map(&assembler, MachineRepresentation::kTagged);
+ Variable var_type(&assembler, MachineRepresentation::kWord32);
- Label if_isnotobject(assembler, Label::kDeferred);
- Label create_array_iterator(assembler);
+ Label if_isnotobject(&assembler, Label::kDeferred);
+ Label create_array_iterator(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &if_isnotobject);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_isnotobject);
var_array.Bind(receiver);
- var_map.Bind(assembler->LoadMap(receiver));
- var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
- assembler->Branch(assembler->IsJSReceiverInstanceType(var_type.value()),
- &create_array_iterator, &if_isnotobject);
+ var_map.Bind(assembler.LoadMap(receiver));
+ var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+ assembler.Branch(assembler.IsJSReceiverInstanceType(var_type.value()),
+ &create_array_iterator, &if_isnotobject);
- assembler->Bind(&if_isnotobject);
+ assembler.Bind(&if_isnotobject);
{
- Callable callable = CodeFactory::ToObject(assembler->isolate());
- Node* result = assembler->CallStub(callable, context, receiver);
+ Callable callable = CodeFactory::ToObject(assembler.isolate());
+ Node* result = assembler.CallStub(callable, context, receiver);
var_array.Bind(result);
- var_map.Bind(assembler->LoadMap(result));
- var_type.Bind(assembler->LoadMapInstanceType(var_map.value()));
- assembler->Goto(&create_array_iterator);
+ var_map.Bind(assembler.LoadMap(result));
+ var_type.Bind(assembler.LoadMapInstanceType(var_map.value()));
+ assembler.Goto(&create_array_iterator);
}
- assembler->Bind(&create_array_iterator);
- assembler->Return(assembler->CreateArrayIterator(
- var_array.value(), var_map.value(), var_type.value(), context,
- kIterationKind));
+ assembler.Bind(&create_array_iterator);
+ assembler.Return(
+ assembler.CreateArrayIterator(var_array.value(), var_map.value(),
+ var_type.value(), context, kIterationKind));
}
} // namespace
-void Builtins::Generate_ArrayPrototypeValues(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(assembler);
+void Builtins::Generate_ArrayPrototypeValues(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kValues>(state);
}
-void Builtins::Generate_ArrayPrototypeEntries(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(assembler);
+void Builtins::Generate_ArrayPrototypeEntries(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kEntries>(state);
}
-void Builtins::Generate_ArrayPrototypeKeys(CodeStubAssembler* assembler) {
- Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(assembler);
+void Builtins::Generate_ArrayPrototypeKeys(
+ compiler::CodeAssemblerState* state) {
+ Generate_ArrayPrototypeIterationMethod<IterationKind::kKeys>(state);
}
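Every hunk above applies the same mechanical migration: builtin generators now receive a compiler::CodeAssemblerState* and construct a CodeStubAssembler by value, so member calls change from assembler-> to assembler. and Label/Variable constructors take &assembler instead of the raw pointer. A minimal sketch of the pattern with a hypothetical Generate_Example (illustrative only, not part of the patch):

    void Builtins::Generate_Example(compiler::CodeAssemblerState* state) {
      typedef compiler::Node Node;
      CodeStubAssembler assembler(state);         // by value; was a pointer parameter
      Node* receiver = assembler.Parameter(0);    // was assembler->Parameter(0)
      CodeStubAssembler::Label done(&assembler);  // labels now take &assembler
      assembler.Goto(&done);
      assembler.Bind(&done);
      assembler.Return(receiver);
    }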
void Builtins::Generate_ArrayIteratorPrototypeNext(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
+
+ Handle<String> operation = assembler.factory()->NewStringFromAsciiChecked(
+ "Array Iterator.prototype.next", TENURED);
- Node* iterator = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* iterator = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Variable var_done(assembler, MachineRepresentation::kTagged);
+ Variable var_value(&assembler, MachineRepresentation::kTagged);
+ Variable var_done(&assembler, MachineRepresentation::kTagged);
// Required, or else `throw_bad_receiver` fails a DCHECK due to these
// variables not being bound along all paths, despite not being used.
- var_done.Bind(assembler->TrueConstant());
- var_value.Bind(assembler->UndefinedConstant());
+ var_done.Bind(assembler.TrueConstant());
+ var_value.Bind(assembler.UndefinedConstant());
- Label throw_bad_receiver(assembler, Label::kDeferred);
- Label set_done(assembler);
- Label allocate_key_result(assembler);
- Label allocate_entry_if_needed(assembler);
- Label allocate_iterator_result(assembler);
- Label generic_values(assembler);
+ Label throw_bad_receiver(&assembler, Label::kDeferred);
+ Label set_done(&assembler);
+ Label allocate_key_result(&assembler);
+ Label allocate_entry_if_needed(&assembler);
+ Label allocate_iterator_result(&assembler);
+ Label generic_values(&assembler);
// If O does not have all of the internal slots of an Array Iterator Instance
// (22.1.5.3), throw a TypeError exception.
- assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
- Node* instance_type = assembler->LoadInstanceType(iterator);
- assembler->GotoIf(
- assembler->Uint32LessThan(
- assembler->Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
- FIRST_ARRAY_ITERATOR_TYPE),
- assembler->Int32Sub(instance_type, assembler->Int32Constant(
- FIRST_ARRAY_ITERATOR_TYPE))),
+ assembler.GotoIf(assembler.TaggedIsSmi(iterator), &throw_bad_receiver);
+ Node* instance_type = assembler.LoadInstanceType(iterator);
+ assembler.GotoIf(
+ assembler.Uint32LessThan(
+ assembler.Int32Constant(LAST_ARRAY_ITERATOR_TYPE -
+ FIRST_ARRAY_ITERATOR_TYPE),
+ assembler.Int32Sub(instance_type, assembler.Int32Constant(
+ FIRST_ARRAY_ITERATOR_TYPE))),
&throw_bad_receiver);
// Let a be O.[[IteratedObject]].
- Node* array = assembler->LoadObjectField(
+ Node* array = assembler.LoadObjectField(
iterator, JSArrayIterator::kIteratedObjectOffset);
// Let index be O.[[ArrayIteratorNextIndex]].
Node* index =
- assembler->LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
- Node* orig_map = assembler->LoadObjectField(
+ assembler.LoadObjectField(iterator, JSArrayIterator::kNextIndexOffset);
+ Node* orig_map = assembler.LoadObjectField(
iterator, JSArrayIterator::kIteratedObjectMapOffset);
- Node* array_map = assembler->LoadMap(array);
+ Node* array_map = assembler.LoadMap(array);
- Label if_isfastarray(assembler), if_isnotfastarray(assembler);
+ Label if_isfastarray(&assembler), if_isnotfastarray(&assembler),
+ if_isdetached(&assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(orig_map, array_map), &if_isfastarray,
- &if_isnotfastarray);
+ assembler.Branch(assembler.WordEqual(orig_map, array_map), &if_isfastarray,
+ &if_isnotfastarray);
- assembler->Bind(&if_isfastarray);
+ assembler.Bind(&if_isfastarray);
{
- CSA_ASSERT(assembler,
- assembler->Word32Equal(assembler->LoadMapInstanceType(array_map),
- assembler->Int32Constant(JS_ARRAY_TYPE)));
+ CSA_ASSERT(&assembler,
+ assembler.Word32Equal(assembler.LoadMapInstanceType(array_map),
+ assembler.Int32Constant(JS_ARRAY_TYPE)));
- Node* length = assembler->LoadObjectField(array, JSArray::kLengthOffset);
+ Node* length = assembler.LoadObjectField(array, JSArray::kLengthOffset);
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+ assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSArrayIterator::kNextIndexOffset,
- assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
- assembler->BitcastTaggedToWord(one)));
+ Node* one = assembler.SmiConstant(Smi::FromInt(1));
+ assembler.StoreObjectFieldNoWriteBarrier(iterator,
+ JSArrayIterator::kNextIndexOffset,
+ assembler.SmiAdd(index, one));
- var_done.Bind(assembler->FalseConstant());
- Node* elements = assembler->LoadElements(array);
+ var_done.Bind(assembler.FalseConstant());
+ Node* elements = assembler.LoadElements(array);
static int32_t kInstanceType[] = {
JS_FAST_ARRAY_KEY_ITERATOR_TYPE,
@@ -2225,8 +2373,8 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE,
};
- Label packed_object_values(assembler), holey_object_values(assembler),
- packed_double_values(assembler), holey_double_values(assembler);
+ Label packed_object_values(&assembler), holey_object_values(&assembler),
+ packed_double_values(&assembler), holey_double_values(&assembler);
Label* kInstanceTypeHandlers[] = {
&allocate_key_result, &packed_object_values, &holey_object_values,
&packed_object_values, &holey_object_values, &packed_double_values,
@@ -2234,216 +2382,192 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
&packed_object_values, &holey_object_values, &packed_double_values,
&holey_double_values};
- assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
- assembler->Bind(&packed_object_values);
+ assembler.Bind(&packed_object_values);
{
- var_value.Bind(assembler->LoadFixedArrayElement(
+ var_value.Bind(assembler.LoadFixedArrayElement(
elements, index, 0, CodeStubAssembler::SMI_PARAMETERS));
- assembler->Goto(&allocate_entry_if_needed);
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&packed_double_values);
+ assembler.Bind(&packed_double_values);
{
- Node* value = assembler->LoadFixedDoubleArrayElement(
+ Node* value = assembler.LoadFixedDoubleArrayElement(
elements, index, MachineType::Float64(), 0,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&holey_object_values);
+ assembler.Bind(&holey_object_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
Node* invalid =
- assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
- assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
- &generic_values);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+ &generic_values);
- var_value.Bind(assembler->UndefinedConstant());
- Node* value = assembler->LoadFixedArrayElement(
+ var_value.Bind(assembler.UndefinedConstant());
+ Node* value = assembler.LoadFixedArrayElement(
elements, index, 0, CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(
- assembler->WordEqual(value, assembler->TheHoleConstant()),
- &allocate_entry_if_needed);
+ assembler.GotoIf(assembler.WordEqual(value, assembler.TheHoleConstant()),
+ &allocate_entry_if_needed);
var_value.Bind(value);
- assembler->Goto(&allocate_entry_if_needed);
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&holey_double_values);
+ assembler.Bind(&holey_double_values);
{
// Check the array_protector cell, and take the slow path if it's invalid.
Node* invalid =
- assembler->SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(Heap::kArrayProtectorRootIndex);
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell = assembler.LoadRoot(Heap::kArrayProtectorRootIndex);
Node* cell_value =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
- assembler->GotoIf(assembler->WordEqual(cell_value, invalid),
- &generic_values);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.GotoIf(assembler.WordEqual(cell_value, invalid),
+ &generic_values);
- var_value.Bind(assembler->UndefinedConstant());
- Node* value = assembler->LoadFixedDoubleArrayElement(
+ var_value.Bind(assembler.UndefinedConstant());
+ Node* value = assembler.LoadFixedDoubleArrayElement(
elements, index, MachineType::Float64(), 0,
CodeStubAssembler::SMI_PARAMETERS, &allocate_entry_if_needed);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
- assembler->Bind(&if_isnotfastarray);
+ assembler.Bind(&if_isnotfastarray);
{
- Label if_istypedarray(assembler), if_isgeneric(assembler);
+ Label if_istypedarray(&assembler), if_isgeneric(&assembler);
// If a is undefined, return CreateIterResultObject(undefined, true)
- assembler->GotoIf(
- assembler->WordEqual(array, assembler->UndefinedConstant()),
- &allocate_iterator_result);
+ assembler.GotoIf(assembler.WordEqual(array, assembler.UndefinedConstant()),
+ &allocate_iterator_result);
- Node* array_type = assembler->LoadInstanceType(array);
- assembler->Branch(
- assembler->Word32Equal(array_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Node* array_type = assembler.LoadInstanceType(array);
+ assembler.Branch(
+ assembler.Word32Equal(array_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&if_istypedarray, &if_isgeneric);
- assembler->Bind(&if_isgeneric);
+ assembler.Bind(&if_isgeneric);
{
- Label if_wasfastarray(assembler);
+ Label if_wasfastarray(&assembler);
Node* length = nullptr;
{
- Variable var_length(assembler, MachineRepresentation::kTagged);
- Label if_isarray(assembler), if_isnotarray(assembler), done(assembler);
- assembler->Branch(
- assembler->Word32Equal(array_type,
- assembler->Int32Constant(JS_ARRAY_TYPE)),
+ Variable var_length(&assembler, MachineRepresentation::kTagged);
+ Label if_isarray(&assembler), if_isnotarray(&assembler),
+ done(&assembler);
+ assembler.Branch(
+ assembler.Word32Equal(array_type,
+ assembler.Int32Constant(JS_ARRAY_TYPE)),
&if_isarray, &if_isnotarray);
- assembler->Bind(&if_isarray);
+ assembler.Bind(&if_isarray);
{
var_length.Bind(
- assembler->LoadObjectField(array, JSArray::kLengthOffset));
+ assembler.LoadObjectField(array, JSArray::kLengthOffset));
// Invalidate protector cell if needed
- assembler->Branch(
- assembler->WordNotEqual(orig_map, assembler->UndefinedConstant()),
+ assembler.Branch(
+ assembler.WordNotEqual(orig_map, assembler.UndefinedConstant()),
&if_wasfastarray, &done);
- assembler->Bind(&if_wasfastarray);
+ assembler.Bind(&if_wasfastarray);
{
- Label if_invalid(assembler, Label::kDeferred);
+ Label if_invalid(&assembler, Label::kDeferred);
// A fast array iterator transitioned to a slow iterator during
// iteration. Invalidate fast_array_iteration_protector cell to
// prevent potential deopt loops.
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectMapOffset,
- assembler->UndefinedConstant());
- assembler->GotoIf(
- assembler->Uint32LessThanOrEqual(
- instance_type, assembler->Int32Constant(
+ assembler.UndefinedConstant());
+ assembler.GotoIf(
+ assembler.Uint32LessThanOrEqual(
+ instance_type, assembler.Int32Constant(
JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&done);
- Node* invalid = assembler->SmiConstant(
- Smi::FromInt(Isolate::kProtectorInvalid));
- Node* cell = assembler->LoadRoot(
- Heap::kFastArrayIterationProtectorRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
- invalid);
- assembler->Goto(&done);
+ Node* invalid =
+ assembler.SmiConstant(Smi::FromInt(Isolate::kProtectorInvalid));
+ Node* cell =
+ assembler.LoadRoot(Heap::kFastArrayIterationProtectorRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset,
+ invalid);
+ assembler.Goto(&done);
}
}
- assembler->Bind(&if_isnotarray);
+ assembler.Bind(&if_isnotarray);
{
- Node* length_string = assembler->HeapConstant(
- assembler->isolate()->factory()->length_string());
- Callable get_property =
- CodeFactory::GetProperty(assembler->isolate());
+ Node* length_string = assembler.HeapConstant(
+ assembler.isolate()->factory()->length_string());
+ Callable get_property = CodeFactory::GetProperty(assembler.isolate());
Node* length =
- assembler->CallStub(get_property, context, array, length_string);
- Callable to_length = CodeFactory::ToLength(assembler->isolate());
- var_length.Bind(assembler->CallStub(to_length, context, length));
- assembler->Goto(&done);
+ assembler.CallStub(get_property, context, array, length_string);
+ Callable to_length = CodeFactory::ToLength(assembler.isolate());
+ var_length.Bind(assembler.CallStub(to_length, context, length));
+ assembler.Goto(&done);
}
- assembler->Bind(&done);
+ assembler.Bind(&done);
length = var_length.value();
}
- assembler->GotoUnlessNumberLessThan(index, length, &set_done);
+ assembler.GotoUnlessNumberLessThan(index, length, &set_done);
- assembler->StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
- assembler->NumberInc(index));
- var_done.Bind(assembler->FalseConstant());
+ assembler.StoreObjectField(iterator, JSArrayIterator::kNextIndexOffset,
+ assembler.NumberInc(index));
+ var_done.Bind(assembler.FalseConstant());
- assembler->Branch(
- assembler->Uint32LessThanOrEqual(
+ assembler.Branch(
+ assembler.Uint32LessThanOrEqual(
instance_type,
- assembler->Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
+ assembler.Int32Constant(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE)),
&allocate_key_result, &generic_values);
- assembler->Bind(&generic_values);
+ assembler.Bind(&generic_values);
{
- Callable get_property = CodeFactory::GetProperty(assembler->isolate());
- var_value.Bind(
- assembler->CallStub(get_property, context, array, index));
- assembler->Goto(&allocate_entry_if_needed);
+ Callable get_property = CodeFactory::GetProperty(assembler.isolate());
+ var_value.Bind(assembler.CallStub(get_property, context, array, index));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
- assembler->Bind(&if_istypedarray);
+ assembler.Bind(&if_istypedarray);
{
- Node* length = nullptr;
- {
- Variable var_length(assembler, MachineRepresentation::kTagged);
- Label if_isdetached(assembler, Label::kDeferred),
- if_isnotdetached(assembler), done(assembler);
+ Node* buffer =
+ assembler.LoadObjectField(array, JSTypedArray::kBufferOffset);
+ assembler.GotoIf(assembler.IsDetachedBuffer(buffer), &if_isdetached);
- Node* buffer =
- assembler->LoadObjectField(array, JSTypedArray::kBufferOffset);
- assembler->Branch(assembler->IsDetachedBuffer(buffer), &if_isdetached,
- &if_isnotdetached);
+ Node* length =
+ assembler.LoadObjectField(array, JSTypedArray::kLengthOffset);
- assembler->Bind(&if_isnotdetached);
- {
- var_length.Bind(
- assembler->LoadObjectField(array, JSTypedArray::kLengthOffset));
- assembler->Goto(&done);
- }
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(length));
+ CSA_ASSERT(&assembler, assembler.TaggedIsSmi(index));
- assembler->Bind(&if_isdetached);
- {
- // TODO(caitp): If IsDetached(buffer) is true, throw a TypeError, per
- // https://github.com/tc39/ecma262/issues/713
- var_length.Bind(assembler->SmiConstant(Smi::kZero));
- assembler->Goto(&done);
- }
-
- assembler->Bind(&done);
- length = var_length.value();
- }
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(length));
- CSA_ASSERT(assembler, assembler->TaggedIsSmi(index));
-
- assembler->GotoUnless(assembler->SmiBelow(index, length), &set_done);
+ assembler.GotoUnless(assembler.SmiBelow(index, length), &set_done);
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- assembler->StoreObjectFieldNoWriteBarrier(
+ Node* one = assembler.SmiConstant(1);
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kNextIndexOffset,
- assembler->IntPtrAdd(assembler->BitcastTaggedToWord(index),
- assembler->BitcastTaggedToWord(one)));
- var_done.Bind(assembler->FalseConstant());
+ assembler.SmiAdd(index, one));
+ var_done.Bind(assembler.FalseConstant());
- Node* elements = assembler->LoadElements(array);
- Node* base_ptr = assembler->LoadObjectField(
+ Node* elements = assembler.LoadElements(array);
+ Node* base_ptr = assembler.LoadObjectField(
elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* external_ptr = assembler->LoadObjectField(
- elements, FixedTypedArrayBase::kExternalPointerOffset);
- Node* data_ptr = assembler->IntPtrAdd(base_ptr, external_ptr);
+ Node* external_ptr = assembler.LoadObjectField(
+ elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* data_ptr = assembler.IntPtrAdd(
+ assembler.BitcastTaggedToWord(base_ptr), external_ptr);
static int32_t kInstanceType[] = {
JS_TYPED_ARRAY_KEY_ITERATOR_TYPE,
@@ -2467,10 +2591,10 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE,
};
- Label uint8_values(assembler), int8_values(assembler),
- uint16_values(assembler), int16_values(assembler),
- uint32_values(assembler), int32_values(assembler),
- float32_values(assembler), float64_values(assembler);
+ Label uint8_values(&assembler), int8_values(&assembler),
+ uint16_values(&assembler), int16_values(&assembler),
+ uint32_values(&assembler), int32_values(&assembler),
+ float32_values(&assembler), float64_values(&assembler);
Label* kInstanceTypeHandlers[] = {
&allocate_key_result, &uint8_values, &uint8_values,
&int8_values, &uint16_values, &int16_values,
@@ -2481,152 +2605,158 @@ void Builtins::Generate_ArrayIteratorPrototypeNext(
&float64_values,
};
- var_done.Bind(assembler->FalseConstant());
- assembler->Switch(instance_type, &throw_bad_receiver, kInstanceType,
- kInstanceTypeHandlers, arraysize(kInstanceType));
+ var_done.Bind(assembler.FalseConstant());
+ assembler.Switch(instance_type, &throw_bad_receiver, kInstanceType,
+ kInstanceTypeHandlers, arraysize(kInstanceType));
- assembler->Bind(&uint8_values);
+ assembler.Bind(&uint8_values);
{
- Node* value_uint8 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint8 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_uint8));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_uint8));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int8_values);
+ assembler.Bind(&int8_values);
{
- Node* value_int8 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int8 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT8_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_int8));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_int8));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&uint16_values);
+ assembler.Bind(&uint16_values);
{
- Node* value_uint16 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint16 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT16_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_uint16));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_uint16));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int16_values);
+ assembler.Bind(&int16_values);
{
- Node* value_int16 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int16 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT16_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->SmiFromWord(value_int16));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.SmiFromWord32(value_int16));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&uint32_values);
+ assembler.Bind(&uint32_values);
{
- Node* value_uint32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_uint32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, UINT32_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->ChangeUint32ToTagged(value_uint32));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.ChangeUint32ToTagged(value_uint32));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&int32_values);
+ assembler.Bind(&int32_values);
{
- Node* value_int32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_int32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, INT32_ELEMENTS, CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->ChangeInt32ToTagged(value_int32));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.ChangeInt32ToTagged(value_int32));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&float32_values);
+ assembler.Bind(&float32_values);
{
- Node* value_float32 = assembler->LoadFixedTypedArrayElement(
+ Node* value_float32 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, FLOAT32_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(
- assembler->ChangeFloat32ToFloat64(value_float32)));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(
+ assembler.ChangeFloat32ToFloat64(value_float32)));
+ assembler.Goto(&allocate_entry_if_needed);
}
- assembler->Bind(&float64_values);
+ assembler.Bind(&float64_values);
{
- Node* value_float64 = assembler->LoadFixedTypedArrayElement(
+ Node* value_float64 = assembler.LoadFixedTypedArrayElement(
data_ptr, index, FLOAT64_ELEMENTS,
CodeStubAssembler::SMI_PARAMETERS);
- var_value.Bind(assembler->AllocateHeapNumberWithValue(value_float64));
- assembler->Goto(&allocate_entry_if_needed);
+ var_value.Bind(assembler.AllocateHeapNumberWithValue(value_float64));
+ assembler.Goto(&allocate_entry_if_needed);
}
}
}
- assembler->Bind(&set_done);
+ assembler.Bind(&set_done);
{
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
iterator, JSArrayIterator::kIteratedObjectOffset,
- assembler->UndefinedConstant());
- assembler->Goto(&allocate_iterator_result);
+ assembler.UndefinedConstant());
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_key_result);
+ assembler.Bind(&allocate_key_result);
{
var_value.Bind(index);
- var_done.Bind(assembler->FalseConstant());
- assembler->Goto(&allocate_iterator_result);
+ var_done.Bind(assembler.FalseConstant());
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_entry_if_needed);
+ assembler.Bind(&allocate_entry_if_needed);
{
- assembler->GotoIf(
- assembler->Int32GreaterThan(
+ assembler.GotoIf(
+ assembler.Int32GreaterThan(
instance_type,
- assembler->Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
+ assembler.Int32Constant(LAST_ARRAY_KEY_VALUE_ITERATOR_TYPE)),
&allocate_iterator_result);
- Node* elements = assembler->AllocateFixedArray(FAST_ELEMENTS,
- assembler->Int32Constant(2));
- assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(0),
- index, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(elements, assembler->Int32Constant(1),
- var_value.value(), SKIP_WRITE_BARRIER);
-
- Node* entry = assembler->Allocate(JSArray::kSize);
- Node* map = assembler->LoadContextElement(
- assembler->LoadNativeContext(context),
- Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
-
- assembler->StoreMapNoWriteBarrier(entry, map);
- assembler->StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
- elements);
- assembler->StoreObjectFieldNoWriteBarrier(
- entry, JSArray::kLengthOffset, assembler->SmiConstant(Smi::FromInt(2)));
+ Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS,
+ assembler.IntPtrConstant(2));
+ assembler.StoreFixedArrayElement(elements, 0, index, SKIP_WRITE_BARRIER);
+ assembler.StoreFixedArrayElement(elements, 1, var_value.value(),
+ SKIP_WRITE_BARRIER);
+
+ Node* entry = assembler.Allocate(JSArray::kSize);
+ Node* map =
+ assembler.LoadContextElement(assembler.LoadNativeContext(context),
+ Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX);
+
+ assembler.StoreMapNoWriteBarrier(entry, map);
+ assembler.StoreObjectFieldRoot(entry, JSArray::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(entry, JSArray::kElementsOffset,
+ elements);
+ assembler.StoreObjectFieldNoWriteBarrier(
+ entry, JSArray::kLengthOffset, assembler.SmiConstant(Smi::FromInt(2)));
var_value.Bind(entry);
- assembler->Goto(&allocate_iterator_result);
+ assembler.Goto(&allocate_iterator_result);
}
- assembler->Bind(&allocate_iterator_result);
+ assembler.Bind(&allocate_iterator_result);
{
- Node* result = assembler->Allocate(JSIteratorResult::kSize);
+ Node* result = assembler.Allocate(JSIteratorResult::kSize);
Node* map =
- assembler->LoadContextElement(assembler->LoadNativeContext(context),
- Context::ITERATOR_RESULT_MAP_INDEX);
- assembler->StoreMapNoWriteBarrier(result, map);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.LoadContextElement(assembler.LoadNativeContext(context),
+ Context::ITERATOR_RESULT_MAP_INDEX);
+ assembler.StoreMapNoWriteBarrier(result, map);
+ assembler.StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldNoWriteBarrier(
result, JSIteratorResult::kValueOffset, var_value.value());
- assembler->StoreObjectFieldNoWriteBarrier(
+ assembler.StoreObjectFieldNoWriteBarrier(
result, JSIteratorResult::kDoneOffset, var_done.value());
- assembler->Return(result);
+ assembler.Return(result);
}
- assembler->Bind(&throw_bad_receiver);
+ assembler.Bind(&throw_bad_receiver);
{
// The {receiver} is not a valid JSArrayIterator.
- Node* result = assembler->CallRuntime(
+ Node* result = assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "Array Iterator.prototype.next", TENURED)),
- iterator);
- assembler->Return(result);
+ assembler.HeapConstant(operation), iterator);
+ assembler.Return(result);
+ }
+
+ assembler.Bind(&if_isdetached);
+ {
+ Node* message = assembler.SmiConstant(MessageTemplate::kDetachedOperation);
+ Node* result =
+ assembler.CallRuntime(Runtime::kThrowTypeError, context, message,
+ assembler.HeapConstant(operation));
+ assembler.Return(result);
}
}
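The typed-array path is also a behavioral change, not just a receiver-style migration: the deleted hunk clamped a detached buffer to length zero (see the removed TODO pointing at tc39/ecma262#713), whereas the new code routes detachment to the deferred if_isdetached label and throws. Condensed from the hunks above (same names as in the patch; a sketch, not a standalone compilable unit):

    // Before: detached buffer => length 0 => iterator quietly reports done.
    // After:  detached buffer => TypeError (MessageTemplate::kDetachedOperation).
    Node* buffer = assembler.LoadObjectField(array, JSTypedArray::kBufferOffset);
    assembler.GotoIf(assembler.IsDetachedBuffer(buffer), &if_isdetached);
    // ... fast typed-array element loads ...
    assembler.Bind(&if_isdetached);
    Node* message = assembler.SmiConstant(MessageTemplate::kDetachedOperation);
    assembler.Return(assembler.CallRuntime(Runtime::kThrowTypeError, context,
                                           message,
                                           assembler.HeapConstant(operation)));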
diff --git a/deps/v8/src/builtins/builtins-boolean.cc b/deps/v8/src/builtins/builtins-boolean.cc
index e7ccf95973..81232230ff 100644
--- a/deps/v8/src/builtins/builtins-boolean.cc
+++ b/deps/v8/src/builtins/builtins-boolean.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -34,28 +35,32 @@ BUILTIN(BooleanConstructor_ConstructStub) {
}
// ES6 section 19.3.3.2 Boolean.prototype.toString ( )
-void Builtins::Generate_BooleanPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeToString(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* value = assembler->ToThisValue(
+ Node* value = assembler.ToThisValue(
context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.toString");
- Node* result = assembler->LoadObjectField(value, Oddball::kToStringOffset);
- assembler->Return(result);
+ Node* result = assembler.LoadObjectField(value, Oddball::kToStringOffset);
+ assembler.Return(result);
}
// ES6 section 19.3.3.3 Boolean.prototype.valueOf ( )
-void Builtins::Generate_BooleanPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_BooleanPrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* result = assembler->ToThisValue(
+ Node* result = assembler.ToThisValue(
context, receiver, PrimitiveType::kBoolean, "Boolean.prototype.valueOf");
- assembler->Return(result);
+ assembler.Return(result);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-constructor.cc b/deps/v8/src/builtins/builtins-constructor.cc
new file mode 100644
index 0000000000..db3ffb0b91
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-constructor.cc
@@ -0,0 +1,772 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-constructor.h"
+#include "src/ast/ast.h"
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
+ Node* feedback_vector,
+ Node* slot,
+ Node* context) {
+ typedef compiler::CodeAssembler::Label Label;
+ typedef compiler::CodeAssembler::Variable Variable;
+
+ Isolate* isolate = this->isolate();
+ Factory* factory = isolate->factory();
+ IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
+
+ // Create a new closure from the given function info in new space
+ Node* result = Allocate(JSFunction::kSize);
+
+ // Calculate the index of the map we should install on the function based on
+ // the FunctionKind and LanguageMode of the function.
+ // Note: Must be kept in sync with Context::FunctionMapIndex
+ Node* compiler_hints =
+ LoadObjectField(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ MachineType::Uint32());
+ Node* is_strict = Word32And(
+ compiler_hints, Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
+
+ Label if_normal(this), if_generator(this), if_async(this),
+ if_class_constructor(this), if_function_without_prototype(this),
+ load_map(this);
+ Variable map_index(this, MachineType::PointerRepresentation());
+
+ STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
+ Node* is_not_normal =
+ Word32And(compiler_hints,
+ Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
+ GotoUnless(is_not_normal, &if_normal);
+
+ Node* is_generator = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_generator, &if_generator);
+
+ Node* is_async = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kAsyncFunction
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_async, &if_async);
+
+ Node* is_class_constructor = Word32And(
+ compiler_hints, Int32Constant(FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift));
+ GotoIf(is_class_constructor, &if_class_constructor);
+
+ if (FLAG_debug_code) {
+ // Function must be a function without a prototype.
+ CSA_ASSERT(
+ this,
+ Word32And(compiler_hints,
+ Int32Constant((FunctionKind::kAccessorFunction |
+ FunctionKind::kArrowFunction |
+ FunctionKind::kConciseMethod)
+ << SharedFunctionInfo::kFunctionKindShift)));
+ }
+ Goto(&if_function_without_prototype);
+
+ Bind(&if_normal);
+ {
+ map_index.Bind(SelectIntPtrConstant(is_strict,
+ Context::STRICT_FUNCTION_MAP_INDEX,
+ Context::SLOPPY_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_generator);
+ {
+ map_index.Bind(IntPtrConstant(Context::GENERATOR_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_async);
+ {
+ map_index.Bind(IntPtrConstant(Context::ASYNC_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_class_constructor);
+ {
+ map_index.Bind(IntPtrConstant(Context::CLASS_FUNCTION_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&if_function_without_prototype);
+ {
+ map_index.Bind(
+ IntPtrConstant(Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
+ Goto(&load_map);
+ }
+
+ Bind(&load_map);
+
+ // Get the function map in the current native context and set that
+ // as the map of the allocated object.
+ Node* native_context = LoadNativeContext(context);
+ Node* map_slot_value =
+ LoadFixedArrayElement(native_context, map_index.value());
+ StoreMapNoWriteBarrier(result, map_slot_value);
+
+ // Initialize the rest of the function.
+ Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
+ Node* empty_literals_array = HeapConstant(factory->empty_literals_array());
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
+ empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
+ empty_fixed_array);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
+ empty_literals_array);
+ StoreObjectFieldNoWriteBarrier(
+ result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
+ shared_info);
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
+ Handle<Code> lazy_builtin_handle(
+ isolate->builtins()->builtin(Builtins::kCompileLazy));
+ Node* lazy_builtin = HeapConstant(lazy_builtin_handle);
+ Node* lazy_builtin_entry =
+ IntPtrAdd(BitcastTaggedToWord(lazy_builtin),
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeEntryOffset,
+ lazy_builtin_entry,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldNoWriteBarrier(result, JSFunction::kNextFunctionLinkOffset,
+ UndefinedConstant());
+
+ return result;
+}
+
+TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
+ Node* shared = Parameter(FastNewClosureDescriptor::kSharedFunctionInfo);
+ Node* context = Parameter(FastNewClosureDescriptor::kContext);
+ Node* vector = Parameter(FastNewClosureDescriptor::kVector);
+ Node* slot = Parameter(FastNewClosureDescriptor::kSlot);
+ Return(EmitFastNewClosure(shared, vector, slot, context));
+}
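The new file also adopts the TF_BUILTIN macro (from builtins-utils.h), which hides the Generate_ boilerplate: it declares the generator, instantiates the named assembler subclass on the incoming CodeAssemblerState, and runs the body as a member function, so Parameter/Return need no assembler. prefix. A hypothetical builtin in that style (name and body are illustrative; the descriptor is the real FastNewObjectDescriptor used below):

    TF_BUILTIN(ExampleNewObject, ConstructorBuiltinsAssembler) {
      typedef FastNewObjectDescriptor Descriptor;
      Node* context = Parameter(Descriptor::kContext);
      Node* target = Parameter(Descriptor::kTarget);
      Node* new_target = Parameter(Descriptor::kNewTarget);
      // Unqualified helpers resolve against ConstructorBuiltinsAssembler itself.
      Return(EmitFastNewObject(context, target, new_target));
    }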
+
+TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) {
+ typedef FastNewObjectDescriptor Descriptor;
+ Node* context = Parameter(Descriptor::kContext);
+ Node* target = Parameter(Descriptor::kTarget);
+ Node* new_target = Parameter(Descriptor::kNewTarget);
+
+ Label call_runtime(this);
+
+ Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ Return(result);
+
+ Bind(&call_runtime);
+ TailCallRuntime(Runtime::kNewObject, context, target, new_target);
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
+ Node* target,
+ Node* new_target) {
+ Variable var_obj(this, MachineRepresentation::kTagged);
+ Label call_runtime(this), end(this);
+
+ Node* result = EmitFastNewObject(context, target, new_target, &call_runtime);
+ var_obj.Bind(result);
+ Goto(&end);
+
+ Bind(&call_runtime);
+ var_obj.Bind(CallRuntime(Runtime::kNewObject, context, target, new_target));
+ Goto(&end);
+
+ Bind(&end);
+ return var_obj.value();
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewObject(
+ Node* context, Node* target, Node* new_target,
+ CodeAssemblerLabel* call_runtime) {
+ CSA_ASSERT(this, HasInstanceType(target, JS_FUNCTION_TYPE));
+ CSA_ASSERT(this, IsJSReceiver(new_target));
+
+ // Verify that the new target is a JSFunction.
+ Label fast(this), end(this);
+ GotoIf(HasInstanceType(new_target, JS_FUNCTION_TYPE), &fast);
+ Goto(call_runtime);
+
+ Bind(&fast);
+
+ // Load the initial map and verify that it's in fact a map.
+ Node* initial_map =
+ LoadObjectField(new_target, JSFunction::kPrototypeOrInitialMapOffset);
+ GotoIf(TaggedIsSmi(initial_map), call_runtime);
+ GotoIf(DoesntHaveInstanceType(initial_map, MAP_TYPE), call_runtime);
+
+ // Fall back to runtime if the target differs from the new target's
+ // initial map constructor.
+ Node* new_target_constructor =
+ LoadObjectField(initial_map, Map::kConstructorOrBackPointerOffset);
+ GotoIf(WordNotEqual(target, new_target_constructor), call_runtime);
+
+ Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
+ initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ Node* instance_size =
+ WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+ Node* object = Allocate(instance_size);
+ StoreMapNoWriteBarrier(object, initial_map);
+ Node* empty_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOffset,
+ empty_array);
+ StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset,
+ empty_array);
+
+ instance_size_words = ChangeUint32ToWord(LoadObjectField(
+ initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ instance_size =
+ WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
+
+ // Perform in-object slack tracking if requested.
+ Node* bit_field3 = LoadMapBitField3(initial_map);
+ Label slack_tracking(this), finalize(this, Label::kDeferred), done(this);
+ GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+
+ // Initialize remaining fields.
+ {
+ Comment("no slack tracking");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ instance_size, Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ Bind(&slack_tracking);
+
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ Comment("update allocation count");
+ Node* new_bit_field3 = Int32Sub(
+ bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
+ new_bit_field3,
+ MachineRepresentation::kWord32);
+ GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size =
+ IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+ IntPtrConstant(kPointerSizeLog2)));
+
+ Comment("initialize filler fields (no finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (no finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ used_size, Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ // Finalize the instance size.
+ Bind(&finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size =
+ IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
+ IntPtrConstant(kPointerSizeLog2)));
+
+ Comment("initialize filler fields (finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
+ used_size, Heap::kUndefinedValueRootIndex);
+
+ CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return object;
+}
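Both slack-tracking branches share one formula: initialize undefined up to used_size and one-pointer fillers from there to instance_size, where used_size subtracts the map's unused property slots. A worked instance of the arithmetic, assuming a 64-bit build (kPointerSizeLog2 == 3) and made-up field values:

    // instance_size_words = 16   (from Map::kInstanceSizeOffset, in words)
    // instance_size       = 16 << 3 = 128 bytes
    // unused_fields       = 3    (from Map::kUnusedPropertyFieldsOffset)
    // used_size           = 128 - (3 << 3) = 104 bytes
    //
    // [JSObject::kHeaderSize, 104) -> Heap::kUndefinedValueRootIndex
    // [104, 128)                   -> Heap::kOnePointerFillerMapRootIndex
    //                                 (slack the GC reclaims once the map finalizes)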
+
+Node* ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
+ Node* function, Node* slots, Node* context, ScopeType scope_type) {
+ slots = ChangeUint32ToWord(slots);
+
+ // TODO(ishell): Use CSA::OptimalParameterMode() here.
+ CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ Node* min_context_slots = IntPtrConstant(Context::MIN_CONTEXT_SLOTS);
+ Node* length = IntPtrAdd(slots, min_context_slots);
+ Node* size = GetFixedArrayAllocationSize(length, FAST_ELEMENTS, mode);
+
+  // Allocate the function context in new space.
+ Node* function_context = Allocate(size);
+
+ Heap::RootListIndex context_type;
+ switch (scope_type) {
+ case EVAL_SCOPE:
+ context_type = Heap::kEvalContextMapRootIndex;
+ break;
+ case FUNCTION_SCOPE:
+ context_type = Heap::kFunctionContextMapRootIndex;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ StoreMapNoWriteBarrier(function_context, context_type);
+ StoreObjectFieldNoWriteBarrier(function_context, Context::kLengthOffset,
+ SmiTag(length));
+
+ // Set up the fixed slots.
+ StoreFixedArrayElement(function_context, Context::CLOSURE_INDEX, function,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(function_context, Context::PREVIOUS_INDEX, context,
+ SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(function_context, Context::EXTENSION_INDEX,
+ TheHoleConstant(), SKIP_WRITE_BARRIER);
+
+ // Copy the native context from the previous context.
+ Node* native_context = LoadNativeContext(context);
+ StoreFixedArrayElement(function_context, Context::NATIVE_CONTEXT_INDEX,
+ native_context, SKIP_WRITE_BARRIER);
+
+ // Initialize the rest of the slots to undefined.
+ Node* undefined = UndefinedConstant();
+ BuildFastFixedArrayForEach(
+ function_context, FAST_ELEMENTS, min_context_slots, length,
+ [this, undefined](Node* context, Node* offset) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, context, offset,
+ undefined);
+ },
+ mode);
+
+ return function_context;
+}
+
+// static
+int ConstructorBuiltinsAssembler::MaximumFunctionContextSlots() {
+ return FLAG_test_small_max_function_context_stub_size ? kSmallMaximumSlots
+ : kMaximumSlots;
+}
+
+TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) {
+ Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+ Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+ Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+ Return(EmitFastNewFunctionContext(function, slots, context,
+ ScopeType::EVAL_SCOPE));
+}
+
+TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) {
+ Node* function = Parameter(FastNewFunctionContextDescriptor::kFunction);
+ Node* slots = Parameter(FastNewFunctionContextDescriptor::kSlots);
+ Node* context = Parameter(FastNewFunctionContextDescriptor::kContext);
+ Return(EmitFastNewFunctionContext(function, slots, context,
+ ScopeType::FUNCTION_SCOPE));
+}
+
+Handle<Code> Builtins::NewFunctionContext(ScopeType scope_type) {
+ switch (scope_type) {
+ case ScopeType::EVAL_SCOPE:
+ return FastNewFunctionContextEval();
+ case ScopeType::FUNCTION_SCOPE:
+ return FastNewFunctionContextFunction();
+ default:
+ UNREACHABLE();
+ }
+ return Handle<Code>::null();
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
+ Node* literal_index,
+ Node* pattern,
+ Node* flags,
+ Node* context) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ Label call_runtime(this, Label::kDeferred), end(this);
+
+ Variable result(this, MachineRepresentation::kTagged);
+
+ Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* boilerplate =
+ LoadFixedArrayElement(literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+ GotoIf(IsUndefined(boilerplate), &call_runtime);
+
+ {
+ int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+ Node* copy = Allocate(size);
+ for (int offset = 0; offset < size; offset += kPointerSize) {
+ Node* value = LoadObjectField(boilerplate, offset);
+ StoreObjectFieldNoWriteBarrier(copy, offset, value);
+ }
+ result.Bind(copy);
+ Goto(&end);
+ }
+
+ Bind(&call_runtime);
+ {
+ result.Bind(CallRuntime(Runtime::kCreateRegExpLiteral, context, closure,
+ literal_index, pattern, flags));
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return result.value();
+}
+
+TF_BUILTIN(FastCloneRegExp, ConstructorBuiltinsAssembler) {
+ Node* closure = Parameter(FastCloneRegExpDescriptor::kClosure);
+ Node* literal_index = Parameter(FastCloneRegExpDescriptor::kLiteralIndex);
+ Node* pattern = Parameter(FastCloneRegExpDescriptor::kPattern);
+ Node* flags = Parameter(FastCloneRegExpDescriptor::kFlags);
+ Node* context = Parameter(FastCloneRegExpDescriptor::kContext);
+
+ Return(EmitFastCloneRegExp(closure, literal_index, pattern, flags, context));
+}
+
+Node* ConstructorBuiltinsAssembler::NonEmptyShallowClone(
+ Node* boilerplate, Node* boilerplate_map, Node* boilerplate_elements,
+ Node* allocation_site, Node* capacity, ElementsKind kind) {
+ typedef CodeStubAssembler::ParameterMode ParameterMode;
+
+ ParameterMode param_mode = OptimalParameterMode();
+
+ Node* length = LoadJSArrayLength(boilerplate);
+ capacity = TaggedToParameter(capacity, param_mode);
+
+ Node *array, *elements;
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ kind, boilerplate_map, length, allocation_site, capacity, param_mode);
+
+ Comment("copy elements header");
+ // Header consists of map and length.
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+ StoreMap(elements, LoadMap(boilerplate_elements));
+ {
+ int offset = FixedArrayBase::kLengthOffset;
+ StoreObjectFieldNoWriteBarrier(
+ elements, offset, LoadObjectField(boilerplate_elements, offset));
+ }
+
+ length = TaggedToParameter(length, param_mode);
+
+ Comment("copy boilerplate elements");
+ CopyFixedArrayElements(kind, boilerplate_elements, elements, length,
+ SKIP_WRITE_BARRIER, param_mode);
+ IncrementCounter(isolate()->counters()->inlined_copied_elements(), 1);
+
+ return array;
+}
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
+ Node* closure, Node* literal_index, Node* context,
+ CodeAssemblerLabel* call_runtime, AllocationSiteMode allocation_site_mode) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ Label zero_capacity(this), cow_elements(this), fast_elements(this),
+ return_result(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* allocation_site =
+ LoadFixedArrayElement(literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ GotoIf(IsUndefined(allocation_site), call_runtime);
+ allocation_site =
+ LoadFixedArrayElement(literals_array, literal_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+
+ Node* boilerplate =
+ LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate_map = LoadMap(boilerplate);
+ Node* boilerplate_elements = LoadElements(boilerplate);
+ Node* capacity = LoadFixedArrayBaseLength(boilerplate_elements);
+ allocation_site =
+ allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
+
+ Node* zero = SmiConstant(Smi::kZero);
+ GotoIf(SmiEqual(capacity, zero), &zero_capacity);
+
+ Node* elements_map = LoadMap(boilerplate_elements);
+ GotoIf(IsFixedCOWArrayMap(elements_map), &cow_elements);
+
+ GotoIf(IsFixedArrayMap(elements_map), &fast_elements);
+ {
+ Comment("fast double elements path");
+ if (FLAG_debug_code) {
+ Label correct_elements_map(this), abort(this, Label::kDeferred);
+ Branch(IsFixedDoubleArrayMap(elements_map), &correct_elements_map,
+ &abort);
+
+ Bind(&abort);
+ {
+ Node* abort_id = SmiConstant(
+ Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
+ CallRuntime(Runtime::kAbort, context, abort_id);
+ result.Bind(UndefinedConstant());
+ Goto(&return_result);
+ }
+ Bind(&correct_elements_map);
+ }
+
+ Node* array =
+ NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+ allocation_site, capacity, FAST_DOUBLE_ELEMENTS);
+ result.Bind(array);
+ Goto(&return_result);
+ }
+
+ Bind(&fast_elements);
+ {
+ Comment("fast elements path");
+ Node* array =
+ NonEmptyShallowClone(boilerplate, boilerplate_map, boilerplate_elements,
+ allocation_site, capacity, FAST_ELEMENTS);
+ result.Bind(array);
+ Goto(&return_result);
+ }
+
+ Variable length(this, MachineRepresentation::kTagged),
+ elements(this, MachineRepresentation::kTagged);
+ Label allocate_without_elements(this);
+
+ Bind(&cow_elements);
+ {
+ Comment("fixed cow path");
+ length.Bind(LoadJSArrayLength(boilerplate));
+ elements.Bind(boilerplate_elements);
+
+ Goto(&allocate_without_elements);
+ }
+
+ Bind(&zero_capacity);
+ {
+ Comment("zero capacity path");
+ length.Bind(zero);
+ elements.Bind(LoadRoot(Heap::kEmptyFixedArrayRootIndex));
+
+ Goto(&allocate_without_elements);
+ }
+
+ Bind(&allocate_without_elements);
+ {
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
+ StoreObjectField(array, JSObject::kElementsOffset, elements.value());
+ result.Bind(array);
+ Goto(&return_result);
+ }
+
+ Bind(&return_result);
+ return result.value();
+}
+
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowArrayBuiltin(
+ AllocationSiteMode allocation_site_mode) {
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Label Label;
+
+ Node* closure = Parameter(FastCloneShallowArrayDescriptor::kClosure);
+ Node* literal_index =
+ Parameter(FastCloneShallowArrayDescriptor::kLiteralIndex);
+ Node* constant_elements =
+ Parameter(FastCloneShallowArrayDescriptor::kConstantElements);
+ Node* context = Parameter(FastCloneShallowArrayDescriptor::kContext);
+ Label call_runtime(this, Label::kDeferred);
+ Return(EmitFastCloneShallowArray(closure, literal_index, context,
+ &call_runtime, allocation_site_mode));
+
+ Bind(&call_runtime);
+ {
+ Comment("call runtime");
+ Node* flags =
+ SmiConstant(Smi::FromInt(ArrayLiteral::kShallowElements |
+ (allocation_site_mode == TRACK_ALLOCATION_SITE
+ ? 0
+ : ArrayLiteral::kDisableMementos)));
+ Return(CallRuntime(Runtime::kCreateArrayLiteral, context, closure,
+ literal_index, constant_elements, flags));
+ }
+}
+
+TF_BUILTIN(FastCloneShallowArrayTrack, ConstructorBuiltinsAssembler) {
+ CreateFastCloneShallowArrayBuiltin(TRACK_ALLOCATION_SITE);
+}
+
+TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
+ CreateFastCloneShallowArrayBuiltin(DONT_TRACK_ALLOCATION_SITE);
+}
+
+Handle<Code> Builtins::NewCloneShallowArray(
+ AllocationSiteMode allocation_mode) {
+ switch (allocation_mode) {
+ case TRACK_ALLOCATION_SITE:
+ return FastCloneShallowArrayTrack();
+ case DONT_TRACK_ALLOCATION_SITE:
+ return FastCloneShallowArrayDontTrack();
+ default:
+ UNREACHABLE();
+ }
+ return Handle<Code>::null();
+}
+
+// static
+int ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+ int literal_length) {
+ // This heuristic of setting empty literals to have
+ // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
+ // runtime.
+ // TODO(verwaest): Unify this with the heuristic in the runtime.
+ return literal_length == 0
+ ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
+ : literal_length;
+}
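The only special case is therefore the empty literal, which is padded up to the global-object default rather than getting zero property slots; with N standing for JSObject::kInitialGlobalObjectUnusedPropertiesCount (defined elsewhere in the tree), the mapping is:

    // FastCloneShallowObjectPropertiesCount(0) == N  (empty literal, padded)
    // FastCloneShallowObjectPropertiesCount(1) == 1
    // FastCloneShallowObjectPropertiesCount(6) == 6  (== kMaximumClonedShallowObjectProperties)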
+
+Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
+ CodeAssemblerLabel* call_runtime, Node* closure, Node* literals_index,
+ Node* properties_count) {
+ Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
+ Node* allocation_site =
+ LoadFixedArrayElement(literals_array, literals_index,
+ LiteralsArray::kFirstLiteralIndex * kPointerSize,
+ CodeStubAssembler::SMI_PARAMETERS);
+ GotoIf(IsUndefined(allocation_site), call_runtime);
+
+ // Calculate the object and allocation size based on the properties count.
+ Node* object_size = IntPtrAdd(WordShl(properties_count, kPointerSizeLog2),
+ IntPtrConstant(JSObject::kHeaderSize));
+ Node* allocation_size = object_size;
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_size =
+ IntPtrAdd(object_size, IntPtrConstant(AllocationMemento::kSize));
+ }
+ Node* boilerplate =
+ LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
+ Node* boilerplate_map = LoadMap(boilerplate);
+ Node* instance_size = LoadMapInstanceSize(boilerplate_map);
+ Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
+ GotoUnless(WordEqual(instance_size, size_in_words), call_runtime);
+
+ Node* copy = Allocate(allocation_size);
+
+ // Copy boilerplate elements.
+ Variable offset(this, MachineType::PointerRepresentation());
+ offset.Bind(IntPtrConstant(-kHeapObjectTag));
+ Node* end_offset = IntPtrAdd(object_size, offset.value());
+ Label loop_body(this, &offset), loop_check(this, &offset);
+ // We should always have an object size greater than zero.
+ Goto(&loop_body);
+ Bind(&loop_body);
+ {
+ // The Allocate above guarantees that the copy lies in new space. This
+ // allows us to skip write barriers. This is necessary since we may also be
+ // copying unboxed doubles.
+ Node* field = Load(MachineType::IntPtr(), boilerplate, offset.value());
+ StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
+ offset.value(), field);
+ Goto(&loop_check);
+ }
+ Bind(&loop_check);
+ {
+ offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
+ GotoUnless(IntPtrGreaterThanOrEqual(offset.value(), end_offset),
+ &loop_body);
+ }
+
+ if (FLAG_allocation_site_pretenuring) {
+ Node* memento = InnerAllocate(copy, object_size);
+ StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
+ Node* memento_create_count = LoadObjectField(
+ allocation_site, AllocationSite::kPretenureCreateCountOffset);
+ memento_create_count =
+ SmiAdd(memento_create_count, SmiConstant(Smi::FromInt(1)));
+ StoreObjectFieldNoWriteBarrier(allocation_site,
+ AllocationSite::kPretenureCreateCountOffset,
+ memento_create_count);
+ }
+
+ // TODO(verwaest): Allocate and fill in double boxes.
+ return copy;
+}
+
+void ConstructorBuiltinsAssembler::CreateFastCloneShallowObjectBuiltin(
+ int properties_count) {
+ DCHECK_GE(properties_count, 0);
+ DCHECK_LE(properties_count, kMaximumClonedShallowObjectProperties);
+ Label call_runtime(this);
+ Node* closure = Parameter(0);
+ Node* literals_index = Parameter(1);
+
+ Node* properties_count_node =
+ IntPtrConstant(FastCloneShallowObjectPropertiesCount(properties_count));
+ Node* copy = EmitFastCloneShallowObject(
+ &call_runtime, closure, literals_index, properties_count_node);
+ Return(copy);
+
+ Bind(&call_runtime);
+ Node* constant_properties = Parameter(2);
+ Node* flags = Parameter(3);
+ Node* context = Parameter(4);
+ TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
+ literals_index, constant_properties, flags);
+}
+
+#define SHALLOW_OBJECT_BUILTIN(props) \
+ TF_BUILTIN(FastCloneShallowObject##props, ConstructorBuiltinsAssembler) { \
+ CreateFastCloneShallowObjectBuiltin(props); \
+ }
+
+SHALLOW_OBJECT_BUILTIN(0);
+SHALLOW_OBJECT_BUILTIN(1);
+SHALLOW_OBJECT_BUILTIN(2);
+SHALLOW_OBJECT_BUILTIN(3);
+SHALLOW_OBJECT_BUILTIN(4);
+SHALLOW_OBJECT_BUILTIN(5);
+SHALLOW_OBJECT_BUILTIN(6);
+
+Handle<Code> Builtins::NewCloneShallowObject(int length) {
+ switch (length) {
+ case 0:
+ return FastCloneShallowObject0();
+ case 1:
+ return FastCloneShallowObject1();
+ case 2:
+ return FastCloneShallowObject2();
+ case 3:
+ return FastCloneShallowObject3();
+ case 4:
+ return FastCloneShallowObject4();
+ case 5:
+ return FastCloneShallowObject5();
+ case 6:
+ return FastCloneShallowObject6();
+ default:
+ UNREACHABLE();
+ }
+ return Handle<Code>::null();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
new file mode 100644
index 0000000000..68629a7bd3
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+
+class ConstructorBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ConstructorBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ Node* EmitFastNewClosure(Node* shared_info, Node* feedback_vector, Node* slot,
+ Node* context);
+ Node* EmitFastNewFunctionContext(Node* closure, Node* slots, Node* context,
+ ScopeType scope_type);
+ static int MaximumFunctionContextSlots();
+
+ Node* EmitFastCloneRegExp(Node* closure, Node* literal_index, Node* pattern,
+ Node* flags, Node* context);
+ Node* EmitFastCloneShallowArray(Node* closure, Node* literal_index,
+ Node* context,
+ CodeAssemblerLabel* call_runtime,
+ AllocationSiteMode allocation_site_mode);
+
+ // Maximum number of elements in a copied array (chosen so that even an
+ // array backed by a double backing store will fit into new space).
+ static const int kMaximumClonedShallowArrayElements =
+ JSArray::kInitialMaxFastElementArray * kPointerSize / kDoubleSize;
+
+ void CreateFastCloneShallowArrayBuiltin(
+ AllocationSiteMode allocation_site_mode);
+
+ // Maximum number of properties in copied objects.
+ static const int kMaximumClonedShallowObjectProperties = 6;
+ static int FastCloneShallowObjectPropertiesCount(int literal_length);
+ Node* EmitFastCloneShallowObject(CodeAssemblerLabel* call_runtime,
+ Node* closure, Node* literals_index,
+ Node* properties_count);
+ void CreateFastCloneShallowObjectBuiltin(int properties_count);
+
+ Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
+
+ Node* EmitFastNewObject(Node* context, Node* target, Node* new_target,
+ CodeAssemblerLabel* call_runtime);
+
+ private:
+ static const int kMaximumSlots = 0x8000;
+ static const int kSmallMaximumSlots = 10;
+
+ Node* NonEmptyShallowClone(Node* boilerplate, Node* boilerplate_map,
+ Node* boilerplate_elements, Node* allocation_site,
+ Node* capacity, ElementsKind kind);
+
+ // FastNewFunctionContext can only allocate contexts that fit in the
+ // new space.
+ STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
+ FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
+};
+
+} // namespace internal
+} // namespace v8
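
The STATIC_ASSERT at the bottom of this header pins kMaximumSlots so that the largest context the fast path can allocate is still a regular new-space object. A compile-time sketch of the same arithmetic under assumed 64-bit constants (the authoritative values are kPointerSize, Context::MIN_CONTEXT_SLOTS, FixedArray::kHeaderSize and kMaxRegularHeapObjectSize in the V8 headers):

    #include <cstdio>

    // Assumed values for a 64-bit build; only the shape of the check matters.
    constexpr int kPointerSize = 8;
    constexpr int kMinContextSlots = 4;
    constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;
    constexpr int kMaxRegularHeapObjectSize = 512 * 1024;
    constexpr int kMaximumSlots = 0x8000;

    constexpr int ContextSizeInBytes(int slots) {
      return (slots + kMinContextSlots) * kPointerSize + kFixedArrayHeaderSize;
    }

    // Same shape as the STATIC_ASSERT in the header: the biggest context the
    // fast path will ever allocate must stay a regular heap object.
    static_assert(ContextSizeInBytes(kMaximumSlots) < kMaxRegularHeapObjectSize,
                  "fast-path contexts must fit in new space");

    int main() {
      std::printf("max fast-path context: %d bytes\n",
                  ContextSizeInBytes(kMaximumSlots));
    }
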
diff --git a/deps/v8/src/builtins/builtins-conversion.cc b/deps/v8/src/builtins/builtins-conversion.cc
index 0eaf79ca23..177b739c4b 100644
--- a/deps/v8/src/builtins/builtins-conversion.cc
+++ b/deps/v8/src/builtins/builtins-conversion.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -95,110 +96,116 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
} // anonymous namespace
void Builtins::Generate_NonPrimitiveToPrimitive_Default(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kDefault);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kDefault);
}
void Builtins::Generate_NonPrimitiveToPrimitive_Number(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kNumber);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kNumber);
}
void Builtins::Generate_NonPrimitiveToPrimitive_String(
- CodeStubAssembler* assembler) {
- Generate_NonPrimitiveToPrimitive(assembler, ToPrimitiveHint::kString);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_NonPrimitiveToPrimitive(&assembler, ToPrimitiveHint::kString);
}
-void Builtins::Generate_StringToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_StringToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->StringToNumber(context, input));
+ assembler.Return(assembler.StringToNumber(context, input));
}
-void Builtins::Generate_ToName(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToName(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToName(context, input));
+ assembler.Return(assembler.ToName(context, input));
}
// static
-void Builtins::Generate_NonNumberToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_NonNumberToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->NonNumberToNumber(context, input));
+ assembler.Return(assembler.NonNumberToNumber(context, input));
}
// ES6 section 7.1.3 ToNumber ( argument )
-void Builtins::Generate_ToNumber(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToNumber(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToNumber(context, input));
+ assembler.Return(assembler.ToNumber(context, input));
}
-void Builtins::Generate_ToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* input = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* input = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label is_number(assembler);
- Label runtime(assembler);
+ Label is_number(&assembler);
+ Label runtime(&assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(input), &is_number);
+ assembler.GotoIf(assembler.TaggedIsSmi(input), &is_number);
- Node* input_map = assembler->LoadMap(input);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
+ Node* input_map = assembler.LoadMap(input);
+ Node* input_instance_type = assembler.LoadMapInstanceType(input_map);
- Label not_string(assembler);
- assembler->GotoUnless(assembler->IsStringInstanceType(input_instance_type),
- &not_string);
- assembler->Return(input);
+ Label not_string(&assembler);
+ assembler.GotoUnless(assembler.IsStringInstanceType(input_instance_type),
+ &not_string);
+ assembler.Return(input);
- Label not_heap_number(assembler);
+ Label not_heap_number(&assembler);
- assembler->Bind(&not_string);
+ assembler.Bind(&not_string);
{
- assembler->GotoUnless(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &not_heap_number);
- assembler->Goto(&is_number);
+ assembler.GotoUnless(assembler.IsHeapNumberMap(input_map),
+ &not_heap_number);
+ assembler.Goto(&is_number);
}
- assembler->Bind(&is_number);
- { assembler->Return(assembler->NumberToString(context, input)); }
+ assembler.Bind(&is_number);
+ { assembler.Return(assembler.NumberToString(context, input)); }
- assembler->Bind(&not_heap_number);
+ assembler.Bind(&not_heap_number);
{
- assembler->GotoIf(
- assembler->Word32NotEqual(input_instance_type,
- assembler->Int32Constant(ODDBALL_TYPE)),
+ assembler.GotoIf(
+ assembler.Word32NotEqual(input_instance_type,
+ assembler.Int32Constant(ODDBALL_TYPE)),
&runtime);
- assembler->Return(
- assembler->LoadObjectField(input, Oddball::kToStringOffset));
+ assembler.Return(
+ assembler.LoadObjectField(input, Oddball::kToStringOffset));
}
- assembler->Bind(&runtime);
+ assembler.Bind(&runtime);
{
- assembler->Return(
- assembler->CallRuntime(Runtime::kToString, context, input));
+ assembler.Return(assembler.CallRuntime(Runtime::kToString, context, input));
}
}
@@ -283,194 +290,200 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
} // anonymous namespace
void Builtins::Generate_OrdinaryToPrimitive_Number(
- CodeStubAssembler* assembler) {
- Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kNumber);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kNumber);
}
void Builtins::Generate_OrdinaryToPrimitive_String(
- CodeStubAssembler* assembler) {
- Generate_OrdinaryToPrimitive(assembler, OrdinaryToPrimitiveHint::kString);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_OrdinaryToPrimitive(&assembler, OrdinaryToPrimitiveHint::kString);
}
// ES6 section 7.1.2 ToBoolean ( argument )
-void Builtins::Generate_ToBoolean(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToBoolean(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* value = assembler->Parameter(Descriptor::kArgument);
+ Node* value = assembler.Parameter(Descriptor::kArgument);
- Label return_true(assembler), return_false(assembler);
- assembler->BranchIfToBooleanIsTrue(value, &return_true, &return_false);
+ Label return_true(&assembler), return_false(&assembler);
+ assembler.BranchIfToBooleanIsTrue(value, &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
-void Builtins::Generate_ToLength(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(1);
+ Node* context = assembler.Parameter(1);
// We might need to loop once for ToNumber conversion.
- Variable var_len(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_len);
- var_len.Bind(assembler->Parameter(0));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_len(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_len);
+ var_len.Bind(assembler.Parameter(0));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Shared entry points.
- Label return_len(assembler),
- return_two53minus1(assembler, Label::kDeferred),
- return_zero(assembler, Label::kDeferred);
+ Label return_len(&assembler),
+ return_two53minus1(&assembler, Label::kDeferred),
+ return_zero(&assembler, Label::kDeferred);
// Load the current {len} value.
Node* len = var_len.value();
// Check if {len} is a positive Smi.
- assembler->GotoIf(assembler->WordIsPositiveSmi(len), &return_len);
+ assembler.GotoIf(assembler.TaggedIsPositiveSmi(len), &return_len);
// Check if {len} is a (negative) Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(len), &return_zero);
+ assembler.GotoIf(assembler.TaggedIsSmi(len), &return_zero);
// Check if {len} is a HeapNumber.
- Label if_lenisheapnumber(assembler),
- if_lenisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(assembler->LoadMap(len)),
- &if_lenisheapnumber, &if_lenisnotheapnumber);
+ Label if_lenisheapnumber(&assembler),
+ if_lenisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(len)),
+ &if_lenisheapnumber, &if_lenisnotheapnumber);
- assembler->Bind(&if_lenisheapnumber);
+ assembler.Bind(&if_lenisheapnumber);
{
// Load the floating-point value of {len}.
- Node* len_value = assembler->LoadHeapNumberValue(len);
+ Node* len_value = assembler.LoadHeapNumberValue(len);
// Check if {len} is not greater than zero.
- assembler->GotoUnless(assembler->Float64GreaterThan(
- len_value, assembler->Float64Constant(0.0)),
- &return_zero);
+ assembler.GotoUnless(assembler.Float64GreaterThan(
+ len_value, assembler.Float64Constant(0.0)),
+ &return_zero);
// Check if {len} is greater than or equal to 2^53-1.
- assembler->GotoIf(
- assembler->Float64GreaterThanOrEqual(
- len_value, assembler->Float64Constant(kMaxSafeInteger)),
+ assembler.GotoIf(
+ assembler.Float64GreaterThanOrEqual(
+ len_value, assembler.Float64Constant(kMaxSafeInteger)),
&return_two53minus1);
// Round the {len} towards -Infinity.
- Node* value = assembler->Float64Floor(len_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Node* value = assembler.Float64Floor(len_value);
+ Node* result = assembler.ChangeFloat64ToTagged(value);
+ assembler.Return(result);
}
- assembler->Bind(&if_lenisnotheapnumber);
+ assembler.Bind(&if_lenisnotheapnumber);
{
// Need to convert {len} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_len.Bind(assembler->CallStub(callable, context, len));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_len.Bind(assembler.CallStub(callable, context, len));
+ assembler.Goto(&loop);
}
- assembler->Bind(&return_len);
- assembler->Return(var_len.value());
+ assembler.Bind(&return_len);
+ assembler.Return(var_len.value());
- assembler->Bind(&return_two53minus1);
- assembler->Return(assembler->NumberConstant(kMaxSafeInteger));
+ assembler.Bind(&return_two53minus1);
+ assembler.Return(assembler.NumberConstant(kMaxSafeInteger));
- assembler->Bind(&return_zero);
- assembler->Return(assembler->SmiConstant(Smi::kZero));
+ assembler.Bind(&return_zero);
+ assembler.Return(assembler.SmiConstant(Smi::kZero));
}
}
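
For an input that is already numeric, Generate_ToLength reduces to a clamp: NaN, zero, and negatives become 0, values at or above 2^53 - 1 saturate, and everything else is floored. A sketch of just that clamp (the builtin additionally loops through NonNumberToNumber for non-numeric inputs, omitted here):

    #include <cmath>
    #include <cstdio>

    // Models the clamping in Generate_ToLength for an already-numeric input.
    double ToLength(double len) {
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      if (!(len > 0.0)) return 0.0;  // catches NaN, 0, and negatives
      if (len >= kMaxSafeInteger) return kMaxSafeInteger;
      return std::floor(len);
    }

    int main() {
      std::printf("%.0f %.0f %.0f\n", ToLength(-3.5), ToLength(4.9),
                  ToLength(1e300));
    }
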
-void Builtins::Generate_ToInteger(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToInteger(compiler::CodeAssemblerState* state) {
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- compiler::Node* input = assembler->Parameter(Descriptor::kArgument);
- compiler::Node* context = assembler->Parameter(Descriptor::kContext);
+ compiler::Node* input = assembler.Parameter(Descriptor::kArgument);
+ compiler::Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->ToInteger(context, input));
+ assembler.Return(assembler.ToInteger(context, input));
}
// ES6 section 7.1.13 ToObject (argument)
-void Builtins::Generate_ToObject(CodeStubAssembler* assembler) {
+void Builtins::Generate_ToObject(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
typedef TypeConversionDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Label if_number(assembler, Label::kDeferred), if_notsmi(assembler),
- if_jsreceiver(assembler), if_noconstructor(assembler, Label::kDeferred),
- if_wrapjsvalue(assembler);
+ Label if_number(&assembler, Label::kDeferred), if_notsmi(&assembler),
+ if_jsreceiver(&assembler), if_noconstructor(&assembler, Label::kDeferred),
+ if_wrapjsvalue(&assembler);
- Node* object = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Variable constructor_function_index_var(assembler,
+ Variable constructor_function_index_var(&assembler,
MachineType::PointerRepresentation());
- assembler->Branch(assembler->TaggedIsSmi(object), &if_number, &if_notsmi);
+ assembler.Branch(assembler.TaggedIsSmi(object), &if_number, &if_notsmi);
- assembler->Bind(&if_notsmi);
- Node* map = assembler->LoadMap(object);
+ assembler.Bind(&if_notsmi);
+ Node* map = assembler.LoadMap(object);
- assembler->GotoIf(assembler->IsHeapNumberMap(map), &if_number);
+ assembler.GotoIf(assembler.IsHeapNumberMap(map), &if_number);
- Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(assembler->IsJSReceiverInstanceType(instance_type),
- &if_jsreceiver);
+ Node* instance_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(assembler.IsJSReceiverInstanceType(instance_type),
+ &if_jsreceiver);
Node* constructor_function_index =
- assembler->LoadMapConstructorFunctionIndex(map);
- assembler->GotoIf(assembler->WordEqual(constructor_function_index,
- assembler->IntPtrConstant(
- Map::kNoConstructorFunctionIndex)),
- &if_noconstructor);
+ assembler.LoadMapConstructorFunctionIndex(map);
+ assembler.GotoIf(assembler.WordEqual(constructor_function_index,
+ assembler.IntPtrConstant(
+ Map::kNoConstructorFunctionIndex)),
+ &if_noconstructor);
constructor_function_index_var.Bind(constructor_function_index);
- assembler->Goto(&if_wrapjsvalue);
+ assembler.Goto(&if_wrapjsvalue);
- assembler->Bind(&if_number);
+ assembler.Bind(&if_number);
constructor_function_index_var.Bind(
- assembler->IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
- assembler->Goto(&if_wrapjsvalue);
-
- assembler->Bind(&if_wrapjsvalue);
- Node* native_context = assembler->LoadNativeContext(context);
- Node* constructor = assembler->LoadFixedArrayElement(
- native_context, constructor_function_index_var.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* initial_map = assembler->LoadObjectField(
+ assembler.IntPtrConstant(Context::NUMBER_FUNCTION_INDEX));
+ assembler.Goto(&if_wrapjsvalue);
+
+ assembler.Bind(&if_wrapjsvalue);
+ Node* native_context = assembler.LoadNativeContext(context);
+ Node* constructor = assembler.LoadFixedArrayElement(
+ native_context, constructor_function_index_var.value());
+ Node* initial_map = assembler.LoadObjectField(
constructor, JSFunction::kPrototypeOrInitialMapOffset);
- Node* js_value = assembler->Allocate(JSValue::kSize);
- assembler->StoreMapNoWriteBarrier(js_value, initial_map);
- assembler->StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectField(js_value, JSValue::kValueOffset, object);
- assembler->Return(js_value);
-
- assembler->Bind(&if_noconstructor);
- assembler->TailCallRuntime(
+ Node* js_value = assembler.Allocate(JSValue::kSize);
+ assembler.StoreMapNoWriteBarrier(js_value, initial_map);
+ assembler.StoreObjectFieldRoot(js_value, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectFieldRoot(js_value, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ assembler.StoreObjectField(js_value, JSValue::kValueOffset, object);
+ assembler.Return(js_value);
+
+ assembler.Bind(&if_noconstructor);
+ assembler.TailCallRuntime(
Runtime::kThrowUndefinedOrNullToObject, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "ToObject", TENURED)));
+ assembler.HeapConstant(
+ assembler.factory()->NewStringFromAsciiChecked("ToObject", TENURED)));
- assembler->Bind(&if_jsreceiver);
- assembler->Return(object);
+ assembler.Bind(&if_jsreceiver);
+ assembler.Return(object);
}
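
Generate_ToObject wraps primitives by locating the matching constructor's initial map in the native context and allocating a JSValue holding the primitive; JSReceivers pass through untouched, and undefined/null throw. A rough sketch of that decision tree (stand-in types, not the V8 object model):

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    enum class Type { kUndefinedOrNull, kNumber, kString, kReceiver };

    struct Value {
      Type type;
      std::string payload;
    };

    // Stand-in for a JSValue: remembers which constructor's initial map it
    // was given plus the wrapped primitive.
    struct Wrapped {
      std::string constructor;
      Value value;
    };

    Wrapped ToObject(const Value& v) {
      switch (v.type) {
        case Type::kReceiver:
          return {"<self>", v};  // already an object: returned as-is
        case Type::kNumber:
          return {"Number", v};  // wrap using the Number constructor's map
        case Type::kString:
          return {"String", v};  // wrap using the String constructor's map
        case Type::kUndefinedOrNull:
          throw std::runtime_error("ToObject: undefined or null");
      }
      throw std::logic_error("unreachable");
    }

    int main() {
      Wrapped w = ToObject({Type::kNumber, "42"});
      std::printf("%s(%s)\n", w.constructor.c_str(), w.value.payload.c_str());
    }
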
// ES6 section 12.5.5 typeof operator
-void Builtins::Generate_Typeof(CodeStubAssembler* assembler) {
+void Builtins::Generate_Typeof(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeofDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Return(assembler->Typeof(object, context));
+ assembler.Return(assembler.Typeof(object, context));
}
} // namespace internal
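
Every generator in builtins-conversion.cc is rewritten the same mechanical way: the entry point now receives a compiler::CodeAssemblerState* instead of a ready-made CodeStubAssembler*, constructs its own assembler on the stack, and all `->` calls become `.` calls. A tiny sketch of the before/after shape, using stand-in types rather than the real V8 classes:

    #include <cstdio>

    // Stand-ins for compiler::CodeAssemblerState and CodeStubAssembler.
    struct CodeAssemblerState { int id; };

    class CodeStubAssembler {
     public:
      explicit CodeStubAssembler(CodeAssemblerState* state) : state_(state) {}
      void Return(int value) {
        std::printf("builtin %d returns %d\n", state_->id, value);
      }
     private:
      CodeAssemblerState* state_;
    };

    // Old style: callers passed an assembler they had already constructed.
    void Generate_OldStyle(CodeStubAssembler* assembler) { assembler->Return(1); }

    // New style: each builtin owns its assembler, built from shared state.
    void Generate_NewStyle(CodeAssemblerState* state) {
      CodeStubAssembler assembler(state);
      assembler.Return(2);
    }

    int main() {
      CodeAssemblerState state{7};
      CodeStubAssembler assembler(&state);
      Generate_OldStyle(&assembler);
      Generate_NewStyle(&state);
    }
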
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 949620b6b2..df74321093 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/dateparser-inl.h"
namespace v8 {
@@ -209,7 +210,7 @@ BUILTIN(DateConstructor_ConstructStub) {
if (argc == 0) {
time_val = JSDate::CurrentTimeValue(isolate);
} else if (argc == 1) {
- Handle<Object> value = args.at<Object>(1);
+ Handle<Object> value = args.at(1);
if (value->IsJSDate()) {
time_val = Handle<JSDate>::cast(value)->value()->Number();
} else {
@@ -226,37 +227,37 @@ BUILTIN(DateConstructor_ConstructStub) {
} else {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
+ Object::ToNumber(args.at(1)));
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
+ Object::ToNumber(args.at(2)));
double year = year_object->Number();
double month = month_object->Number();
double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
if (argc >= 3) {
Handle<Object> date_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
- Object::ToNumber(args.at<Object>(3)));
+ Object::ToNumber(args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object, Object::ToNumber(args.at<Object>(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms_object,
+ Object::ToNumber(args.at(7)));
ms = ms_object->Number();
}
}
@@ -306,38 +307,37 @@ BUILTIN(DateUTC) {
if (argc >= 1) {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
- Object::ToNumber(args.at<Object>(1)));
+ Object::ToNumber(args.at(1)));
year = year_object->Number();
if (argc >= 2) {
Handle<Object> month_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month_object,
- Object::ToNumber(args.at<Object>(2)));
+ Object::ToNumber(args.at(2)));
month = month_object->Number();
if (argc >= 3) {
Handle<Object> date_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, date_object, Object::ToNumber(args.at<Object>(3)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date_object,
+ Object::ToNumber(args.at(3)));
date = date_object->Number();
if (argc >= 4) {
Handle<Object> hours_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, hours_object, Object::ToNumber(args.at<Object>(4)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, hours_object,
+ Object::ToNumber(args.at(4)));
hours = hours_object->Number();
if (argc >= 5) {
Handle<Object> minutes_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, minutes_object, Object::ToNumber(args.at<Object>(5)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, minutes_object,
+ Object::ToNumber(args.at(5)));
minutes = minutes_object->Number();
if (argc >= 6) {
Handle<Object> seconds_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, seconds_object,
- Object::ToNumber(args.at<Object>(6)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, seconds_object,
+ Object::ToNumber(args.at(6)));
seconds = seconds_object->Number();
if (argc >= 7) {
Handle<Object> ms_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ms_object, Object::ToNumber(args.at<Object>(7)));
+ isolate, ms_object, Object::ToNumber(args.at(7)));
ms = ms_object->Number();
}
}
@@ -394,11 +394,11 @@ BUILTIN(DatePrototypeSetFullYear) {
dt = day;
}
if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
+ Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
m = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
+ Handle<Object> date = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -425,15 +425,15 @@ BUILTIN(DatePrototypeSetHours) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
+ Handle<Object> min = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
m = min->Number();
if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
+ Handle<Object> sec = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
+ Handle<Object> ms = args.at(4);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -482,11 +482,11 @@ BUILTIN(DatePrototypeSetMinutes) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
+ Handle<Object> sec = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
+ Handle<Object> ms = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -514,7 +514,7 @@ BUILTIN(DatePrototypeSetMonth) {
double m = month->Number();
double dt = day;
if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
+ Handle<Object> date = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -541,7 +541,7 @@ BUILTIN(DatePrototypeSetSeconds) {
double s = sec->Number();
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
+ Handle<Object> ms = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -595,11 +595,11 @@ BUILTIN(DatePrototypeSetUTCFullYear) {
dt = day;
}
if (argc >= 2) {
- Handle<Object> month = args.at<Object>(2);
+ Handle<Object> month = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, month, Object::ToNumber(month));
m = month->Number();
if (argc >= 3) {
- Handle<Object> date = args.at<Object>(3);
+ Handle<Object> date = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -625,15 +625,15 @@ BUILTIN(DatePrototypeSetUTCHours) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> min = args.at<Object>(2);
+ Handle<Object> min = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, min, Object::ToNumber(min));
m = min->Number();
if (argc >= 3) {
- Handle<Object> sec = args.at<Object>(3);
+ Handle<Object> sec = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 4) {
- Handle<Object> ms = args.at<Object>(4);
+ Handle<Object> ms = args.at(4);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -680,11 +680,11 @@ BUILTIN(DatePrototypeSetUTCMinutes) {
double s = (time_within_day / 1000) % 60;
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> sec = args.at<Object>(2);
+ Handle<Object> sec = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, sec, Object::ToNumber(sec));
s = sec->Number();
if (argc >= 3) {
- Handle<Object> ms = args.at<Object>(3);
+ Handle<Object> ms = args.at(3);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -711,7 +711,7 @@ BUILTIN(DatePrototypeSetUTCMonth) {
double m = month->Number();
double dt = day;
if (argc >= 2) {
- Handle<Object> date = args.at<Object>(2);
+ Handle<Object> date = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, date, Object::ToNumber(date));
dt = date->Number();
}
@@ -737,7 +737,7 @@ BUILTIN(DatePrototypeSetUTCSeconds) {
double s = sec->Number();
double milli = time_within_day % 1000;
if (argc >= 2) {
- Handle<Object> ms = args.at<Object>(2);
+ Handle<Object> ms = args.at(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, ms, Object::ToNumber(ms));
milli = ms->Number();
}
@@ -825,22 +825,6 @@ BUILTIN(DatePrototypeToUTCString) {
return *isolate->factory()->NewStringFromAsciiChecked(buffer);
}
-// ES6 section 20.3.4.44 Date.prototype.valueOf ( )
-BUILTIN(DatePrototypeValueOf) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSDate, date, "Date.prototype.valueOf");
- return date->value();
-}
-
-// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
-BUILTIN(DatePrototypeToPrimitive) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CHECK_RECEIVER(JSReceiver, receiver, "Date.prototype [ @@toPrimitive ]");
- Handle<Object> hint = args.at<Object>(1);
- RETURN_RESULT_OR_FAILURE(isolate, JSDate::ToPrimitive(receiver, hint));
-}
-
// ES6 section B.2.4.1 Date.prototype.getYear ( )
BUILTIN(DatePrototypeGetYear) {
HandleScope scope(isolate);
@@ -908,9 +892,10 @@ BUILTIN(DatePrototypeToJson) {
}
}
-// static
-void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
- int field_index) {
+namespace {
+
+void Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
+ int field_index) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
@@ -952,7 +937,7 @@ void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
Node* function = assembler->ExternalConstant(
ExternalReference::get_date_field_function(assembler->isolate()));
Node* result = assembler->CallCFunction2(
- MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
MachineType::AnyTagged(), function, receiver, field_index_smi);
assembler->Return(result);
}
@@ -965,100 +950,223 @@ void Builtins::Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
}
}
+} // namespace
+
// static
-void Builtins::Generate_DatePrototypeGetDate(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDay);
+void Builtins::Generate_DatePrototypeGetDate(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDay);
}
// static
-void Builtins::Generate_DatePrototypeGetDay(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kWeekday);
+void Builtins::Generate_DatePrototypeGetDay(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kWeekday);
}
// static
-void Builtins::Generate_DatePrototypeGetFullYear(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kYear);
+void Builtins::Generate_DatePrototypeGetFullYear(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kYear);
}
// static
-void Builtins::Generate_DatePrototypeGetHours(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kHour);
+void Builtins::Generate_DatePrototypeGetHours(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kHour);
}
// static
void Builtins::Generate_DatePrototypeGetMilliseconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMillisecond);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecond);
}
// static
-void Builtins::Generate_DatePrototypeGetMinutes(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMinute);
+void Builtins::Generate_DatePrototypeGetMinutes(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMinute);
}
// static
-void Builtins::Generate_DatePrototypeGetMonth(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMonth);
+void Builtins::Generate_DatePrototypeGetMonth(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMonth);
}
// static
-void Builtins::Generate_DatePrototypeGetSeconds(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kSecond);
+void Builtins::Generate_DatePrototypeGetSeconds(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kSecond);
}
// static
-void Builtins::Generate_DatePrototypeGetTime(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDateValue);
+void Builtins::Generate_DatePrototypeGetTime(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
}
// static
void Builtins::Generate_DatePrototypeGetTimezoneOffset(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kTimezoneOffset);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kTimezoneOffset);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDate(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kDayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDate(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDayUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCDay(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kWeekdayUTC);
+void Builtins::Generate_DatePrototypeGetUTCDay(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kWeekdayUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCFullYear(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kYearUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kYearUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCHours(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kHourUTC);
+void Builtins::Generate_DatePrototypeGetUTCHours(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kHourUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCMilliseconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMillisecondUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMillisecondUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCMinutes(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMinuteUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMinuteUTC);
}
// static
-void Builtins::Generate_DatePrototypeGetUTCMonth(CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kMonthUTC);
+void Builtins::Generate_DatePrototypeGetUTCMonth(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kMonthUTC);
}
// static
void Builtins::Generate_DatePrototypeGetUTCSeconds(
- CodeStubAssembler* assembler) {
- Generate_DatePrototype_GetField(assembler, JSDate::kSecondUTC);
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kSecondUTC);
+}
+
+// static
+void Builtins::Generate_DatePrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_DatePrototype_GetField(&assembler, JSDate::kDateValue);
+}
+
+// static
+void Builtins::Generate_DatePrototypeToPrimitive(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+
+ Node* receiver = assembler.Parameter(0);
+ Node* hint = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
+
+ // Check if the {receiver} is actually a JSReceiver.
+ Label receiver_is_invalid(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &receiver_is_invalid);
+ assembler.GotoUnless(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
+
+ // Dispatch to the appropriate OrdinaryToPrimitive builtin.
+ Label hint_is_number(&assembler), hint_is_string(&assembler),
+ hint_is_invalid(&assembler, Label::kDeferred);
+
+ // Fast cases for internalized strings.
+ Node* number_string = assembler.LoadRoot(Heap::knumber_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, number_string), &hint_is_number);
+ Node* default_string = assembler.LoadRoot(Heap::kdefault_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, default_string), &hint_is_string);
+ Node* string_string = assembler.LoadRoot(Heap::kstring_stringRootIndex);
+ assembler.GotoIf(assembler.WordEqual(hint, string_string), &hint_is_string);
+
+ // Slow case with actual string comparisons.
+ Callable string_equal = CodeFactory::StringEqual(assembler.isolate());
+ assembler.GotoIf(assembler.TaggedIsSmi(hint), &hint_is_invalid);
+ assembler.GotoUnless(assembler.IsString(hint), &hint_is_invalid);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, number_string),
+ assembler.TrueConstant()),
+ &hint_is_number);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, default_string),
+ assembler.TrueConstant()),
+ &hint_is_string);
+ assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
+ hint, string_string),
+ assembler.TrueConstant()),
+ &hint_is_string);
+ assembler.Goto(&hint_is_invalid);
+
+ // Use the OrdinaryToPrimitive builtin to convert to a Number.
+ assembler.Bind(&hint_is_number);
+ {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(
+ assembler.isolate(), OrdinaryToPrimitiveHint::kNumber);
+ Node* result = assembler.CallStub(callable, context, receiver);
+ assembler.Return(result);
+ }
+
+ // Use the OrdinaryToPrimitive builtin to convert to a String.
+ assembler.Bind(&hint_is_string);
+ {
+ Callable callable = CodeFactory::OrdinaryToPrimitive(
+ assembler.isolate(), OrdinaryToPrimitiveHint::kString);
+ Node* result = assembler.CallStub(callable, context, receiver);
+ assembler.Return(result);
+ }
+
+ // Raise a TypeError if the {hint} is invalid.
+ assembler.Bind(&hint_is_invalid);
+ {
+ Node* result =
+ assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
+ assembler.Return(result);
+ }
+
+ // Raise a TypeError if the {receiver} is not a JSReceiver instance.
+ assembler.Bind(&receiver_is_invalid);
+ {
+ Node* result = assembler.CallRuntime(
+ Runtime::kThrowIncompatibleMethodReceiver, context,
+ assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
+ "Date.prototype [ @@toPrimitive ]", TENURED)),
+ receiver);
+ assembler.Return(result);
+ }
}
} // namespace internal
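
The new Generate_DatePrototypeToPrimitive dispatches on the hint string: "number" selects the Number ordering of OrdinaryToPrimitive, "string" and "default" select the String ordering, and anything else raises a TypeError via Runtime::kThrowInvalidHint. A plain-C++ sketch of just that dispatch (the builtin first tries pointer comparisons against internalized root strings before falling back to full string equality; std::string comparison stands in for both here):

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    enum class Hint { kNumber, kString };

    // "number" -> Number ordering; "string"/"default" -> String ordering;
    // anything else is an invalid hint.
    Hint DispatchToPrimitiveHint(const std::string& hint) {
      if (hint == "number") return Hint::kNumber;
      if (hint == "string" || hint == "default") return Hint::kString;
      throw std::invalid_argument("invalid @@toPrimitive hint: " + hint);
    }

    int main() {
      std::printf("%d\n", static_cast<int>(DispatchToPrimitiveHint("default")));
    }
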
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index 9a8ee796b5..818e09a722 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/compiler.h"
#include "src/string-builder.h"
@@ -42,8 +43,7 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
if (i > 1) builder.AppendCharacter(',');
Handle<String> param;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, param, Object::ToString(isolate, args.at<Object>(i)),
- Object);
+ isolate, param, Object::ToString(isolate, args.at(i)), Object);
param = String::Flatten(param);
builder.AppendString(param);
// If the formal parameters string include ) - an illegal
@@ -61,14 +61,13 @@ MaybeHandle<Object> CreateDynamicFunction(Isolate* isolate,
// If the formal parameters include an unbalanced block comment, the
// function must be rejected. Since JavaScript does not allow nested
// comments we can include a trailing block comment to catch this.
- builder.AppendCString("\n/**/");
+ builder.AppendCString("\n/*``*/");
}
builder.AppendCString(") {\n");
if (argc > 0) {
Handle<String> body;
ASSIGN_RETURN_ON_EXCEPTION(
- isolate, body, Object::ToString(isolate, args.at<Object>(argc)),
- Object);
+ isolate, body, Object::ToString(isolate, args.at(argc)), Object);
builder.AppendString(body);
}
builder.AppendCString("\n})");
@@ -179,9 +178,9 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
Handle<Object> this_arg = isolate->factory()->undefined_value();
ScopedVector<Handle<Object>> argv(std::max(0, args.length() - 2));
if (args.length() > 1) {
- this_arg = args.at<Object>(1);
+ this_arg = args.at(1);
for (int i = 2; i < args.length(); ++i) {
- argv[i - 2] = args.at<Object>(i);
+ argv[i - 2] = args.at(i);
}
}
Handle<JSBoundFunction> function;
@@ -255,6 +254,184 @@ Object* DoFunctionBind(Isolate* isolate, BuiltinArguments args) {
// ES6 section 19.2.3.2 Function.prototype.bind ( thisArg, ...args )
BUILTIN(FunctionPrototypeBind) { return DoFunctionBind(isolate, args); }
+void Builtins::Generate_FastFunctionPrototypeBind(
+ compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+
+ CodeStubAssembler assembler(state);
+ Label slow(&assembler);
+
+ Node* argc = assembler.Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = assembler.Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = assembler.Parameter(BuiltinDescriptor::kNewTarget);
+
+ CodeStubArguments args(&assembler, argc);
+
+ // Check that the receiver has the JS_FUNCTION_TYPE instance type.
+ Node* receiver = args.GetReceiver();
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &slow);
+
+ Node* receiver_map = assembler.LoadMap(receiver);
+ Node* instance_type = assembler.LoadMapInstanceType(receiver_map);
+ assembler.GotoIf(
+ assembler.Word32NotEqual(instance_type,
+ assembler.Int32Constant(JS_FUNCTION_TYPE)),
+ &slow);
+
+ // Disallow binding of slow-mode functions. We need to figure out whether
+ // the length and name properties are in their original state.
+ assembler.Comment("Disallow binding of slow-mode functions");
+ assembler.GotoIf(assembler.IsDictionaryMap(receiver_map), &slow);
+
+ // The fast path requires the descriptor array to still hold at least the
+ // length and name entries, which are validated below.
+ assembler.Comment("Check descriptor array length");
+ Node* descriptors = assembler.LoadMapDescriptors(receiver_map);
+ Node* descriptors_length = assembler.LoadFixedArrayBaseLength(descriptors);
+ assembler.GotoIf(assembler.SmiLessThanOrEqual(descriptors_length,
+ assembler.SmiConstant(1)),
+ &slow);
+
+ // Check whether the length and name properties are still present as
+ // AccessorInfo objects. In that case, their value can be recomputed even if
+ // the actual value on the object changes.
+ assembler.Comment("Check name and length properties");
+ const int length_index = JSFunction::kLengthDescriptorIndex;
+ Node* maybe_length = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(length_index));
+ assembler.GotoIf(
+ assembler.WordNotEqual(maybe_length,
+ assembler.LoadRoot(Heap::klength_stringRootIndex)),
+ &slow);
+
+ Node* maybe_length_accessor = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(length_index));
+ assembler.GotoIf(assembler.TaggedIsSmi(maybe_length_accessor), &slow);
+ Node* length_value_map = assembler.LoadMap(maybe_length_accessor);
+ assembler.GotoUnless(assembler.IsAccessorInfoMap(length_value_map), &slow);
+
+ const int name_index = JSFunction::kNameDescriptorIndex;
+ Node* maybe_name = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToKeyIndex(name_index));
+ assembler.GotoIf(
+ assembler.WordNotEqual(maybe_name,
+ assembler.LoadRoot(Heap::kname_stringRootIndex)),
+ &slow);
+
+ Node* maybe_name_accessor = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(name_index));
+ assembler.GotoIf(assembler.TaggedIsSmi(maybe_name_accessor), &slow);
+ Node* name_value_map = assembler.LoadMap(maybe_name_accessor);
+ assembler.GotoUnless(assembler.IsAccessorInfoMap(name_value_map), &slow);
+
+ // Choose the right bound function map based on whether the target is
+ // constructable.
+ assembler.Comment("Choose the right bound function map");
+ Variable bound_function_map(&assembler, MachineRepresentation::kTagged);
+ Label with_constructor(&assembler);
+ CodeStubAssembler::VariableList vars({&bound_function_map}, assembler.zone());
+ Node* native_context = assembler.LoadNativeContext(context);
+
+ Label map_done(&assembler, vars);
+ Node* bit_field = assembler.LoadMapBitField(receiver_map);
+ int mask = static_cast<int>(1 << Map::kIsConstructor);
+ assembler.GotoIf(assembler.IsSetWord32(bit_field, mask), &with_constructor);
+
+ bound_function_map.Bind(assembler.LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
+ assembler.Goto(&map_done);
+
+ assembler.Bind(&with_constructor);
+ bound_function_map.Bind(assembler.LoadContextElement(
+ native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
+ assembler.Goto(&map_done);
+
+ assembler.Bind(&map_done);
+
+ // Verify that __proto__ matches that of the target bound function.
+ assembler.Comment("Verify that __proto__ matches target bound function");
+ Node* prototype = assembler.LoadMapPrototype(receiver_map);
+ Node* expected_prototype =
+ assembler.LoadMapPrototype(bound_function_map.value());
+ assembler.GotoIf(assembler.WordNotEqual(prototype, expected_prototype),
+ &slow);
+
+ // Allocate the arguments array.
+ assembler.Comment("Allocate the arguments array");
+ Variable argument_array(&assembler, MachineRepresentation::kTagged);
+ Label empty_arguments(&assembler);
+ Label arguments_done(&assembler, &argument_array);
+ assembler.GotoIf(
+ assembler.Uint32LessThanOrEqual(argc, assembler.Int32Constant(1)),
+ &empty_arguments);
+ Node* elements_length = assembler.ChangeUint32ToWord(
+ assembler.Int32Sub(argc, assembler.Int32Constant(1)));
+ Node* elements = assembler.AllocateFixedArray(FAST_ELEMENTS, elements_length);
+ Variable index(&assembler, MachineType::PointerRepresentation());
+ index.Bind(assembler.IntPtrConstant(0));
+ CodeStubAssembler::VariableList foreach_vars({&index}, assembler.zone());
+ args.ForEach(foreach_vars,
+ [&assembler, elements, &index](compiler::Node* arg) {
+ assembler.StoreFixedArrayElement(elements, index.value(), arg);
+ assembler.Increment(index);
+ },
+ assembler.IntPtrConstant(1));
+ argument_array.Bind(elements);
+ assembler.Goto(&arguments_done);
+
+ assembler.Bind(&empty_arguments);
+ argument_array.Bind(assembler.EmptyFixedArrayConstant());
+ assembler.Goto(&arguments_done);
+
+ assembler.Bind(&arguments_done);
+
+ // Determine bound receiver.
+ assembler.Comment("Determine bound receiver");
+ Variable bound_receiver(&assembler, MachineRepresentation::kTagged);
+ Label has_receiver(&assembler);
+ Label receiver_done(&assembler, &bound_receiver);
+ assembler.GotoIf(assembler.Word32NotEqual(argc, assembler.Int32Constant(0)),
+ &has_receiver);
+ bound_receiver.Bind(assembler.UndefinedConstant());
+ assembler.Goto(&receiver_done);
+
+ assembler.Bind(&has_receiver);
+ bound_receiver.Bind(args.AtIndex(0));
+ assembler.Goto(&receiver_done);
+
+ assembler.Bind(&receiver_done);
+
+ // Allocate the resulting bound function.
+ assembler.Comment("Allocate the resulting bound function");
+ Node* bound_function = assembler.Allocate(JSBoundFunction::kSize);
+ assembler.StoreMapNoWriteBarrier(bound_function, bound_function_map.value());
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver);
+ assembler.StoreObjectFieldNoWriteBarrier(bound_function,
+ JSBoundFunction::kBoundThisOffset,
+ bound_receiver.value());
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSBoundFunction::kBoundArgumentsOffset,
+ argument_array.value());
+ Node* empty_fixed_array = assembler.EmptyFixedArrayConstant();
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kPropertiesOffset, empty_fixed_array);
+ assembler.StoreObjectFieldNoWriteBarrier(
+ bound_function, JSObject::kElementsOffset, empty_fixed_array);
+
+ args.PopAndReturn(bound_function);
+ assembler.Bind(&slow);
+
+ Node* target = assembler.LoadFromFrame(
+ StandardFrameConstants::kFunctionOffset, MachineType::TaggedPointer());
+ assembler.TailCallStub(
+ CodeFactory::FunctionPrototypeBind(assembler.isolate()), context, target,
+ new_target, argc);
+}
+
// TODO(verwaest): This is a temporary helper until the FastFunctionBind stub
// can tailcall to the builtin directly.
RUNTIME_FUNCTION(Runtime_FunctionBind) {
@@ -283,14 +460,15 @@ BUILTIN(FunctionPrototypeToString) {
// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
void Builtins::Generate_FunctionPrototypeHasInstance(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
using compiler::Node;
+ CodeStubAssembler assembler(state);
- Node* f = assembler->Parameter(0);
- Node* v = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* result = assembler->OrdinaryHasInstance(context, f, v);
- assembler->Return(result);
+ Node* f = assembler.Parameter(0);
+ Node* v = assembler.Parameter(1);
+ Node* context = assembler.Parameter(4);
+ Node* result = assembler.OrdinaryHasInstance(context, f, v);
+ assembler.Return(result);
}
} // namespace internal
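
The fast Function.prototype.bind path above only proceeds when a chain of preconditions holds; any failed check jumps to the &slow label, which tail-calls the generic stub. A checklist-style sketch of those preconditions, with a simplified stand-in for the receiver state the builtin actually probes:

    #include <cstdio>

    // Assumed, simplified view of the receiver state the fast bind path checks.
    struct FunctionState {
      bool is_js_function;
      bool has_dictionary_map;
      bool length_is_original_accessor;  // "length" still the AccessorInfo entry
      bool name_is_original_accessor;    // "name" still the AccessorInfo entry
      bool prototype_matches_bound_map;  // __proto__ agrees with the bound map
    };

    // Mirrors the chain of GotoIf(..., &slow) checks: any failed precondition
    // sends the call to the generic bind implementation.
    bool CanUseFastBind(const FunctionState& f) {
      return f.is_js_function && !f.has_dictionary_map &&
             f.length_is_original_accessor && f.name_is_original_accessor &&
             f.prototype_matches_bound_map;
    }

    int main() {
      FunctionState pristine{true, false, true, true, true};
      FunctionState patched = pristine;
      patched.name_is_original_accessor = false;  // e.g. fn.name was redefined
      std::printf("pristine fast path: %d\n", CanUseFastBind(pristine));
      std::printf("patched  fast path: %d\n", CanUseFastBind(patched));
    }
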
diff --git a/deps/v8/src/builtins/builtins-generator.cc b/deps/v8/src/builtins/builtins-generator.cc
index fe1f2d2304..d22c3cdd64 100644
--- a/deps/v8/src/builtins/builtins-generator.cc
+++ b/deps/v8/src/builtins/builtins-generator.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -65,18 +65,20 @@ void Generate_GeneratorPrototypeResume(
assembler->Bind(&if_receiverisclosed);
{
+ Callable create_iter_result_object =
+ CodeFactory::CreateIterResultObject(assembler->isolate());
+
// The {receiver} is closed already.
Node* result = nullptr;
switch (resume_mode) {
case JSGeneratorObject::kNext:
- result = assembler->CallRuntime(Runtime::kCreateIterResultObject,
- context, assembler->UndefinedConstant(),
- assembler->BooleanConstant(true));
+ result = assembler->CallStub(create_iter_result_object, context,
+ assembler->UndefinedConstant(),
+ assembler->TrueConstant());
break;
case JSGeneratorObject::kReturn:
- result =
- assembler->CallRuntime(Runtime::kCreateIterResultObject, context,
- value, assembler->BooleanConstant(true));
+ result = assembler->CallStub(create_iter_result_object, context, value,
+ assembler->TrueConstant());
break;
case JSGeneratorObject::kThrow:
result = assembler->CallRuntime(Runtime::kThrow, context, value);
@@ -96,20 +98,26 @@ void Generate_GeneratorPrototypeResume(
} // anonymous namespace
// ES6 section 25.3.1.2 Generator.prototype.next ( value )
-void Builtins::Generate_GeneratorPrototypeNext(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kNext,
+void Builtins::Generate_GeneratorPrototypeNext(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kNext,
"[Generator].prototype.next");
}
// ES6 section 25.3.1.3 Generator.prototype.return ( value )
-void Builtins::Generate_GeneratorPrototypeReturn(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kReturn,
+void Builtins::Generate_GeneratorPrototypeReturn(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kReturn,
"[Generator].prototype.return");
}
// ES6 section 25.3.1.4 Generator.prototype.throw ( exception )
-void Builtins::Generate_GeneratorPrototypeThrow(CodeStubAssembler* assembler) {
- Generate_GeneratorPrototypeResume(assembler, JSGeneratorObject::kThrow,
+void Builtins::Generate_GeneratorPrototypeThrow(
+ compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ Generate_GeneratorPrototypeResume(&assembler, JSGeneratorObject::kThrow,
"[Generator].prototype.throw");
}
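
For an already-closed generator, the patch replaces the Runtime::kCreateIterResultObject call with the CreateIterResultObject stub: next yields { value: undefined, done: true }, return echoes its argument with done: true, and throw still goes through the runtime. A small sketch of those closed-generator semantics (plain structs standing in for JS objects):

    #include <cstdio>
    #include <stdexcept>

    // Simplified iterator-result shape.
    struct IterResult {
      const char* value;  // nullptr stands in for JS undefined
      bool done;
    };

    enum class ResumeMode { kNext, kReturn, kThrow };

    // What resuming an already-closed generator yields, per the switch above.
    IterResult ResumeClosedGenerator(ResumeMode mode, const char* value) {
      switch (mode) {
        case ResumeMode::kNext:
          return {nullptr, true};  // { value: undefined, done: true }
        case ResumeMode::kReturn:
          return {value, true};    // { value, done: true }
        case ResumeMode::kThrow:
          throw std::runtime_error("value rethrown from closed generator");
      }
      return {nullptr, true};  // unreachable
    }

    int main() {
      IterResult r = ResumeClosedGenerator(ResumeMode::kReturn, "42");
      std::printf("value=%s done=%d\n", r.value ? r.value : "undefined", r.done);
    }
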
diff --git a/deps/v8/src/builtins/builtins-global.cc b/deps/v8/src/builtins/builtins-global.cc
index 1fa0967aa9..6c97a0bbad 100644
--- a/deps/v8/src/builtins/builtins-global.cc
+++ b/deps/v8/src/builtins/builtins-global.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/compiler.h"
#include "src/uri.h"
@@ -101,111 +101,110 @@ BUILTIN(GlobalEval) {
}
// ES6 section 18.2.2 isFinite ( number )
-void Builtins::Generate_GlobalIsFinite(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsFinite(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(4);
+ Node* context = assembler.Parameter(4);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(&assembler), return_false(&assembler);
// We might need to loop once for ToNumber conversion.
- Variable var_num(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_num);
- var_num.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_num(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_num);
+ var_num.Bind(assembler.Parameter(1));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Load the current {num} value.
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->TaggedIsSmi(num), &return_true);
+ assembler.GotoIf(assembler.TaggedIsSmi(num), &return_true);
// Check if {num} is a HeapNumber.
- Label if_numisheapnumber(assembler),
- if_numisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
- assembler->HeapNumberMapConstant()),
- &if_numisheapnumber, &if_numisnotheapnumber);
+ Label if_numisheapnumber(&assembler),
+ if_numisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+ &if_numisheapnumber, &if_numisnotheapnumber);
- assembler->Bind(&if_numisheapnumber);
+ assembler.Bind(&if_numisheapnumber);
{
// Check if {num} contains a finite, non-NaN value.
- Node* num_value = assembler->LoadHeapNumberValue(num);
- assembler->BranchIfFloat64IsNaN(
- assembler->Float64Sub(num_value, num_value), &return_false,
- &return_true);
+ Node* num_value = assembler.LoadHeapNumberValue(num);
+ assembler.BranchIfFloat64IsNaN(assembler.Float64Sub(num_value, num_value),
+ &return_false, &return_true);
}
- assembler->Bind(&if_numisnotheapnumber);
+ assembler.Bind(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_num.Bind(assembler->CallStub(callable, context, num));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_num.Bind(assembler.CallStub(callable, context, num));
+ assembler.Goto(&loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
// ES6 section 18.2.3 isNaN ( number )
-void Builtins::Generate_GlobalIsNaN(CodeStubAssembler* assembler) {
+void Builtins::Generate_GlobalIsNaN(compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* context = assembler->Parameter(4);
+ Node* context = assembler.Parameter(4);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(&assembler), return_false(&assembler);
// We might need to loop once for ToNumber conversion.
- Variable var_num(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_num);
- var_num.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_num(&assembler, MachineRepresentation::kTagged);
+ Label loop(&assembler, &var_num);
+ var_num.Bind(assembler.Parameter(1));
+ assembler.Goto(&loop);
+ assembler.Bind(&loop);
{
// Load the current {num} value.
Node* num = var_num.value();
// Check if {num} is a Smi or a HeapObject.
- assembler->GotoIf(assembler->TaggedIsSmi(num), &return_false);
+ assembler.GotoIf(assembler.TaggedIsSmi(num), &return_false);
// Check if {num} is a HeapNumber.
- Label if_numisheapnumber(assembler),
- if_numisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(assembler->LoadMap(num),
- assembler->HeapNumberMapConstant()),
- &if_numisheapnumber, &if_numisnotheapnumber);
+ Label if_numisheapnumber(&assembler),
+ if_numisnotheapnumber(&assembler, Label::kDeferred);
+ assembler.Branch(assembler.IsHeapNumberMap(assembler.LoadMap(num)),
+ &if_numisheapnumber, &if_numisnotheapnumber);
- assembler->Bind(&if_numisheapnumber);
+ assembler.Bind(&if_numisheapnumber);
{
// Check if {num} contains a NaN.
- Node* num_value = assembler->LoadHeapNumberValue(num);
- assembler->BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
+ Node* num_value = assembler.LoadHeapNumberValue(num);
+ assembler.BranchIfFloat64IsNaN(num_value, &return_true, &return_false);
}
- assembler->Bind(&if_numisnotheapnumber);
+ assembler.Bind(&if_numisnotheapnumber);
{
// Need to convert {num} to a Number first.
- Callable callable = CodeFactory::NonNumberToNumber(assembler->isolate());
- var_num.Bind(assembler->CallStub(callable, context, num));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(assembler.isolate());
+ var_num.Bind(assembler.CallStub(callable, context, num));
+ assembler.Goto(&loop);
}
}
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ assembler.Bind(&return_true);
+ assembler.Return(assembler.BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ assembler.Bind(&return_false);
+ assembler.Return(assembler.BooleanConstant(false));
}
} // namespace internal
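Both isFinite and isNaN above follow the same shape: loop through CodeFactory::NonNumberToNumber while conversion is needed, then test the unboxed double. The finiteness check leans on the identity that x - x is 0.0 for every finite double but NaN for NaN and +/-Infinity, which is exactly what BranchIfFloat64IsNaN(Float64Sub(num_value, num_value), ...) tests. The predicate in isolation, as plain C++:

    #include <cassert>
    #include <cmath>
    #include <limits>

    // x - x is NaN exactly when x is NaN or +/-Infinity, 0.0 otherwise.
    bool IsFiniteDouble(double x) { return !std::isnan(x - x); }

    int main() {
      assert(IsFiniteDouble(1.5) && IsFiniteDouble(0.0));
      assert(!IsFiniteDouble(std::numeric_limits<double>::infinity()));
      assert(!IsFiniteDouble(std::nan("")));
      return 0;
    }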
diff --git a/deps/v8/src/builtins/builtins-handler.cc b/deps/v8/src/builtins/builtins-handler.cc
index 88597f8add..42b35d0d2f 100644
--- a/deps/v8/src/builtins/builtins-handler.cc
+++ b/deps/v8/src/builtins/builtins-handler.cc
@@ -4,6 +4,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/accessor-assembler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/keyed-store-generic.h"
@@ -12,59 +14,46 @@ namespace v8 {
namespace internal {
void Builtins::Generate_KeyedLoadIC_Megamorphic_TF(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
+ AccessorAssembler::GenerateKeyedLoadICMegamorphic(state);
+}
+
+void Builtins::Generate_KeyedLoadIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadICGeneric(&p);
-}
-
-void Builtins::Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm);
-}
-void Builtins::Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
- KeyedLoadIC::GenerateRuntimeGetProperty(masm);
-}
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
-void Builtins::Generate_KeyedStoreIC_Megamorphic(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, SLOPPY);
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, name,
+ slot, vector);
}
-void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMegamorphic(masm, STRICT);
-}
-
-void KeyedStoreICMegamorphic(CodeStubAssembler* assembler, LanguageMode mode) {
+void Builtins::Generate_KeyedLoadIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
- typedef StoreWithVectorDescriptor Descriptor;
+ typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- KeyedStoreGenericGenerator::Generate(assembler, &p, mode);
+ assembler.TailCallRuntime(Runtime::kKeyedGetProperty, context, receiver,
+ name);
}
void Builtins::Generate_KeyedStoreIC_Megamorphic_TF(
- CodeStubAssembler* assembler) {
- KeyedStoreICMegamorphic(assembler, SLOPPY);
+ compiler::CodeAssemblerState* state) {
+ KeyedStoreGenericGenerator::Generate(state, SLOPPY);
}
void Builtins::Generate_KeyedStoreIC_Megamorphic_Strict_TF(
- CodeStubAssembler* assembler) {
- KeyedStoreICMegamorphic(assembler, STRICT);
+ compiler::CodeAssemblerState* state) {
+ KeyedStoreGenericGenerator::Generate(state, STRICT);
}
void Builtins::Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
@@ -75,80 +64,149 @@ void Builtins::Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
KeyedStoreIC::GenerateSlow(masm);
}
-void Builtins::Generate_LoadGlobalIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadGlobalIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadGlobalWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, slot,
- vector);
+ assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Miss, context, name, slot,
+ vector);
}
-void Builtins::Generate_LoadGlobalIC_Slow(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadGlobalIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadGlobalWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, slot,
- vector);
+ assembler.TailCallRuntime(Runtime::kLoadGlobalIC_Slow, context, name);
}
void Builtins::Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
}
-void Builtins::Generate_LoadIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_LoadIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
- slot, vector);
+ assembler.TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name,
+ slot, vector);
}
-void Builtins::Generate_LoadIC_Normal(MacroAssembler* masm) {
- LoadIC::GenerateNormal(masm);
-}
+TF_BUILTIN(LoadIC_Normal, CodeStubAssembler) {
+ typedef LoadWithVectorDescriptor Descriptor;
-void Builtins::Generate_LoadIC_Slow(CodeStubAssembler* assembler) {
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label slow(this);
+ {
+ Node* properties = LoadProperties(receiver);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, name, &found,
+ &var_name_index, &slow);
+ Bind(&found);
+ {
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+ &var_details, &var_value);
+ Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+ context, receiver, &slow);
+ Return(value);
+ }
+ }
+
+ Bind(&slow);
+ TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+}
+
+void Builtins::Generate_LoadIC_Slow(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef LoadWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
+ assembler.TailCallRuntime(Runtime::kGetProperty, context, receiver, name);
}
-void Builtins::Generate_StoreIC_Miss(CodeStubAssembler* assembler) {
+void Builtins::Generate_StoreIC_Miss(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef StoreWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
+ assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
-void Builtins::Generate_StoreIC_Normal(MacroAssembler* masm) {
- StoreIC::GenerateNormal(masm);
+TF_BUILTIN(StoreIC_Normal, CodeStubAssembler) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Label slow(this);
+ {
+ Node* properties = LoadProperties(receiver);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, name, &found,
+ &var_name_index, &slow);
+ Bind(&found);
+ {
+ const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
+ NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ Node* details = LoadFixedArrayElement(properties, var_name_index.value(),
+ kNameToDetailsOffset);
+ // Check that the property is a writable data property (no accessor).
+ const int kTypeAndReadOnlyMask = PropertyDetails::KindField::kMask |
+ PropertyDetails::kAttributesReadOnlyMask;
+ STATIC_ASSERT(kData == 0);
+ GotoIf(IsSetSmi(details, kTypeAndReadOnlyMask), &slow);
+ const int kNameToValueOffset =
+ (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(properties, var_name_index.value(), value,
+ UPDATE_WRITE_BARRIER, kNameToValueOffset);
+ Return(value);
+ }
+ }
+
+ Bind(&slow);
+ TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot, vector,
+ receiver, name);
}
void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
@@ -156,30 +214,33 @@ void Builtins::Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
}
namespace {
-void Generate_StoreIC_Slow(CodeStubAssembler* assembler,
+void Generate_StoreIC_Slow(compiler::CodeAssemblerState* state,
LanguageMode language_mode) {
typedef compiler::Node Node;
typedef StoreWithVectorDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* lang_mode = assembler->SmiConstant(Smi::FromInt(language_mode));
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* lang_mode = assembler.SmiConstant(Smi::FromInt(language_mode));
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- assembler->TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
- value, lang_mode);
+ assembler.TailCallRuntime(Runtime::kSetProperty, context, receiver, name,
+ value, lang_mode);
}
} // anonymous namespace
-void Builtins::Generate_StoreIC_SlowSloppy(CodeStubAssembler* assembler) {
- Generate_StoreIC_Slow(assembler, SLOPPY);
+void Builtins::Generate_StoreIC_SlowSloppy(
+ compiler::CodeAssemblerState* state) {
+ Generate_StoreIC_Slow(state, SLOPPY);
}
-void Builtins::Generate_StoreIC_SlowStrict(CodeStubAssembler* assembler) {
- Generate_StoreIC_Slow(assembler, STRICT);
+void Builtins::Generate_StoreIC_SlowStrict(
+ compiler::CodeAssemblerState* state) {
+ Generate_StoreIC_Slow(state, STRICT);
}
} // namespace internal
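The new TF_BUILTIN(StoreIC_Normal, ...) above takes the fast path only for a plain writable data property: GotoIf(IsSetSmi(details, kTypeAndReadOnlyMask), &slow) bails out if either the property-kind bit (accessor, given STATIC_ASSERT(kData == 0)) or the read-only attribute bit is set in the details word. A toy model of that test; the bit positions below are illustrative stand-ins, not the real PropertyDetails layout:

    #include <cassert>
    #include <cstdint>

    // Illustrative bit positions only; V8 takes these from PropertyDetails.
    constexpr uint32_t kKindMask = 1u << 0;      // set => accessor, clear => data
    constexpr uint32_t kReadOnlyMask = 1u << 1;  // set => read-only

    bool IsWritableDataProperty(uint32_t details) {
      // The fast path requires both bits clear, mirroring
      // GotoIf(IsSetSmi(details, kTypeAndReadOnlyMask), &slow).
      return (details & (kKindMask | kReadOnlyMask)) == 0;
    }

    int main() {
      assert(IsWritableDataProperty(0));
      assert(!IsWritableDataProperty(kKindMask));      // accessor
      assert(!IsWritableDataProperty(kReadOnlyMask));  // read-only data
      return 0;
    }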
diff --git a/deps/v8/src/builtins/builtins-ic.cc b/deps/v8/src/builtins/builtins-ic.cc
new file mode 100644
index 0000000000..398d512dcf
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-ic.cc
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+#include "src/ic/accessor-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+TF_BUILTIN(LoadIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadIC(state());
+}
+
+TF_BUILTIN(KeyedLoadIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedLoadICTF(state());
+}
+
+TF_BUILTIN(LoadICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadICTrampoline(state());
+}
+
+TF_BUILTIN(KeyedLoadICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedLoadICTrampolineTF(state());
+}
+
+TF_BUILTIN(StoreIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreIC(state());
+}
+
+TF_BUILTIN(StoreICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreICTrampoline(state());
+}
+
+TF_BUILTIN(StoreICStrict, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreIC(state());
+}
+
+TF_BUILTIN(StoreICStrictTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateStoreICTrampoline(state());
+}
+
+TF_BUILTIN(KeyedStoreIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTF(state(), SLOPPY);
+}
+
+TF_BUILTIN(KeyedStoreICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), SLOPPY);
+}
+
+TF_BUILTIN(KeyedStoreICStrict, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTF(state(), STRICT);
+}
+
+TF_BUILTIN(KeyedStoreICStrictTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateKeyedStoreICTrampolineTF(state(), STRICT);
+}
+
+TF_BUILTIN(LoadGlobalIC, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalIC(state(), NOT_INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICInsideTypeof, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalIC(state(), INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), NOT_INSIDE_TYPEOF);
+}
+
+TF_BUILTIN(LoadGlobalICInsideTypeofTrampoline, CodeStubAssembler) {
+ AccessorAssembler::GenerateLoadGlobalICTrampoline(state(), INSIDE_TYPEOF);
+}
+
+} // namespace internal
+} // namespace v8
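This new file is nothing but TF_BUILTIN wrappers forwarding to AccessorAssembler. Judging from its uses here and in the handler and math files, TF_BUILTIN(Name, Base) declares a Name-specific subclass of Base plus the matching Builtins::Generate_Name(state) entry point, with the braced body becoming the generator method. Approximately (a simplified sketch, not the exact macro from builtins-utils.h):

    // Rough expansion of TF_BUILTIN(StoreIC, CodeStubAssembler) { ... }.
    class StoreICAssembler : public CodeStubAssembler {
     public:
      explicit StoreICAssembler(compiler::CodeAssemblerState* state)
          : CodeStubAssembler(state) {}
      void GenerateImpl() {
        // The braced body that follows the macro invocation:
        AccessorAssembler::GenerateStoreIC(state());
      }
    };

    void Builtins::Generate_StoreIC(compiler::CodeAssemblerState* state) {
      StoreICAssembler assembler(state);
      assembler.GenerateImpl();
    }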
diff --git a/deps/v8/src/builtins/builtins-internal.cc b/deps/v8/src/builtins/builtins-internal.cc
index bec6ff3645..f94ed0c16f 100644
--- a/deps/v8/src/builtins/builtins-internal.cc
+++ b/deps/v8/src/builtins/builtins-internal.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
#include "src/interface-descriptors.h"
#include "src/macro-assembler.h"
@@ -54,86 +55,263 @@ void Builtins::Generate_StackCheck(MacroAssembler* masm) {
// TurboFan support builtins.
void Builtins::Generate_CopyFastSmiOrObjectElements(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef CopyFastSmiOrObjectElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
+ Node* object = assembler.Parameter(Descriptor::kObject);
// Load the {object}s elements.
- Node* source = assembler->LoadObjectField(object, JSObject::kElementsOffset);
+ Node* source = assembler.LoadObjectField(object, JSObject::kElementsOffset);
- CodeStubAssembler::ParameterMode mode = assembler->OptimalParameterMode();
- Node* length = assembler->UntagParameter(
- assembler->LoadFixedArrayBaseLength(source), mode);
+ CodeStubAssembler::ParameterMode mode = assembler.OptimalParameterMode();
+ Node* length = assembler.TaggedToParameter(
+ assembler.LoadFixedArrayBaseLength(source), mode);
// Check if we can allocate in new space.
ElementsKind kind = FAST_ELEMENTS;
int max_elements = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind);
- Label if_newspace(assembler), if_oldspace(assembler);
- assembler->Branch(
- assembler->UintPtrLessThan(
- length, assembler->IntPtrOrSmiConstant(max_elements, mode)),
+ Label if_newspace(&assembler), if_oldspace(&assembler);
+ assembler.Branch(
+ assembler.UintPtrOrSmiLessThan(
+ length, assembler.IntPtrOrSmiConstant(max_elements, mode), mode),
&if_newspace, &if_oldspace);
- assembler->Bind(&if_newspace);
+ assembler.Bind(&if_newspace);
{
- Node* target = assembler->AllocateFixedArray(kind, length, mode);
- assembler->CopyFixedArrayElements(kind, source, target, length,
- SKIP_WRITE_BARRIER, mode);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
- assembler->Return(target);
+ Node* target = assembler.AllocateFixedArray(kind, length, mode);
+ assembler.CopyFixedArrayElements(kind, source, target, length,
+ SKIP_WRITE_BARRIER, mode);
+ assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler.Return(target);
}
- assembler->Bind(&if_oldspace);
+ assembler.Bind(&if_oldspace);
{
- Node* target = assembler->AllocateFixedArray(
- kind, length, mode, CodeStubAssembler::kPretenured);
- assembler->CopyFixedArrayElements(kind, source, target, length,
- UPDATE_WRITE_BARRIER, mode);
- assembler->StoreObjectField(object, JSObject::kElementsOffset, target);
- assembler->Return(target);
+ Node* target = assembler.AllocateFixedArray(kind, length, mode,
+ CodeStubAssembler::kPretenured);
+ assembler.CopyFixedArrayElements(kind, source, target, length,
+ UPDATE_WRITE_BARRIER, mode);
+ assembler.StoreObjectField(object, JSObject::kElementsOffset, target);
+ assembler.Return(target);
}
}
-void Builtins::Generate_GrowFastDoubleElements(CodeStubAssembler* assembler) {
+void Builtins::Generate_GrowFastDoubleElements(
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef GrowArrayElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
- Node* elements = assembler->LoadElements(object);
- elements = assembler->TryGrowElementsCapacity(
+ Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler.LoadElements(object);
+ elements = assembler.TryGrowElementsCapacity(
object, elements, FAST_DOUBLE_ELEMENTS, key, &runtime);
- assembler->Return(elements);
+ assembler.Return(elements);
- assembler->Bind(&runtime);
- assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+ assembler.Bind(&runtime);
+ assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
}
void Builtins::Generate_GrowFastSmiOrObjectElements(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
typedef GrowArrayElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+
+ Label runtime(&assembler, CodeStubAssembler::Label::kDeferred);
+ Node* elements = assembler.LoadElements(object);
+ elements = assembler.TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
+ key, &runtime);
+ assembler.Return(elements);
+
+ assembler.Bind(&runtime);
+ assembler.TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+}
+
+namespace {
+
+void Generate_NewArgumentsElements(CodeStubAssembler* assembler,
+ compiler::Node* frame,
+ compiler::Node* length) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+
+ // Check if we can allocate in new space.
+ ElementsKind kind = FAST_ELEMENTS;
+ int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind);
+ Label if_newspace(assembler), if_oldspace(assembler, Label::kDeferred);
+ assembler->Branch(assembler->IntPtrLessThan(
+ length, assembler->IntPtrConstant(max_elements)),
+ &if_newspace, &if_oldspace);
+
+ assembler->Bind(&if_newspace);
+ {
+ // Prefer EmptyFixedArray in case of non-positive {length} (the {length}
+ // can be negative here for rest parameters).
+ Label if_empty(assembler), if_notempty(assembler);
+ assembler->Branch(
+ assembler->IntPtrLessThanOrEqual(length, assembler->IntPtrConstant(0)),
+ &if_empty, &if_notempty);
+
+ assembler->Bind(&if_empty);
+ assembler->Return(assembler->EmptyFixedArrayConstant());
+
+ assembler->Bind(&if_notempty);
+ {
+ // Allocate a FixedArray in new space.
+ Node* result = assembler->AllocateFixedArray(kind, length);
+
+ // Compute the effective {offset} into the {frame}.
+ Node* offset = assembler->IntPtrAdd(length, assembler->IntPtrConstant(1));
+
+ // Copy the parameters from {frame} (starting at {offset}) to {result}.
+ Variable var_index(assembler, MachineType::PointerRepresentation());
+ Label loop(assembler, &var_index), done_loop(assembler);
+ var_index.Bind(assembler->IntPtrConstant(0));
+ assembler->Goto(&loop);
+ assembler->Bind(&loop);
+ {
+ // Load the current {index}.
+ Node* index = var_index.value();
+
+ // Check if we are done.
+ assembler->GotoIf(assembler->WordEqual(index, length), &done_loop);
+
+ // Load the parameter at the given {index}.
+ Node* value = assembler->Load(
+ MachineType::AnyTagged(), frame,
+ assembler->WordShl(assembler->IntPtrSub(offset, index),
+ assembler->IntPtrConstant(kPointerSizeLog2)));
+
+ // Store the {value} into the {result}.
+ assembler->StoreFixedArrayElement(result, index, value,
+ SKIP_WRITE_BARRIER);
+
+ // Continue with next {index}.
+ var_index.Bind(
+ assembler->IntPtrAdd(index, assembler->IntPtrConstant(1)));
+ assembler->Goto(&loop);
+ }
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ assembler->Bind(&done_loop);
+ assembler->Return(result);
+ }
+ }
+
+ assembler->Bind(&if_oldspace);
+ {
+ // Allocate in old space (or large object space).
+ assembler->TailCallRuntime(
+ Runtime::kNewArgumentsElements, assembler->NoContextConstant(),
+ assembler->BitcastWordToTagged(frame), assembler->SmiFromWord(length));
+ }
+}
+
+} // namespace
+
+void Builtins::Generate_NewUnmappedArgumentsElements(
+ compiler::CodeAssemblerState* state) {
+ typedef CodeStubAssembler::Label Label;
+ typedef CodeStubAssembler::Variable Variable;
+ typedef compiler::Node Node;
+ typedef NewArgumentsElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* formal_parameter_count =
+ assembler.Parameter(Descriptor::kFormalParameterCount);
+
+ // Determine the frame that holds the parameters.
+ Label done(&assembler);
+ Variable var_frame(&assembler, MachineType::PointerRepresentation()),
+ var_length(&assembler, MachineType::PointerRepresentation());
+ var_frame.Bind(assembler.LoadParentFramePointer());
+ var_length.Bind(formal_parameter_count);
+ Node* parent_frame = assembler.Load(
+ MachineType::Pointer(), var_frame.value(),
+ assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+ Node* parent_frame_type =
+ assembler.Load(MachineType::AnyTagged(), parent_frame,
+ assembler.IntPtrConstant(
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ assembler.GotoUnless(
+ assembler.WordEqual(
+ parent_frame_type,
+ assembler.SmiConstant(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))),
+ &done);
+ {
+ // Determine the length from the ArgumentsAdaptorFrame.
+ Node* length = assembler.LoadAndUntagSmi(
+ parent_frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
- Label runtime(assembler, CodeStubAssembler::Label::kDeferred);
- Node* elements = assembler->LoadElements(object);
- elements = assembler->TryGrowElementsCapacity(object, elements, FAST_ELEMENTS,
- key, &runtime);
- assembler->Return(elements);
+ // Take the arguments from the ArgumentsAdaptorFrame.
+ var_frame.Bind(parent_frame);
+ var_length.Bind(length);
+ }
+ assembler.Goto(&done);
+
+ // Allocate the actual FixedArray for the elements.
+ assembler.Bind(&done);
+ Generate_NewArgumentsElements(&assembler, var_frame.value(),
+ var_length.value());
+}
+
+void Builtins::Generate_NewRestParameterElements(
+ compiler::CodeAssemblerState* state) {
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef NewArgumentsElementsDescriptor Descriptor;
+ CodeStubAssembler assembler(state);
+
+ Node* formal_parameter_count =
+ assembler.Parameter(Descriptor::kFormalParameterCount);
+
+ // Check if we have an ArgumentsAdaptorFrame, as we will only have rest
+ // parameters in that case.
+ Label if_empty(&assembler);
+ Node* frame = assembler.Load(
+ MachineType::Pointer(), assembler.LoadParentFramePointer(),
+ assembler.IntPtrConstant(StandardFrameConstants::kCallerFPOffset));
+ Node* frame_type =
+ assembler.Load(MachineType::AnyTagged(), frame,
+ assembler.IntPtrConstant(
+ CommonFrameConstants::kContextOrFrameTypeOffset));
+ assembler.GotoUnless(
+ assembler.WordEqual(frame_type, assembler.SmiConstant(Smi::FromInt(
+ StackFrame::ARGUMENTS_ADAPTOR))),
+ &if_empty);
+
+ // Determine the length from the ArgumentsAdaptorFrame.
+ Node* frame_length = assembler.LoadAndUntagSmi(
+ frame, ArgumentsAdaptorFrameConstants::kLengthOffset);
+
+ // Compute the actual rest parameter length (may be negative).
+ Node* length = assembler.IntPtrSub(frame_length, formal_parameter_count);
+
+ // Allocate the actual FixedArray for the elements.
+ Generate_NewArgumentsElements(&assembler, frame, length);
+
+ // No rest parameters; return an empty FixedArray.
+ assembler.Bind(&if_empty);
+ assembler.Return(assembler.EmptyFixedArrayConstant());
+}
- assembler->Bind(&runtime);
- assembler->TailCallRuntime(Runtime::kGrowArrayElements, context, object, key);
+void Builtins::Generate_ReturnReceiver(compiler::CodeAssemblerState* state) {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.Parameter(0));
}
} // namespace internal
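Both new entry points above probe one frame up via LoadParentFramePointer and only trust a length read from an ArgumentsAdaptorFrame; for rest parameters the value of interest is frame_length - formal_parameter_count, which may be negative and is then mapped to the empty FixedArray inside Generate_NewArgumentsElements. A toy model of that arithmetic with a deliberately simplified frame layout:

    #include <cassert>

    // Simplified stand-in for the real frames: each records its caller and
    // whether it is an arguments adaptor carrying an actual argument count.
    struct Frame {
      const Frame* caller;
      bool is_arguments_adaptor;
      int length;  // meaningful only for adaptor frames
    };

    int RestParameterLength(const Frame* current, int formal_parameter_count) {
      const Frame* parent = current->caller;
      if (!parent->is_arguments_adaptor) return 0;  // no extra arguments pushed
      // May be negative; the builtin then returns the empty FixedArray.
      return parent->length - formal_parameter_count;
    }

    int main() {
      Frame adaptor{nullptr, true, 5};
      Frame callee{&adaptor, false, 0};
      assert(RestParameterLength(&callee, 3) == 2);
      assert(RestParameterLength(&callee, 7) == -2);
      return 0;
    }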
diff --git a/deps/v8/src/builtins/builtins-iterator.cc b/deps/v8/src/builtins/builtins-iterator.cc
deleted file mode 100644
index 7f74c20667..0000000000
--- a/deps/v8/src/builtins/builtins-iterator.cc
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/builtins/builtins-utils.h"
-#include "src/builtins/builtins.h"
-#include "src/frames-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void Builtins::Generate_IteratorPrototypeIterator(
- CodeStubAssembler* assembler) {
- assembler->Return(assembler->Parameter(0));
-}
-
-BUILTIN(ModuleNamespaceIterator) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Handle<Object> receiver = args.at<Object>(0);
-
- if (!receiver->IsJSModuleNamespace()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->iterator_symbol(), receiver));
- }
- auto ns = Handle<JSModuleNamespace>::cast(receiver);
-
- Handle<FixedArray> names =
- KeyAccumulator::GetKeys(ns, KeyCollectionMode::kOwnOnly, SKIP_SYMBOLS)
- .ToHandleChecked();
- return *isolate->factory()->NewJSFixedArrayIterator(names);
-}
-
-BUILTIN(FixedArrayIteratorNext) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- Handle<Object> receiver = args.at<Object>(0);
-
- // It is an error if this function is called on anything other than the
- // particular iterator object for which the function was created.
- if (!receiver->IsJSFixedArrayIterator() ||
- Handle<JSFixedArrayIterator>::cast(receiver)->initial_next() !=
- *args.target()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
- isolate->factory()->next_string(), receiver));
- }
-
- auto iterator = Handle<JSFixedArrayIterator>::cast(receiver);
- Handle<Object> value;
- bool done;
-
- int index = iterator->index();
- if (index < iterator->array()->length()) {
- value = handle(iterator->array()->get(index), isolate);
- done = false;
- iterator->set_index(index + 1);
- } else {
- value = isolate->factory()->undefined_value();
- done = true;
- }
-
- return *isolate->factory()->NewJSIteratorResult(value, done);
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-math.cc b/deps/v8/src/builtins/builtins-math.cc
index 30f12ba12c..1305e73db0 100644
--- a/deps/v8/src/builtins/builtins-math.cc
+++ b/deps/v8/src/builtins/builtins-math.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -13,332 +13,300 @@ namespace internal {
// -----------------------------------------------------------------------------
// ES6 section 20.2.2 Function Properties of the Math Object
-// ES6 section - 20.2.2.1 Math.abs ( x )
-void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+class MathBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit MathBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
- Node* context = assembler->Parameter(4);
+ protected:
+ void MathRoundingOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+ void MathUnaryOperation(Node* (CodeStubAssembler::*float64op)(Node*));
+};
+
+// ES6 section - 20.2.2.1 Math.abs ( x )
+TF_BUILTIN(MathAbs, CodeStubAssembler) {
+ Node* context = Parameter(4);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
// Check if {x} is already positive.
- Label if_xispositive(assembler), if_xisnotpositive(assembler);
- assembler->BranchIfSmiLessThanOrEqual(
- assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
- &if_xisnotpositive);
+ Label if_xispositive(this), if_xisnotpositive(this);
+ BranchIfSmiLessThanOrEqual(SmiConstant(Smi::FromInt(0)), x,
+ &if_xispositive, &if_xisnotpositive);
- assembler->Bind(&if_xispositive);
+ Bind(&if_xispositive);
{
// Just return the input {x}.
- assembler->Return(x);
+ Return(x);
}
- assembler->Bind(&if_xisnotpositive);
+ Bind(&if_xisnotpositive);
{
// Try to negate the {x} value.
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
- Node* overflow = assembler->Projection(1, pair);
- Label if_overflow(assembler, Label::kDeferred),
- if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
+ Node* pair =
+ IntPtrSubWithOverflow(IntPtrConstant(0), BitcastTaggedToWord(x));
+ Node* overflow = Projection(1, pair);
+ Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
+
+ Bind(&if_notoverflow);
{
// There is a Smi representation for negated {x}.
- Node* result = assembler->Projection(0, pair);
- result = assembler->BitcastWordToTagged(result);
- assembler->Return(result);
+ Node* result = Projection(0, pair);
+ Return(BitcastWordToTagged(result));
}
- assembler->Bind(&if_overflow);
- {
- Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
- assembler->Return(result);
- }
+ Bind(&if_overflow);
+ { Return(NumberConstant(0.0 - Smi::kMinValue)); }
}
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- Node* x_value = assembler->LoadHeapNumberValue(x);
- Node* value = assembler->Float64Abs(x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+ Node* x_value = LoadHeapNumberValue(x);
+ Node* value = Float64Abs(x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
}
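The Smi branch of MathAbs above negates with IntPtrSubWithOverflow(0, x) and uses the overflow projection to catch the single unrepresentable input: -Smi::kMinValue does not fit in a Smi, so that case falls through to the heap number 0.0 - Smi::kMinValue. The same two's-complement edge, modeled with a 32-bit integer standing in for the Smi range:

    #include <cassert>
    #include <cstdint>

    // Negate x, reporting overflow for the one impossible case (INT32_MIN),
    // analogous to Projection(1, IntPtrSubWithOverflow(0, x)).
    bool NegateWithOverflow(int32_t x, int32_t* out) {
      if (x == INT32_MIN) return true;  // -INT32_MIN is not representable
      *out = -x;
      return false;
    }

    int main() {
      int32_t r = 0;
      assert(!NegateWithOverflow(-5, &r) && r == 5);
      assert(NegateWithOverflow(INT32_MIN, &r));  // builtin returns a double here
      return 0;
    }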
-namespace {
-
-void Generate_MathRoundingOperation(
- CodeStubAssembler* assembler,
- compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+void MathBuiltinsAssembler::MathRoundingOperation(
+ Node* (CodeStubAssembler::*float64op)(Node*)) {
+ Node* context = Parameter(4);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
// Nothing to do when {x} is a Smi.
- assembler->Return(x);
+ Return(x);
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- Node* x_value = assembler->LoadHeapNumberValue(x);
- Node* value = (assembler->*float64op)(x_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+ Node* x_value = LoadHeapNumberValue(x);
+ Node* value = (this->*float64op)(x_value);
+ Node* result = ChangeFloat64ToTagged(value);
+ Return(result);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
}
-void Generate_MathUnaryOperation(
- CodeStubAssembler* assembler,
- compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
- typedef compiler::Node Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = (assembler->*float64op)(x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+void MathBuiltinsAssembler::MathUnaryOperation(
+ Node* (CodeStubAssembler::*float64op)(Node*)) {
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value = (this->*float64op)(x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
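MathRoundingOperation and MathUnaryOperation now live on MathBuiltinsAssembler and take a pointer-to-member, so each builtin picks its Float64 op (for example &CodeStubAssembler::Float64Floor) at the call site and the helper dispatches with (this->*float64op)(x_value). That is ordinary C++ member-pointer dispatch, isolated here with stand-in types:

    #include <cmath>
    #include <iostream>

    struct MathOps {
      double Floor(double x) { return std::floor(x); }
      double Ceil(double x) { return std::ceil(x); }
      // Dispatch through a member pointer, as in (this->*float64op)(x_value).
      double Apply(double (MathOps::*op)(double), double x) {
        return (this->*op)(x);
      }
    };

    int main() {
      MathOps m;
      std::cout << m.Apply(&MathOps::Floor, 2.7) << " "   // prints 2
                << m.Apply(&MathOps::Ceil, 2.2) << "\n";  // prints 3
      return 0;
    }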
-} // namespace
-
// ES6 section 20.2.2.2 Math.acos ( x )
-void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
+TF_BUILTIN(MathAcos, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Acos);
}
// ES6 section 20.2.2.3 Math.acosh ( x )
-void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
+TF_BUILTIN(MathAcosh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Acosh);
}
// ES6 section 20.2.2.4 Math.asin ( x )
-void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
+TF_BUILTIN(MathAsin, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Asin);
}
// ES6 section 20.2.2.5 Math.asinh ( x )
-void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
+TF_BUILTIN(MathAsinh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Asinh);
}
-
// ES6 section 20.2.2.6 Math.atan ( x )
-void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
+TF_BUILTIN(MathAtan, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Atan);
}
// ES6 section 20.2.2.7 Math.atanh ( x )
-void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
+TF_BUILTIN(MathAtanh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Atanh);
}
// ES6 section 20.2.2.8 Math.atan2 ( y, x )
-void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
- using compiler::Node;
+TF_BUILTIN(MathAtan2, CodeStubAssembler) {
+ Node* y = Parameter(1);
+ Node* x = Parameter(2);
+ Node* context = Parameter(5);
- Node* y = assembler->Parameter(1);
- Node* x = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value = assembler->Float64Atan2(y_value, x_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+ Node* y_value = TruncateTaggedToFloat64(context, y);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value = Float64Atan2(y_value, x_value);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
// ES6 section 20.2.2.10 Math.ceil ( x )
-void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
+TF_BUILTIN(MathCeil, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Ceil);
}
// ES6 section 20.2.2.9 Math.cbrt ( x )
-void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
+TF_BUILTIN(MathCbrt, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cbrt);
}
// ES6 section 20.2.2.11 Math.clz32 ( x )
-void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(MathClz32, CodeStubAssembler) {
+ Node* context = Parameter(4);
// Shared entry point for the clz32 operation.
- Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
- Label do_clz32(assembler);
+ Variable var_clz32_x(this, MachineRepresentation::kWord32);
+ Label do_clz32(this);
// We might need to loop once for ToNumber conversion.
- Variable var_x(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_x);
- var_x.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_x(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_x);
+ var_x.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {x} value.
Node* x = var_x.value();
// Check if {x} is a Smi or a HeapObject.
- Label if_xissmi(assembler), if_xisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
+ Label if_xissmi(this), if_xisnotsmi(this);
+ Branch(TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);
- assembler->Bind(&if_xissmi);
+ Bind(&if_xissmi);
{
- var_clz32_x.Bind(assembler->SmiToWord32(x));
- assembler->Goto(&do_clz32);
+ var_clz32_x.Bind(SmiToWord32(x));
+ Goto(&do_clz32);
}
- assembler->Bind(&if_xisnotsmi);
+ Bind(&if_xisnotsmi);
{
// Check if {x} is a HeapNumber.
- Label if_xisheapnumber(assembler),
- if_xisnotheapnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(assembler->LoadMap(x),
- assembler->HeapNumberMapConstant()),
- &if_xisheapnumber, &if_xisnotheapnumber);
-
- assembler->Bind(&if_xisheapnumber);
+ Label if_xisheapnumber(this), if_xisnotheapnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(LoadMap(x)), &if_xisheapnumber,
+ &if_xisnotheapnumber);
+
+ Bind(&if_xisheapnumber);
{
- var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
- assembler->Goto(&do_clz32);
+ var_clz32_x.Bind(TruncateHeapNumberValueToWord32(x));
+ Goto(&do_clz32);
}
- assembler->Bind(&if_xisnotheapnumber);
+ Bind(&if_xisnotheapnumber);
{
// Need to convert {x} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_x.Bind(assembler->CallStub(callable, context, x));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_x.Bind(CallStub(callable, context, x));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_clz32);
+ Bind(&do_clz32);
{
Node* x_value = var_clz32_x.value();
- Node* value = assembler->Word32Clz(x_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+ Node* value = Word32Clz(x_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Return(result);
}
}
// ES6 section 20.2.2.12 Math.cos ( x )
-void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
+TF_BUILTIN(MathCos, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cos);
}
// ES6 section 20.2.2.13 Math.cosh ( x )
-void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
+TF_BUILTIN(MathCosh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Cosh);
}
// ES6 section 20.2.2.14 Math.exp ( x )
-void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
+TF_BUILTIN(MathExp, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Exp);
}
// ES6 section 20.2.2.15 Math.expm1 ( x )
-void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
+TF_BUILTIN(MathExpm1, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Expm1);
}
// ES6 section 20.2.2.16 Math.floor ( x )
-void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
+TF_BUILTIN(MathFloor, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Floor);
}
// ES6 section 20.2.2.17 Math.fround ( x )
-void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
- Node* value = assembler->ChangeFloat32ToFloat64(value32);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
- assembler->Return(result);
+TF_BUILTIN(MathFround, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* value32 = TruncateFloat64ToFloat32(x_value);
+ Node* value = ChangeFloat32ToFloat64(value32);
+ Node* result = AllocateHeapNumberWithValue(value);
+ Return(result);
}
// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
@@ -351,7 +319,7 @@ BUILTIN(MathHypot) {
bool one_arg_is_nan = false;
List<double> abs_values(length);
for (int i = 0; i < length; i++) {
- Handle<Object> x = args.at<Object>(i + 1);
+ Handle<Object> x = args.at(i + 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
double abs_value = std::abs(x->Number());
@@ -394,153 +362,134 @@ BUILTIN(MathHypot) {
}
// ES6 section 20.2.2.19 Math.imul ( x, y )
-void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* y = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* x_value = assembler->TruncateTaggedToWord32(context, x);
- Node* y_value = assembler->TruncateTaggedToWord32(context, y);
- Node* value = assembler->Int32Mul(x_value, y_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(MathImul, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* y = Parameter(2);
+ Node* context = Parameter(5);
+ Node* x_value = TruncateTaggedToWord32(context, x);
+ Node* y_value = TruncateTaggedToWord32(context, y);
+ Node* value = Int32Mul(x_value, y_value);
+ Node* result = ChangeInt32ToTagged(value);
+ Return(result);
}
// ES6 section 20.2.2.20 Math.log ( x )
-void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
+TF_BUILTIN(MathLog, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log);
}
// ES6 section 20.2.2.21 Math.log1p ( x )
-void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
+TF_BUILTIN(MathLog1p, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log1p);
}
// ES6 section 20.2.2.22 Math.log10 ( x )
-void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
+TF_BUILTIN(MathLog10, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log10);
}
// ES6 section 20.2.2.23 Math.log2 ( x )
-void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
+TF_BUILTIN(MathLog2, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Log2);
}
// ES6 section 20.2.2.26 Math.pow ( x, y )
-void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* x = assembler->Parameter(1);
- Node* y = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
- Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
- Node* value = assembler->Float64Pow(x_value, y_value);
- Node* result = assembler->ChangeFloat64ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(MathPow, CodeStubAssembler) {
+ Node* x = Parameter(1);
+ Node* y = Parameter(2);
+ Node* context = Parameter(5);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
+ Node* y_value = TruncateTaggedToFloat64(context, y);
+ Node* value = Float64Pow(x_value, y_value);
+ Node* result = ChangeFloat64ToTagged(value);
+ Return(result);
}
// ES6 section 20.2.2.27 Math.random ( )
-void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
- using compiler::Node;
-
- Node* context = assembler->Parameter(3);
- Node* native_context = assembler->LoadNativeContext(context);
+TF_BUILTIN(MathRandom, CodeStubAssembler) {
+ Node* context = Parameter(3);
+ Node* native_context = LoadNativeContext(context);
// Load cache index.
- CodeStubAssembler::Variable smi_index(assembler,
- MachineRepresentation::kTagged);
- smi_index.Bind(assembler->LoadContextElement(
- native_context, Context::MATH_RANDOM_INDEX_INDEX));
+ Variable smi_index(this, MachineRepresentation::kTagged);
+ smi_index.Bind(
+ LoadContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX));
// Cached random numbers are exhausted if index is 0. Go to slow path.
- CodeStubAssembler::Label if_cached(assembler);
- assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
- assembler->SmiConstant(Smi::kZero)),
- &if_cached);
+ Label if_cached(this);
+ GotoIf(SmiAbove(smi_index.value(), SmiConstant(Smi::kZero)), &if_cached);
// Cache exhausted, populate the cache. Return value is the new index.
- smi_index.Bind(
- assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
- assembler->Goto(&if_cached);
+ smi_index.Bind(CallRuntime(Runtime::kGenerateRandomNumbers, context));
+ Goto(&if_cached);
// Compute next index by decrement.
- assembler->Bind(&if_cached);
- Node* new_smi_index = assembler->SmiSub(
- smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
- assembler->StoreContextElement(
- native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);
+ Bind(&if_cached);
+ Node* new_smi_index = SmiSub(smi_index.value(), SmiConstant(Smi::FromInt(1)));
+ StoreContextElement(native_context, Context::MATH_RANDOM_INDEX_INDEX,
+ new_smi_index);
// Load and return next cached random number.
- Node* array = assembler->LoadContextElement(native_context,
- Context::MATH_RANDOM_CACHE_INDEX);
- Node* random = assembler->LoadFixedDoubleArrayElement(
- array, new_smi_index, MachineType::Float64(), 0,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->Return(assembler->AllocateHeapNumberWithValue(random));
+ Node* array =
+ LoadContextElement(native_context, Context::MATH_RANDOM_CACHE_INDEX);
+ Node* random = LoadFixedDoubleArrayElement(
+ array, new_smi_index, MachineType::Float64(), 0, SMI_PARAMETERS);
+ Return(AllocateHeapNumberWithValue(random));
}
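The MathRandom control flow above is a consume-from-cache scheme: take the next number from a context-resident cache of precomputed doubles, and refill via the runtime when the index reaches zero. A minimal C++ sketch of the same logic; RefillRandomCache is an illustrative stand-in for Runtime::kGenerateRandomNumbers, and the cache size of 64 is an assumption, not V8 API.

#include <array>
#include <cstddef>

// Stand-in for the runtime refill call: repopulate the cache and return
// the new index (the cache size).
static size_t RefillRandomCache(std::array<double, 64>* cache) {
  for (double& slot : *cache) slot = 0.5;  // placeholder values
  return cache->size();
}

double NextRandom(std::array<double, 64>* cache, size_t* index) {
  if (*index == 0) *index = RefillRandomCache(cache);  // slow path: refill
  *index -= 1;              // compute the next index by decrement
  return (*cache)[*index];  // load and return the next cached number
}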
// ES6 section 20.2.2.28 Math.round ( x )
-void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
+TF_BUILTIN(MathRound, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Round);
}
// ES6 section 20.2.2.29 Math.sign ( x )
-void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- using compiler::Node;
-
+TF_BUILTIN(MathSign, CodeStubAssembler) {
// Convert the {x} value to a Number.
- Node* x = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
- Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
+ Node* x = Parameter(1);
+ Node* context = Parameter(4);
+ Node* x_value = TruncateTaggedToFloat64(context, x);
// Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
- Label if_xisnegative(assembler), if_xispositive(assembler);
- assembler->GotoIf(
- assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
- &if_xisnegative);
- assembler->GotoIf(
- assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
- &if_xispositive);
- assembler->Return(assembler->ChangeFloat64ToTagged(x_value));
+ Label if_xisnegative(this), if_xispositive(this);
+ GotoIf(Float64LessThan(x_value, Float64Constant(0.0)), &if_xisnegative);
+ GotoIf(Float64LessThan(Float64Constant(0.0), x_value), &if_xispositive);
+ Return(ChangeFloat64ToTagged(x_value));
- assembler->Bind(&if_xisnegative);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));
+ Bind(&if_xisnegative);
+ Return(SmiConstant(Smi::FromInt(-1)));
- assembler->Bind(&if_xispositive);
- assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
+ Bind(&if_xispositive);
+ Return(SmiConstant(Smi::FromInt(1)));
}
// ES6 section 20.2.2.30 Math.sin ( x )
-void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
+TF_BUILTIN(MathSin, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sin);
}
// ES6 section 20.2.2.31 Math.sinh ( x )
-void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
+TF_BUILTIN(MathSinh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sinh);
}
// ES6 section 20.2.2.32 Math.sqrt ( x )
-void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
+TF_BUILTIN(MathSqrt, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Sqrt);
}
// ES6 section 20.2.2.33 Math.tan ( x )
-void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
+TF_BUILTIN(MathTan, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Tan);
}
// ES6 section 20.2.2.34 Math.tanh ( x )
-void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
- Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
+TF_BUILTIN(MathTanh, MathBuiltinsAssembler) {
+ MathUnaryOperation(&CodeStubAssembler::Float64Tanh);
}
// ES6 section 20.2.2.35 Math.trunc ( x )
-void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
- Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
+TF_BUILTIN(MathTrunc, MathBuiltinsAssembler) {
+ MathRoundingOperation(&CodeStubAssembler::Float64Trunc);
}
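MathUnaryOperation and MathRoundingOperation are helpers on MathBuiltinsAssembler defined earlier in this file, outside this hunk. Presumably they keep the shape of the old Generate_MathUnaryOperation: truncate the argument to a float64, apply the supplied CodeStubAssembler member, and box the result. A sketch under that assumption:

// Assumed shape only; the real helper lives earlier in builtins-math.cc.
void MathBuiltinsAssembler::MathUnaryOperation(
    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
  Node* x = Parameter(1);
  Node* context = Parameter(4);
  Node* x_value = TruncateTaggedToFloat64(context, x);
  Node* value = (this->*float64op)(x_value);
  Return(AllocateHeapNumberWithValue(value));
}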
void Builtins::Generate_MathMax(MacroAssembler* masm) {
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 3e2bc556b6..7e750139de 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -5,253 +5,251 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
+class NumberBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit NumberBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ template <Signedness signed_result = kSigned>
+ void BitwiseOp(std::function<Node*(Node* lhs, Node* rhs)> body) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
+
+ Node* lhs_value = TruncateTaggedToWord32(context, left);
+ Node* rhs_value = TruncateTaggedToWord32(context, right);
+ Node* value = body(lhs_value, rhs_value);
+ Node* result = signed_result == kSigned ? ChangeInt32ToTagged(value)
+ : ChangeUint32ToTagged(value);
+ Return(result);
+ }
+
+ template <Signedness signed_result = kSigned>
+ void BitwiseShiftOp(std::function<Node*(Node* lhs, Node* shift_count)> body) {
+ BitwiseOp<signed_result>([this, body](Node* lhs, Node* rhs) {
+ Node* shift_count = Word32And(rhs, Int32Constant(0x1f));
+ return body(lhs, shift_count);
+ });
+ }
+
+ void RelationalComparisonBuiltin(RelationalComparisonMode mode) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
+
+ Return(RelationalComparison(mode, lhs, rhs, context));
+ }
+};
+
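The BitwiseOp/BitwiseShiftOp templates above reduce each bitwise builtin to a one-line lambda over Word32 nodes, with BitwiseShiftOp masking the shift count to five bits as ES6 requires. Hypothetical call sites, for illustration; the actual bitwise builtins appear later in builtins-number.cc and may be spelled differently (kUnsigned is assumed to be the other Signedness value).

TF_BUILTIN(ExampleBitwiseAnd, NumberBuiltinsAssembler) {
  BitwiseOp([this](Node* lhs, Node* rhs) { return Word32And(lhs, rhs); });
}
TF_BUILTIN(ExampleShiftRightLogical, NumberBuiltinsAssembler) {
  // >>> yields an unsigned result, hence kUnsigned / ChangeUint32ToTagged.
  BitwiseShiftOp<kUnsigned>([this](Node* lhs, Node* shift_count) {
    return Word32Shr(lhs, shift_count);
  });
}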
// -----------------------------------------------------------------------------
// ES6 section 20.1 Number Objects
// ES6 section 20.1.2.2 Number.isFinite ( number )
-void Builtins::Generate_NumberIsFinite(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsFinite, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a finite, non-NaN value.
- Node* number_value = assembler->LoadHeapNumberValue(number);
- assembler->BranchIfFloat64IsNaN(
- assembler->Float64Sub(number_value, number_value), &return_false,
- &return_true);
+ Node* number_value = LoadHeapNumberValue(number);
+ BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false,
+ &return_true);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
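The finiteness check exploits IEEE arithmetic: x - x is 0.0 for every finite x but NaN when x is NaN or +/-Infinity, so a single NaN branch covers all non-finite inputs. The same predicate in standalone C++:

#include <cmath>
#include <limits>

// Mirrors the builtin's check: finite iff (x - x) is not NaN.
bool IsFiniteLikeBuiltin(double x) { return !std::isnan(x - x); }

// IsFiniteLikeBuiltin(1.5)                                       -> true
// IsFiniteLikeBuiltin(std::numeric_limits<double>::infinity())   -> false
// IsFiniteLikeBuiltin(std::numeric_limits<double>::quiet_NaN())  -> false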
// ES6 section 20.1.2.3 Number.isInteger ( number )
-void Builtins::Generate_NumberIsInteger(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsInteger, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
- Node* number_value = assembler->LoadHeapNumberValue(number);
+ Node* number_value = LoadHeapNumberValue(number);
// Truncate the value of {number} to an integer (or an infinity).
- Node* integer = assembler->Float64Trunc(number_value);
+ Node* integer = Float64Trunc(number_value);
  // Check if {number}'s value matches the integer (ruling out the infinities).
- assembler->Branch(
- assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
- assembler->Float64Constant(0.0)),
- &return_true, &return_false);
+ Branch(Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
+ &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
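Number.isInteger's core test is that truncation loses nothing: value - trunc(value) is exactly 0.0 for integers, nonzero for fractions, and NaN for NaN and the infinities (Infinity - Infinity is NaN), and NaN compares unequal to 0.0. A plain C++ equivalent:

#include <cmath>

// Mirrors the builtin: integer iff value - trunc(value) == 0.0.
bool IsIntegerLikeBuiltin(double value) {
  double integer = std::trunc(value);
  return (value - integer) == 0.0;  // false for NaN and +/-Infinity
}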
// ES6 section 20.1.2.4 Number.isNaN ( number )
-void Builtins::Generate_NumberIsNaN(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+TF_BUILTIN(NumberIsNaN, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Node* number = assembler->Parameter(1);
-
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_false);
+ GotoIf(TaggedIsSmi(number), &return_false);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Check if {number} contains a NaN value.
- Node* number_value = assembler->LoadHeapNumberValue(number);
- assembler->BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
+ Node* number_value = LoadHeapNumberValue(number);
+ BranchIfFloat64IsNaN(number_value, &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
// ES6 section 20.1.2.5 Number.isSafeInteger ( number )
-void Builtins::Generate_NumberIsSafeInteger(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* number = assembler->Parameter(1);
+TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) {
+ Node* number = Parameter(1);
- Label return_true(assembler), return_false(assembler);
+ Label return_true(this), return_false(this);
// Check if {number} is a Smi.
- assembler->GotoIf(assembler->TaggedIsSmi(number), &return_true);
+ GotoIf(TaggedIsSmi(number), &return_true);
// Check if {number} is a HeapNumber.
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadMap(number),
- assembler->HeapNumberMapConstant()),
- &return_false);
+ GotoUnless(IsHeapNumberMap(LoadMap(number)), &return_false);
// Load the actual value of {number}.
- Node* number_value = assembler->LoadHeapNumberValue(number);
+ Node* number_value = LoadHeapNumberValue(number);
// Truncate the value of {number} to an integer (or an infinity).
- Node* integer = assembler->Float64Trunc(number_value);
+ Node* integer = Float64Trunc(number_value);
  // Check if {number}'s value matches the integer (ruling out the infinities).
- assembler->GotoUnless(
- assembler->Float64Equal(assembler->Float64Sub(number_value, integer),
- assembler->Float64Constant(0.0)),
+ GotoUnless(
+ Float64Equal(Float64Sub(number_value, integer), Float64Constant(0.0)),
&return_false);
// Check if the {integer} value is in safe integer range.
- assembler->Branch(assembler->Float64LessThanOrEqual(
- assembler->Float64Abs(integer),
- assembler->Float64Constant(kMaxSafeInteger)),
- &return_true, &return_false);
+ Branch(Float64LessThanOrEqual(Float64Abs(integer),
+ Float64Constant(kMaxSafeInteger)),
+ &return_true, &return_false);
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
+ Bind(&return_false);
+ Return(BooleanConstant(false));
}
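isSafeInteger adds one bound to the isInteger test: |integer| must not exceed kMaxSafeInteger = 2**53 - 1, the largest range over which every integer is exactly representable in a float64. A standalone C++ version of the combined check:

#include <cmath>

constexpr double kMaxSafeIntegerValue = 9007199254740991.0;  // 2**53 - 1

bool IsSafeIntegerLikeBuiltin(double value) {
  double integer = std::trunc(value);
  if ((value - integer) != 0.0) return false;  // fractional, NaN, +/-Infinity
  return std::fabs(integer) <= kMaxSafeIntegerValue;
}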
// ES6 section 20.1.2.12 Number.parseFloat ( string )
-void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(NumberParseFloat, CodeStubAssembler) {
+ Node* context = Parameter(4);
// We might need to loop once for ToString conversion.
- Variable var_input(assembler, MachineRepresentation::kTagged);
- Label loop(assembler, &var_input);
- var_input.Bind(assembler->Parameter(1));
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Variable var_input(this, MachineRepresentation::kTagged);
+ Label loop(this, &var_input);
+ var_input.Bind(Parameter(1));
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {input} value.
Node* input = var_input.value();
// Check if the {input} is a HeapObject or a Smi.
- Label if_inputissmi(assembler), if_inputisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(input), &if_inputissmi,
- &if_inputisnotsmi);
+ Label if_inputissmi(this), if_inputisnotsmi(this);
+ Branch(TaggedIsSmi(input), &if_inputissmi, &if_inputisnotsmi);
- assembler->Bind(&if_inputissmi);
+ Bind(&if_inputissmi);
{
// The {input} is already a Number, no need to do anything.
- assembler->Return(input);
+ Return(input);
}
- assembler->Bind(&if_inputisnotsmi);
+ Bind(&if_inputisnotsmi);
{
// The {input} is a HeapObject, check if it's already a String.
- Label if_inputisstring(assembler), if_inputisnotstring(assembler);
- Node* input_map = assembler->LoadMap(input);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
- assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
- &if_inputisstring, &if_inputisnotstring);
+ Label if_inputisstring(this), if_inputisnotstring(this);
+ Node* input_map = LoadMap(input);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+ Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+ &if_inputisnotstring);
- assembler->Bind(&if_inputisstring);
+ Bind(&if_inputisstring);
{
// The {input} is already a String, check if {input} contains
// a cached array index.
- Label if_inputcached(assembler), if_inputnotcached(assembler);
- Node* input_hash = assembler->LoadNameHashField(input);
- Node* input_bit = assembler->Word32And(
- input_hash,
- assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
- assembler->Branch(
- assembler->Word32Equal(input_bit, assembler->Int32Constant(0)),
- &if_inputcached, &if_inputnotcached);
-
- assembler->Bind(&if_inputcached);
+ Label if_inputcached(this), if_inputnotcached(this);
+ Node* input_hash = LoadNameHashField(input);
+ Node* input_bit = Word32And(
+ input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+ Branch(Word32Equal(input_bit, Int32Constant(0)), &if_inputcached,
+ &if_inputnotcached);
+
+ Bind(&if_inputcached);
{
        // Just return the {input}'s cached array index.
Node* input_array_index =
- assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
- input_hash);
- assembler->Return(assembler->SmiTag(input_array_index));
+ DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+ Return(SmiTag(input_array_index));
}
- assembler->Bind(&if_inputnotcached);
+ Bind(&if_inputnotcached);
{
// Need to fall back to the runtime to convert {input} to double.
- assembler->Return(assembler->CallRuntime(Runtime::kStringParseFloat,
- context, input));
+ Return(CallRuntime(Runtime::kStringParseFloat, context, input));
}
}
- assembler->Bind(&if_inputisnotstring);
+ Bind(&if_inputisnotstring);
{
// The {input} is neither a String nor a Smi, check for HeapNumber.
- Label if_inputisnumber(assembler),
- if_inputisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &if_inputisnumber, &if_inputisnotnumber);
+ Label if_inputisnumber(this),
+ if_inputisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(input_map), &if_inputisnumber,
+ &if_inputisnotnumber);
- assembler->Bind(&if_inputisnumber);
+ Bind(&if_inputisnumber);
{
// The {input} is already a Number, take care of -0.
- Label if_inputiszero(assembler), if_inputisnotzero(assembler);
- Node* input_value = assembler->LoadHeapNumberValue(input);
- assembler->Branch(assembler->Float64Equal(
- input_value, assembler->Float64Constant(0.0)),
- &if_inputiszero, &if_inputisnotzero);
+ Label if_inputiszero(this), if_inputisnotzero(this);
+ Node* input_value = LoadHeapNumberValue(input);
+ Branch(Float64Equal(input_value, Float64Constant(0.0)),
+ &if_inputiszero, &if_inputisnotzero);
- assembler->Bind(&if_inputiszero);
- assembler->Return(assembler->SmiConstant(0));
+ Bind(&if_inputiszero);
+ Return(SmiConstant(0));
- assembler->Bind(&if_inputisnotzero);
- assembler->Return(input);
+ Bind(&if_inputisnotzero);
+ Return(input);
}
- assembler->Bind(&if_inputisnotnumber);
+ Bind(&if_inputisnotnumber);
{
// Need to convert the {input} to String first.
// TODO(bmeurer): This could be more efficient if necessary.
- Callable callable = CodeFactory::ToString(assembler->isolate());
- var_input.Bind(assembler->CallStub(callable, context, input));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::ToString(isolate());
+ var_input.Bind(CallStub(callable, context, input));
+ Goto(&loop);
}
}
}
@@ -259,106 +257,86 @@ void Builtins::Generate_NumberParseFloat(CodeStubAssembler* assembler) {
}
// ES6 section 20.1.2.13 Number.parseInt ( string, radix )
-void Builtins::Generate_NumberParseInt(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* input = assembler->Parameter(1);
- Node* radix = assembler->Parameter(2);
- Node* context = assembler->Parameter(5);
+TF_BUILTIN(NumberParseInt, CodeStubAssembler) {
+ Node* input = Parameter(1);
+ Node* radix = Parameter(2);
+ Node* context = Parameter(5);
// Check if {radix} is treated as 10 (i.e. undefined, 0 or 10).
- Label if_radix10(assembler), if_generic(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->WordEqual(radix, assembler->UndefinedConstant()),
- &if_radix10);
- assembler->GotoIf(
- assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(10))),
- &if_radix10);
- assembler->GotoIf(
- assembler->WordEqual(radix, assembler->SmiConstant(Smi::FromInt(0))),
- &if_radix10);
- assembler->Goto(&if_generic);
-
- assembler->Bind(&if_radix10);
+ Label if_radix10(this), if_generic(this, Label::kDeferred);
+ GotoIf(WordEqual(radix, UndefinedConstant()), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(10))), &if_radix10);
+ GotoIf(WordEqual(radix, SmiConstant(Smi::FromInt(0))), &if_radix10);
+ Goto(&if_generic);
+
+ Bind(&if_radix10);
{
// Check if we can avoid the ToString conversion on {input}.
- Label if_inputissmi(assembler), if_inputisheapnumber(assembler),
- if_inputisstring(assembler);
- assembler->GotoIf(assembler->TaggedIsSmi(input), &if_inputissmi);
- Node* input_map = assembler->LoadMap(input);
- assembler->GotoIf(
- assembler->WordEqual(input_map, assembler->HeapNumberMapConstant()),
- &if_inputisheapnumber);
- Node* input_instance_type = assembler->LoadMapInstanceType(input_map);
- assembler->Branch(assembler->IsStringInstanceType(input_instance_type),
- &if_inputisstring, &if_generic);
-
- assembler->Bind(&if_inputissmi);
+ Label if_inputissmi(this), if_inputisheapnumber(this),
+ if_inputisstring(this);
+ GotoIf(TaggedIsSmi(input), &if_inputissmi);
+ Node* input_map = LoadMap(input);
+ GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber);
+ Node* input_instance_type = LoadMapInstanceType(input_map);
+ Branch(IsStringInstanceType(input_instance_type), &if_inputisstring,
+ &if_generic);
+
+ Bind(&if_inputissmi);
{
// Just return the {input}.
- assembler->Return(input);
+ Return(input);
}
- assembler->Bind(&if_inputisheapnumber);
+ Bind(&if_inputisheapnumber);
{
// Check if the {input} value is in Signed32 range.
- Label if_inputissigned32(assembler);
- Node* input_value = assembler->LoadHeapNumberValue(input);
- Node* input_value32 = assembler->TruncateFloat64ToWord32(input_value);
- assembler->GotoIf(
- assembler->Float64Equal(
- input_value, assembler->ChangeInt32ToFloat64(input_value32)),
- &if_inputissigned32);
+ Label if_inputissigned32(this);
+ Node* input_value = LoadHeapNumberValue(input);
+ Node* input_value32 = TruncateFloat64ToWord32(input_value);
+ GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)),
+ &if_inputissigned32);
      // Check if the absolute {input} value is in the open interval (0.01, 1e9).
- Node* input_value_abs = assembler->Float64Abs(input_value);
+ Node* input_value_abs = Float64Abs(input_value);
- assembler->GotoUnless(
- assembler->Float64LessThan(input_value_abs,
- assembler->Float64Constant(1e9)),
- &if_generic);
- assembler->Branch(assembler->Float64LessThan(
- assembler->Float64Constant(0.01), input_value_abs),
- &if_inputissigned32, &if_generic);
+ GotoUnless(Float64LessThan(input_value_abs, Float64Constant(1e9)),
+ &if_generic);
+ Branch(Float64LessThan(Float64Constant(0.01), input_value_abs),
+ &if_inputissigned32, &if_generic);
      // Tag the truncated int32 value and return the result.
- assembler->Bind(&if_inputissigned32);
- Node* result = assembler->ChangeInt32ToTagged(input_value32);
- assembler->Return(result);
+ Bind(&if_inputissigned32);
+ Node* result = ChangeInt32ToTagged(input_value32);
+ Return(result);
}
- assembler->Bind(&if_inputisstring);
+ Bind(&if_inputisstring);
{
// Check if the String {input} has a cached array index.
- Node* input_hash = assembler->LoadNameHashField(input);
- Node* input_bit = assembler->Word32And(
- input_hash,
- assembler->Int32Constant(String::kContainsCachedArrayIndexMask));
- assembler->GotoIf(
- assembler->Word32NotEqual(input_bit, assembler->Int32Constant(0)),
- &if_generic);
+ Node* input_hash = LoadNameHashField(input);
+ Node* input_bit = Word32And(
+ input_hash, Int32Constant(String::kContainsCachedArrayIndexMask));
+ GotoIf(Word32NotEqual(input_bit, Int32Constant(0)), &if_generic);
// Return the cached array index as result.
Node* input_index =
- assembler->DecodeWordFromWord32<String::ArrayIndexValueBits>(
- input_hash);
- Node* result = assembler->SmiTag(input_index);
- assembler->Return(result);
+ DecodeWordFromWord32<String::ArrayIndexValueBits>(input_hash);
+ Node* result = SmiTag(input_index);
+ Return(result);
}
}
- assembler->Bind(&if_generic);
+ Bind(&if_generic);
{
- Node* result =
- assembler->CallRuntime(Runtime::kStringParseInt, context, input, radix);
- assembler->Return(result);
+ Node* result = CallRuntime(Runtime::kStringParseInt, context, input, radix);
+ Return(result);
}
}
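For a HeapNumber with radix 10, the builtin can skip the ToString round trip whenever truncating the double directly agrees with parsing its decimal printout: either the value is already exactly an int32, or its magnitude lies in the open interval (0.01, 1e9), where the printed form carries no exponent and the integer part fits an int32. A C++ sketch of that decision; note the CSA TruncateFloat64ToWord32 has JS ToInt32 semantics, while a plain C++ cast is only defined in range, hence the explicit guard here.

#include <cmath>
#include <cstdint>

// Sketch of the radix-10 HeapNumber fast path, with the builtin's bounds.
bool ParseIntDoubleFastPath(double value, int32_t* out) {
  double value_abs = std::fabs(value);
  bool is_signed32 = value >= -2147483648.0 && value <= 2147483647.0 &&
                     std::trunc(value) == value;
  if (is_signed32 || (0.01 < value_abs && value_abs < 1e9)) {
    *out = static_cast<int32_t>(value);  // truncates toward zero, in range
    return true;
  }
  return false;  // otherwise: generic Runtime::kStringParseInt path
}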
// ES6 section 20.1.3.2 Number.prototype.toExponential ( fractionDigits )
BUILTIN(NumberPrototypeToExponential) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -401,7 +379,7 @@ BUILTIN(NumberPrototypeToExponential) {
// ES6 section 20.1.3.3 Number.prototype.toFixed ( fractionDigits )
BUILTIN(NumberPrototypeToFixed) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> fraction_digits = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -444,7 +422,7 @@ BUILTIN(NumberPrototypeToFixed) {
// ES6 section 20.1.3.4 Number.prototype.toLocaleString ( [ r1 [ , r2 ] ] )
BUILTIN(NumberPrototypeToLocaleString) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
// Unwrap the receiver {value}.
if (value->IsJSValue()) {
@@ -464,7 +442,7 @@ BUILTIN(NumberPrototypeToLocaleString) {
// ES6 section 20.1.3.5 Number.prototype.toPrecision ( precision )
BUILTIN(NumberPrototypeToPrecision) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> precision = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -508,7 +486,7 @@ BUILTIN(NumberPrototypeToPrecision) {
// ES6 section 20.1.3.6 Number.prototype.toString ( [ radix ] )
BUILTIN(NumberPrototypeToString) {
HandleScope scope(isolate);
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Object> radix = args.atOrUndefined(isolate, 1);
// Unwrap the receiver {value}.
@@ -543,7 +521,8 @@ BUILTIN(NumberPrototypeToString) {
}
// Fast case where the result is a one character string.
- if (IsUint32Double(value_number) && value_number < radix_number) {
+ if ((IsUint32Double(value_number) && value_number < radix_number) ||
+ value_number == -0.0) {
// Character array used for conversion.
static const char kCharTable[] = "0123456789abcdefghijklmnopqrstuvwxyz";
return *isolate->factory()->LookupSingleCharacterStringFromCode(
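The added value_number == -0.0 clause routes minus zero through this one-character fast path: presumably IsUint32Double rejects -0 via its sign bit, so (-0).toString(radix) previously fell through to the slower generic conversion. Since IEEE comparison treats the two zeros as equal, the clause also (redundantly) matches +0, and the table lookup yields "0" for both. A standalone check of those comparison semantics:

#include <cmath>

// IEEE-754 comparison treats the zeros as equal; only the sign bit
// (std::signbit) distinguishes them.
static_assert(-0.0 == 0.0, "the two zeros compare equal");

bool MatchesNewClause(double value_number) {
  return value_number == -0.0;  // true for both +0.0 and -0.0
}
// std::signbit(-0.0) == true, std::signbit(+0.0) == false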
@@ -564,342 +543,315 @@ BUILTIN(NumberPrototypeToString) {
}
// ES6 section 20.1.3.7 Number.prototype.valueOf ( )
-void Builtins::Generate_NumberPrototypeValueOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kNumber, "Number.prototype.valueOf");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kNumber,
+ "Number.prototype.valueOf");
+ Return(result);
}
-// static
-void Builtins::Generate_Add(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Add, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry for floating point addition.
- Label do_fadd(assembler);
- Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
- var_fadd_rhs(assembler, MachineRepresentation::kFloat64);
+ Label do_fadd(this);
+ Variable var_fadd_lhs(this, MachineRepresentation::kFloat64),
+ var_fadd_rhs(this, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive, ToString and/or
// ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars), end(assembler),
- string_add_convert_left(assembler, Label::kDeferred),
- string_add_convert_right(assembler, Label::kDeferred);
+ Label loop(this, 2, loop_vars), end(this),
+ string_add_convert_left(this, Label::kDeferred),
+ string_add_convert_right(this, Label::kDeferred);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
- &if_lhsisnotsmi);
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- assembler->Bind(&if_lhsissmi);
+ Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Try fast Smi addition first.
- Node* pair = assembler->IntPtrAddWithOverflow(
- assembler->BitcastTaggedToWord(lhs),
- assembler->BitcastTaggedToWord(rhs));
- Node* overflow = assembler->Projection(1, pair);
+ Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
        // Check if the Smi addition overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
- assembler->Bind(&if_overflow);
+ Bind(&if_overflow);
{
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->BitcastWordToTaggedSigned(
- assembler->Projection(0, pair)));
- assembler->Goto(&end);
+ Bind(&if_notoverflow);
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
- var_fadd_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(SmiToFloat64(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadMapInstanceType(rhs_map);
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
// Check if the {rhs} is a String.
- Label if_rhsisstring(assembler, Label::kDeferred),
- if_rhsisnotstring(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
+ Label if_rhsisstring(this, Label::kDeferred),
+ if_rhsisnotstring(this, Label::kDeferred);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
- assembler->Bind(&if_rhsisstring);
+ Bind(&if_rhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
+ Goto(&string_add_convert_left);
}
- assembler->Bind(&if_rhsisnotstring);
+ Bind(&if_rhsisnotstring);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
}
}
- assembler->Bind(&if_lhsisnotsmi);
+ Bind(&if_lhsisnotsmi);
{
// Load the map and instance type of {lhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
+ Node* lhs_instance_type = LoadInstanceType(lhs);
// Check if {lhs} is a String.
- Label if_lhsisstring(assembler), if_lhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(lhs_instance_type),
- &if_lhsisstring, &if_lhsisnotstring);
+ Label if_lhsisstring(this), if_lhsisnotstring(this);
+ Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
+ &if_lhsisnotstring);
- assembler->Bind(&if_lhsisstring);
+ Bind(&if_lhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_right);
+ Goto(&string_add_convert_right);
}
- assembler->Bind(&if_lhsisnotstring);
+ Bind(&if_lhsisnotstring);
{
// Check if {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Check if {lhs} is a Number.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
-
- assembler->Bind(&if_lhsisnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(
+ Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
+
+ Bind(&if_lhsisnumber);
{
// The {lhs} is a HeapNumber, the {rhs} is a Smi, just add them.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// The {lhs} is neither a Number nor a String, and the {rhs} is a
// Smi.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
- assembler->Bind(&if_lhsisreceiver);
+ Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
- assembler->Bind(&if_lhsisnotreceiver);
+ Bind(&if_lhsisnotreceiver);
{
// Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the instance type of {rhs}.
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
// Check if {rhs} is a String.
- Label if_rhsisstring(assembler), if_rhsisnotstring(assembler);
- assembler->Branch(assembler->IsStringInstanceType(rhs_instance_type),
- &if_rhsisstring, &if_rhsisnotstring);
+ Label if_rhsisstring(this), if_rhsisnotstring(this);
+ Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
+ &if_rhsisnotstring);
- assembler->Bind(&if_rhsisstring);
+ Bind(&if_rhsisstring);
{
var_lhs.Bind(lhs);
var_rhs.Bind(rhs);
- assembler->Goto(&string_add_convert_left);
+ Goto(&string_add_convert_left);
}
- assembler->Bind(&if_rhsisnotstring);
+ Bind(&if_rhsisnotstring);
{
// Check if {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler), if_lhsisnotnumber(assembler);
- assembler->Branch(assembler->Word32Equal(
- lhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this);
+ Branch(
+ Word32Equal(lhs_instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_lhsisnumber, &if_lhsisnotnumber);
- assembler->Bind(&if_lhsisnumber);
+ Bind(&if_lhsisnumber);
{
// Check if {rhs} is also a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->Word32Equal(
- rhs_instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- &if_rhsisnumber, &if_rhsisnotnumber);
-
- assembler->Bind(&if_rhsisnumber);
+ Label if_rhsisnumber(this),
+ if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(Word32Equal(rhs_instance_type,
+ Int32Constant(HEAP_NUMBER_TYPE)),
+ &if_rhsisnumber, &if_rhsisnotnumber);
+
+ Bind(&if_rhsisnumber);
{
// Perform a floating point addition.
- var_fadd_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fadd_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fadd);
+ var_fadd_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fadd_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fadd);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// Check if {lhs} is a JSReceiver.
- Label if_lhsisreceiver(assembler, Label::kDeferred),
- if_lhsisnotreceiver(assembler);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(lhs_instance_type),
- &if_lhsisreceiver, &if_lhsisnotreceiver);
+ Label if_lhsisreceiver(this, Label::kDeferred),
+ if_lhsisnotreceiver(this);
+ Branch(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_lhsisreceiver, &if_lhsisnotreceiver);
- assembler->Bind(&if_lhsisreceiver);
+ Bind(&if_lhsisreceiver);
{
// Convert {lhs} to a primitive first passing no hint.
Callable callable =
- CodeFactory::NonPrimitiveToPrimitive(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
- assembler->Bind(&if_lhsisnotreceiver);
+ Bind(&if_lhsisnotreceiver);
{
// Check if {rhs} is a JSReceiver.
- Label if_rhsisreceiver(assembler, Label::kDeferred),
- if_rhsisnotreceiver(assembler, Label::kDeferred);
- assembler->Branch(
- assembler->IsJSReceiverInstanceType(rhs_instance_type),
- &if_rhsisreceiver, &if_rhsisnotreceiver);
+ Label if_rhsisreceiver(this, Label::kDeferred),
+ if_rhsisnotreceiver(this, Label::kDeferred);
+ Branch(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_rhsisreceiver, &if_rhsisnotreceiver);
- assembler->Bind(&if_rhsisreceiver);
+ Bind(&if_rhsisreceiver);
{
// Convert {rhs} to a primitive first passing no hint.
- Callable callable = CodeFactory::NonPrimitiveToPrimitive(
- assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable =
+ CodeFactory::NonPrimitiveToPrimitive(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
- assembler->Bind(&if_rhsisnotreceiver);
+ Bind(&if_rhsisnotreceiver);
{
// Convert {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
@@ -908,910 +860,755 @@ void Builtins::Generate_Add(CodeStubAssembler* assembler) {
}
}
}
- assembler->Bind(&string_add_convert_left);
+ Bind(&string_add_convert_left);
{
    // Convert {lhs}, which is not a String, to a String and concatenate the
    // resulting string with the String {rhs}.
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
+ Callable callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CONVERT_LEFT, NOT_TENURED);
+ var_result.Bind(
+ CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+ Goto(&end);
}
- assembler->Bind(&string_add_convert_right);
+ Bind(&string_add_convert_right);
{
    // Convert {rhs}, which is not a String, to a String and concatenate the
    // String {lhs} with the resulting string.
Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
- var_result.Bind(assembler->CallStub(callable, context, var_lhs.value(),
- var_rhs.value()));
- assembler->Goto(&end);
+ isolate(), STRING_ADD_CONVERT_RIGHT, NOT_TENURED);
+ var_result.Bind(
+ CallStub(callable, context, var_lhs.value(), var_rhs.value()));
+ Goto(&end);
}
- assembler->Bind(&do_fadd);
+ Bind(&do_fadd);
{
Node* lhs_value = var_fadd_lhs.value();
Node* rhs_value = var_fadd_rhs.value();
- Node* value = assembler->Float64Add(lhs_value, rhs_value);
- Node* result = assembler->AllocateHeapNumberWithValue(value);
+ Node* value = Float64Add(lhs_value, rhs_value);
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
- assembler->Goto(&end);
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
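The Smi fast path above adds the two tagged words directly: a Smi stores its payload shifted left by the tag size, so adding the raw words adds the payloads, and the overflow projection of IntPtrAddWithOverflow flags exactly the sums that leave Smi range. The same trick in C++, sketched for a low-bit-tagged Smi model and using the GCC/Clang __builtin_add_overflow intrinsic in place of the overflow projection:

#include <cstdint>

// Illustrative Smi model: payload stored shifted left by one, low bit 0.
using Tagged = intptr_t;

// Mirrors the builtin's fast path: returns true with the tagged sum, or
// false when the addition overflowed and the float64 path must be taken.
bool FastSmiAdd(Tagged lhs, Tagged rhs, Tagged* result) {
  // (a << 1) + (b << 1) == (a + b) << 1, so adding the raw words adds the
  // payloads; the intrinsic reports when the sum leaves the word.
  return !__builtin_add_overflow(lhs, rhs, result);
}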
-void Builtins::Generate_Subtract(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Subtract, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry for floating point subtraction.
- Label do_fsub(assembler), end(assembler);
- Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
- var_fsub_rhs(assembler, MachineRepresentation::kFloat64);
+ Label do_fsub(this), end(this);
+ Variable var_fsub_lhs(this, MachineRepresentation::kFloat64),
+ var_fsub_rhs(this, MachineRepresentation::kFloat64);
// We might need to loop several times due to ToPrimitive and/or ToNumber
// conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_vars);
+ Label loop(this, 2, loop_vars);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
// Load the current {lhs} and {rhs} values.
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
// Check if the {lhs} is a Smi or a HeapObject.
- Label if_lhsissmi(assembler), if_lhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &if_lhsissmi,
- &if_lhsisnotsmi);
+ Label if_lhsissmi(this), if_lhsisnotsmi(this);
+ Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
- assembler->Bind(&if_lhsissmi);
+ Bind(&if_lhsissmi);
{
// Check if the {rhs} is also a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Try a fast Smi subtraction first.
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->BitcastTaggedToWord(lhs),
- assembler->BitcastTaggedToWord(rhs));
- Node* overflow = assembler->Projection(1, pair);
+ Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(lhs),
+ BitcastTaggedToWord(rhs));
+ Node* overflow = Projection(1, pair);
// Check if the Smi subtraction overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+ Label if_overflow(this), if_notoverflow(this);
+ Branch(overflow, &if_overflow, &if_notoverflow);
- assembler->Bind(&if_overflow);
+ Bind(&if_overflow);
{
// The result doesn't fit into Smi range.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_notoverflow);
- var_result.Bind(assembler->BitcastWordToTaggedSigned(
- assembler->Projection(0, pair)));
- assembler->Goto(&end);
+ Bind(&if_notoverflow);
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
+ Goto(&end);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsHeapNumberMap(rhs_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->SmiToFloat64(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(SmiToFloat64(lhs));
+ var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotsmi);
+ Bind(&if_lhsisnotsmi);
{
// Load the map of the {lhs}.
- Node* lhs_map = assembler->LoadMap(lhs);
+ Node* lhs_map = LoadMap(lhs);
// Check if the {lhs} is a HeapNumber.
- Label if_lhsisnumber(assembler),
- if_lhsisnotnumber(assembler, Label::kDeferred);
- Node* number_map = assembler->HeapNumberMapConstant();
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &if_lhsisnumber, &if_lhsisnotnumber);
+ Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
- assembler->Bind(&if_lhsisnumber);
+ Bind(&if_lhsisnumber);
{
// Check if the {rhs} is a Smi.
- Label if_rhsissmi(assembler), if_rhsisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &if_rhsissmi,
- &if_rhsisnotsmi);
+ Label if_rhsissmi(this), if_rhsisnotsmi(this);
+ Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
- assembler->Bind(&if_rhsissmi);
+ Bind(&if_rhsissmi);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(SmiToFloat64(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotsmi);
+ Bind(&if_rhsisnotsmi);
{
// Load the map of the {rhs}.
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if the {rhs} is a HeapNumber.
- Label if_rhsisnumber(assembler),
- if_rhsisnotnumber(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &if_rhsisnumber, &if_rhsisnotnumber);
+ Label if_rhsisnumber(this), if_rhsisnotnumber(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
- assembler->Bind(&if_rhsisnumber);
+ Bind(&if_rhsisnumber);
{
// Perform a floating point subtraction.
- var_fsub_lhs.Bind(assembler->LoadHeapNumberValue(lhs));
- var_fsub_rhs.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fsub);
+ var_fsub_lhs.Bind(LoadHeapNumberValue(lhs));
+ var_fsub_rhs.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fsub);
}
- assembler->Bind(&if_rhsisnotnumber);
+ Bind(&if_rhsisnotnumber);
{
// Convert the {rhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_rhs.Bind(assembler->CallStub(callable, context, rhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_rhs.Bind(CallStub(callable, context, rhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&if_lhsisnotnumber);
+ Bind(&if_lhsisnotnumber);
{
// Convert the {lhs} to a Number first.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fsub);
+ Bind(&do_fsub);
{
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
- Node* value = assembler->Float64Sub(lhs_value, rhs_value);
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&end);
+ Node* value = Float64Sub(lhs_value, rhs_value);
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
-void Builtins::Generate_Multiply(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Multiply, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry point for floating point multiplication.
- Label do_fmul(assembler), return_result(assembler);
- Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
- var_rhs_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fmul(this), return_result(this);
+ Variable var_lhs_float64(this, MachineRepresentation::kFloat64),
+ var_rhs_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_lhs(assembler, MachineRepresentation::kTagged),
- var_rhs(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_lhs(this, MachineRepresentation::kTagged),
+ var_rhs(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_lhs, &var_rhs};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_lhs.Bind(left);
var_rhs.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* lhs = var_lhs.value();
Node* rhs = var_rhs.value();
- Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi,
- &lhs_is_not_smi);
+ Label lhs_is_smi(this), lhs_is_not_smi(this);
+ Branch(TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
- assembler->Bind(&lhs_is_smi);
+ Bind(&lhs_is_smi);
{
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
+ Label rhs_is_smi(this), rhs_is_not_smi(this);
+ Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
- assembler->Bind(&rhs_is_smi);
+ Bind(&rhs_is_smi);
{
        // Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi,
        // in case of overflow.
- var_result.Bind(assembler->SmiMul(lhs, rhs));
- assembler->Goto(&return_result);
+ var_result.Bind(SmiMul(lhs, rhs));
+ Goto(&return_result);
}
- assembler->Bind(&rhs_is_not_smi);
+ Bind(&rhs_is_not_smi);
{
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
+ Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
- assembler->Bind(&rhs_is_number);
+ Bind(&rhs_is_number);
{
// Convert {lhs} to a double and multiply it with the value of {rhs}.
- var_lhs_float64.Bind(assembler->SmiToFloat64(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(SmiToFloat64(lhs));
+ var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_number);
+ Bind(&rhs_is_not_number);
{
// Multiplication is commutative, swap {lhs} with {rhs} and loop.
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
- assembler->Goto(&loop);
+ Goto(&loop);
}
}
}
- assembler->Bind(&lhs_is_not_smi);
+ Bind(&lhs_is_not_smi);
{
- Node* lhs_map = assembler->LoadMap(lhs);
+ Node* lhs_map = LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- Label lhs_is_number(assembler),
- lhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(lhs_map, number_map),
- &lhs_is_number, &lhs_is_not_number);
+ Label lhs_is_number(this), lhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(lhs_map), &lhs_is_number, &lhs_is_not_number);
- assembler->Bind(&lhs_is_number);
+ Bind(&lhs_is_number);
{
// Check if {rhs} is a Smi.
- Label rhs_is_smi(assembler), rhs_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(rhs), &rhs_is_smi,
- &rhs_is_not_smi);
+ Label rhs_is_smi(this), rhs_is_not_smi(this);
+ Branch(TaggedIsSmi(rhs), &rhs_is_smi, &rhs_is_not_smi);
- assembler->Bind(&rhs_is_smi);
+ Bind(&rhs_is_smi);
{
        // Convert {rhs} to a double and multiply it by the value of {lhs}.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->SmiToFloat64(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(SmiToFloat64(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_smi);
+ Bind(&rhs_is_not_smi);
{
- Node* rhs_map = assembler->LoadMap(rhs);
+ Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Label rhs_is_number(assembler),
- rhs_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(rhs_map, number_map),
- &rhs_is_number, &rhs_is_not_number);
+ Label rhs_is_number(this), rhs_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(rhs_map), &rhs_is_number, &rhs_is_not_number);
- assembler->Bind(&rhs_is_number);
+ Bind(&rhs_is_number);
{
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
// multiply them.
- var_lhs_float64.Bind(assembler->LoadHeapNumberValue(lhs));
- var_rhs_float64.Bind(assembler->LoadHeapNumberValue(rhs));
- assembler->Goto(&do_fmul);
+ var_lhs_float64.Bind(LoadHeapNumberValue(lhs));
+ var_rhs_float64.Bind(LoadHeapNumberValue(rhs));
+ Goto(&do_fmul);
}
- assembler->Bind(&rhs_is_not_number);
+ Bind(&rhs_is_not_number);
{
// Multiplication is commutative, swap {lhs} with {rhs} and loop.
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
- assembler->Goto(&loop);
+ Goto(&loop);
}
}
}
- assembler->Bind(&lhs_is_not_number);
+ Bind(&lhs_is_not_number);
{
// Convert {lhs} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_lhs.Bind(assembler->CallStub(callable, context, lhs));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_lhs.Bind(CallStub(callable, context, lhs));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fmul);
+ Bind(&do_fmul);
{
- Node* value =
- assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
- Node* result = assembler->AllocateHeapNumberWithValue(value);
+ Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
+ Node* result = AllocateHeapNumberWithValue(value);
var_result.Bind(result);
- assembler->Goto(&return_result);
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
- assembler->Return(var_result.value());
+ Bind(&return_result);
+ Return(var_result.value());
}
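// A minimal plain-C++ sketch of the overflow check that SmiMul performs
// above; TagSmi, AllocateHeapNumber, kSmiMinValue and kSmiMaxValue are
// hypothetical stand-ins for the real CSA helpers and constants:
//
//   Object* SmiMulSketch(int32_t lhs, int32_t rhs) {
//     int64_t product = static_cast<int64_t>(lhs) * rhs;
//     if (product == 0 && (lhs < 0 || rhs < 0))
//       return AllocateHeapNumber(-0.0);          // JS: -3 * 0 is -0
//     if (kSmiMinValue <= product && product <= kSmiMaxValue)
//       return TagSmi(static_cast<intptr_t>(product));
//     return AllocateHeapNumber(static_cast<double>(product));  // overflow
//   }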
-void Builtins::Generate_Divide(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+TF_BUILTIN(Divide, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
// Shared entry point for floating point division.
- Label do_fdiv(assembler), end(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fdiv(this), end(this);
+ Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+ var_divisor_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged),
- var_result(assembler, MachineRepresentation::kTagged);
+ Variable var_dividend(this, MachineRepresentation::kTagged),
+ var_divisor(this, MachineRepresentation::kTagged),
+ var_result(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_dividend.Bind(left);
var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* dividend = var_dividend.value();
Node* divisor = var_divisor.value();
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
+ Label dividend_is_smi(this), dividend_is_not_smi(this);
+ Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
- assembler->Bind(&dividend_is_smi);
+ Bind(&dividend_is_smi);
{
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
- Label bailout(assembler);
+ Label bailout(this);
// Do floating point division if {divisor} is zero.
- assembler->GotoIf(
- assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
- &bailout);
+ GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout);
        // Do floating point division if {dividend} is zero and {divisor} is
        // negative.
- Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
- assembler->Branch(
- assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
- &dividend_is_zero, &dividend_is_not_zero);
+ Label dividend_is_zero(this), dividend_is_not_zero(this);
+ Branch(SmiEqual(dividend, SmiConstant(0)), &dividend_is_zero,
+ &dividend_is_not_zero);
- assembler->Bind(&dividend_is_zero);
+ Bind(&dividend_is_zero);
{
- assembler->GotoIf(
- assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
- &bailout);
- assembler->Goto(&dividend_is_not_zero);
+ GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout);
+ Goto(&dividend_is_not_zero);
}
- assembler->Bind(&dividend_is_not_zero);
+ Bind(&dividend_is_not_zero);
- Node* untagged_divisor = assembler->SmiUntag(divisor);
- Node* untagged_dividend = assembler->SmiUntag(dividend);
+ Node* untagged_divisor = SmiToWord32(divisor);
+ Node* untagged_dividend = SmiToWord32(dividend);
        // Do floating point division if {dividend} is kMinInt (or kMinInt >> 1
        // if the Smi size is 31 bits) and {divisor} is -1, since that quotient
        // cannot be represented as a Smi.
- Label divisor_is_minus_one(assembler),
- divisor_is_not_minus_one(assembler);
- assembler->Branch(assembler->Word32Equal(untagged_divisor,
- assembler->Int32Constant(-1)),
- &divisor_is_minus_one, &divisor_is_not_minus_one);
+ Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
+ Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
+ &divisor_is_minus_one, &divisor_is_not_minus_one);
- assembler->Bind(&divisor_is_minus_one);
+ Bind(&divisor_is_minus_one);
{
- assembler->GotoIf(
- assembler->Word32Equal(
- untagged_dividend,
- assembler->Int32Constant(
- kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
+ GotoIf(
+ Word32Equal(untagged_dividend,
+ Int32Constant(kSmiValueSize == 32 ? kMinInt
+ : (kMinInt >> 1))),
&bailout);
- assembler->Goto(&divisor_is_not_minus_one);
+ Goto(&divisor_is_not_minus_one);
}
- assembler->Bind(&divisor_is_not_minus_one);
+ Bind(&divisor_is_not_minus_one);
// TODO(epertoso): consider adding a machine instruction that returns
// both the result and the remainder.
- Node* untagged_result =
- assembler->Int32Div(untagged_dividend, untagged_divisor);
- Node* truncated =
- assembler->Int32Mul(untagged_result, untagged_divisor);
+ Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
+ Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
- assembler->GotoIf(
- assembler->Word32NotEqual(untagged_dividend, truncated), &bailout);
- var_result.Bind(assembler->SmiTag(untagged_result));
- assembler->Goto(&end);
+ GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout);
+ var_result.Bind(SmiFromWord32(untagged_result));
+ Goto(&end);
// Bailout: convert {dividend} and {divisor} to double and do double
// division.
- assembler->Bind(&bailout);
+ Bind(&bailout);
{
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fdiv);
}
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
        // Convert {dividend} to a double and divide it by the value of
        // {divisor}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_smi);
+ Bind(&dividend_is_not_smi);
{
- Node* dividend_map = assembler->LoadMap(dividend);
+ Node* dividend_map = LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
+ Label dividend_is_number(this),
+ dividend_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+ &dividend_is_not_number);
- assembler->Bind(&dividend_is_number);
+ Bind(&dividend_is_number);
{
// Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Convert {divisor} to a double and use it for a floating point
// division.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and divide them.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fdiv);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fdiv);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_number);
+ Bind(&dividend_is_not_number);
{
// Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_dividend.Bind(CallStub(callable, context, dividend));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fdiv);
+ Bind(&do_fdiv);
{
- Node* value = assembler->Float64Div(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&end);
+ Node* value =
+ Float64Div(var_dividend_float64.value(), var_divisor_float64.value());
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&end);
}
- assembler->Bind(&end);
- assembler->Return(var_result.value());
+ Bind(&end);
+ Return(var_result.value());
}
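// The Smi fast path above bails out to floating point division in exactly
// the four cases it checks; condensed as a plain-C++ predicate (sketch only,
// with kMinInt standing for the smallest Smi on 32-bit-Smi builds):
//
//   bool NeedsFloatDivide(int32_t dividend, int32_t divisor) {
//     if (divisor == 0) return true;                  // 1 / 0 -> Infinity
//     if (dividend == 0 && divisor < 0) return true;  // 0 / -1 -> -0
//     if (dividend == kMinInt && divisor == -1) return true;  // overflows
//     return dividend % divisor != 0;                 // 3 / 2 -> 1.5
//   }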
-void Builtins::Generate_Modulus(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(Modulus, CodeStubAssembler) {
+ Node* left = Parameter(0);
+ Node* right = Parameter(1);
+ Node* context = Parameter(2);
- Node* left = assembler->Parameter(0);
- Node* right = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
-
- Variable var_result(assembler, MachineRepresentation::kTagged);
- Label return_result(assembler, &var_result);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label return_result(this, &var_result);
// Shared entry point for floating point modulus.
- Label do_fmod(assembler);
- Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
- var_divisor_float64(assembler, MachineRepresentation::kFloat64);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ Label do_fmod(this);
+ Variable var_dividend_float64(this, MachineRepresentation::kFloat64),
+ var_divisor_float64(this, MachineRepresentation::kFloat64);
// We might need to loop one or two times due to ToNumber conversions.
- Variable var_dividend(assembler, MachineRepresentation::kTagged),
- var_divisor(assembler, MachineRepresentation::kTagged);
+ Variable var_dividend(this, MachineRepresentation::kTagged),
+ var_divisor(this, MachineRepresentation::kTagged);
Variable* loop_variables[] = {&var_dividend, &var_divisor};
- Label loop(assembler, 2, loop_variables);
+ Label loop(this, 2, loop_variables);
var_dividend.Bind(left);
var_divisor.Bind(right);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* dividend = var_dividend.value();
Node* divisor = var_divisor.value();
- Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
- &dividend_is_not_smi);
+ Label dividend_is_smi(this), dividend_is_not_smi(this);
+ Branch(TaggedIsSmi(dividend), &dividend_is_smi, &dividend_is_not_smi);
- assembler->Bind(&dividend_is_smi);
+ Bind(&dividend_is_smi);
{
- Label dividend_is_not_zero(assembler);
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label dividend_is_not_zero(this);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Compute the modulus of two Smis.
- var_result.Bind(assembler->SmiMod(dividend, divisor));
- assembler->Goto(&return_result);
+ var_result.Bind(SmiMod(dividend, divisor));
+ Goto(&return_result);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
        // Convert {dividend} to a double and compute its modulus with the
        // value of {divisor}.
- var_dividend_float64.Bind(assembler->SmiToFloat64(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(SmiToFloat64(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_smi);
+ Bind(&dividend_is_not_smi);
{
- Node* dividend_map = assembler->LoadMap(dividend);
+ Node* dividend_map = LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- Label dividend_is_number(assembler),
- dividend_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(dividend_map, number_map),
- &dividend_is_number, &dividend_is_not_number);
+ Label dividend_is_number(this),
+ dividend_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(dividend_map), &dividend_is_number,
+ &dividend_is_not_number);
- assembler->Bind(&dividend_is_number);
+ Bind(&dividend_is_number);
{
// Check if {divisor} is a Smi.
- Label divisor_is_smi(assembler), divisor_is_not_smi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(divisor), &divisor_is_smi,
- &divisor_is_not_smi);
+ Label divisor_is_smi(this), divisor_is_not_smi(this);
+ Branch(TaggedIsSmi(divisor), &divisor_is_smi, &divisor_is_not_smi);
- assembler->Bind(&divisor_is_smi);
+ Bind(&divisor_is_smi);
{
// Convert {divisor} to a double and compute {dividend}'s modulus with
// it.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->SmiToFloat64(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(SmiToFloat64(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_smi);
+ Bind(&divisor_is_not_smi);
{
- Node* divisor_map = assembler->LoadMap(divisor);
+ Node* divisor_map = LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- Label divisor_is_number(assembler),
- divisor_is_not_number(assembler, Label::kDeferred);
- assembler->Branch(assembler->WordEqual(divisor_map, number_map),
- &divisor_is_number, &divisor_is_not_number);
+ Label divisor_is_number(this),
+ divisor_is_not_number(this, Label::kDeferred);
+ Branch(IsHeapNumberMap(divisor_map), &divisor_is_number,
+ &divisor_is_not_number);
- assembler->Bind(&divisor_is_number);
+ Bind(&divisor_is_number);
{
// Both {dividend} and {divisor} are HeapNumbers. Load their values
// and compute their modulus.
- var_dividend_float64.Bind(assembler->LoadHeapNumberValue(dividend));
- var_divisor_float64.Bind(assembler->LoadHeapNumberValue(divisor));
- assembler->Goto(&do_fmod);
+ var_dividend_float64.Bind(LoadHeapNumberValue(dividend));
+ var_divisor_float64.Bind(LoadHeapNumberValue(divisor));
+ Goto(&do_fmod);
}
- assembler->Bind(&divisor_is_not_number);
+ Bind(&divisor_is_not_number);
{
// Convert {divisor} to a number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_divisor.Bind(assembler->CallStub(callable, context, divisor));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_divisor.Bind(CallStub(callable, context, divisor));
+ Goto(&loop);
}
}
}
- assembler->Bind(&dividend_is_not_number);
+ Bind(&dividend_is_not_number);
{
// Convert {dividend} to a Number and loop.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_dividend.Bind(assembler->CallStub(callable, context, dividend));
- assembler->Goto(&loop);
+ Callable callable = CodeFactory::NonNumberToNumber(isolate());
+ var_dividend.Bind(CallStub(callable, context, dividend));
+ Goto(&loop);
}
}
}
- assembler->Bind(&do_fmod);
+ Bind(&do_fmod);
{
- Node* value = assembler->Float64Mod(var_dividend_float64.value(),
- var_divisor_float64.value());
- var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
- assembler->Goto(&return_result);
+ Node* value =
+ Float64Mod(var_dividend_float64.value(), var_divisor_float64.value());
+ var_result.Bind(AllocateHeapNumberWithValue(value));
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
- assembler->Return(var_result.value());
+ Bind(&return_result);
+ Return(var_result.value());
}
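// SmiMod above has a similar negative-zero wrinkle: in JS, -2 % 2 is -0,
// which no Smi can represent. A hedged plain-C++ sketch of the idea
// (hypothetical helpers, not the actual CSA code):
//
//   Object* SmiModSketch(int32_t a, int32_t b) {
//     if (b == 0 || (a == kMinInt && b == -1))   // avoid int32 UB; these
//       return FloatMod(a, b);                   // yield NaN or -0
//     int32_t rem = a % b;
//     if (rem == 0 && a < 0) return AllocateHeapNumber(-0.0);  // -2 % 2
//     return TagSmi(rem);
//   }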
-void Builtins::Generate_ShiftLeft(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shl(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftLeft, NumberBuiltinsAssembler) {
+ BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+ return Word32Shl(lhs, shift_count);
+ });
}
-void Builtins::Generate_ShiftRight(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Sar(lhs_value, shift_count);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) {
+ BitwiseShiftOp([this](Node* lhs, Node* shift_count) {
+ return Word32Sar(lhs, shift_count);
+ });
}
-void Builtins::Generate_ShiftRightLogical(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* shift_count =
- assembler->Word32And(rhs_value, assembler->Int32Constant(0x1f));
- Node* value = assembler->Word32Shr(lhs_value, shift_count);
- Node* result = assembler->ChangeUint32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) {
+ BitwiseShiftOp<kUnsigned>([this](Node* lhs, Node* shift_count) {
+ return Word32Shr(lhs, shift_count);
+ });
}
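// BitwiseShiftOp presumably keeps the 0x1f mask that the open-coded versions
// above applied: ECMA-262 takes shift counts modulo 32. The kUnsigned
// argument for ShiftRightLogical reflects that >>> yields a uint32 (e.g.
// -1 >>> 0 is 4294967295), which must be tagged as an unsigned value. A
// plain-C++ sketch of the three operations:
//
//   int32_t ShlSketch(int32_t lhs, int32_t rhs) {
//     return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (rhs & 0x1f));
//   }
//   int32_t SarSketch(int32_t lhs, int32_t rhs) { return lhs >> (rhs & 0x1f); }
//   uint32_t ShrSketch(int32_t lhs, int32_t rhs) {
//     return static_cast<uint32_t>(lhs) >> (rhs & 0x1f);
//   }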
-void Builtins::Generate_BitwiseAnd(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32And(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32And(lhs, rhs); });
}
-void Builtins::Generate_BitwiseOr(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Or(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Or(lhs, rhs); });
}
-void Builtins::Generate_BitwiseXor(CodeStubAssembler* assembler) {
- compiler::Node* left = assembler->Parameter(0);
- compiler::Node* right = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- using compiler::Node;
-
- Node* lhs_value = assembler->TruncateTaggedToWord32(context, left);
- Node* rhs_value = assembler->TruncateTaggedToWord32(context, right);
- Node* value = assembler->Word32Xor(lhs_value, rhs_value);
- Node* result = assembler->ChangeInt32ToTagged(value);
- assembler->Return(result);
+TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) {
+ BitwiseOp([this](Node* lhs, Node* rhs) { return Word32Xor(lhs, rhs); });
}
-void Builtins::Generate_LessThan(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kLessThan, lhs, rhs, context));
+TF_BUILTIN(LessThan, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kLessThan);
}
-void Builtins::Generate_LessThanOrEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kLessThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kLessThanOrEqual);
}
-void Builtins::Generate_GreaterThan(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kGreaterThan, lhs, rhs, context));
+TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kGreaterThan);
}
-void Builtins::Generate_GreaterThanOrEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
-
- assembler->Return(assembler->RelationalComparison(
- CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context));
+TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) {
+ RelationalComparisonBuiltin(kGreaterThanOrEqual);
}
-void Builtins::Generate_Equal(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(Equal, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->Equal(CodeStubAssembler::kDontNegateResult, lhs,
- rhs, context));
+ Return(Equal(kDontNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_NotEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(NotEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(
- assembler->Equal(CodeStubAssembler::kNegateResult, lhs, rhs, context));
+ Return(Equal(kNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_StrictEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->StrictEqual(CodeStubAssembler::kDontNegateResult,
- lhs, rhs, context));
+ Return(StrictEqual(kDontNegateResult, lhs, rhs, context));
}
-void Builtins::Generate_StrictNotEqual(CodeStubAssembler* assembler) {
- compiler::Node* lhs = assembler->Parameter(0);
- compiler::Node* rhs = assembler->Parameter(1);
- compiler::Node* context = assembler->Parameter(2);
+TF_BUILTIN(StrictNotEqual, CodeStubAssembler) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- assembler->Return(assembler->StrictEqual(CodeStubAssembler::kNegateResult,
- lhs, rhs, context));
+ Return(StrictEqual(kNegateResult, lhs, rhs, context));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index abb5c47555..74e0a20832 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -2,149 +2,68 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/property-descriptor.h"
namespace v8 {
namespace internal {
+class ObjectBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit ObjectBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ void IsString(Node* object, Label* if_string, Label* if_notstring);
+ void ReturnToStringFormat(Node* context, Node* string);
+};
+
// -----------------------------------------------------------------------------
// ES6 section 19.1 Object Objects
-void Builtins::Generate_ObjectHasOwnProperty(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
+ Node* object = Parameter(0);
+ Node* key = Parameter(1);
+ Node* context = Parameter(4);
- Node* object = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
-
- Label call_runtime(assembler), return_true(assembler),
- return_false(assembler);
+ Label call_runtime(this), return_true(this), return_false(this);
// Smi receivers do not have own properties.
- Label if_objectisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(object), &return_false,
- &if_objectisnotsmi);
- assembler->Bind(&if_objectisnotsmi);
+ Label if_objectisnotsmi(this);
+ Branch(TaggedIsSmi(object), &return_false, &if_objectisnotsmi);
+ Bind(&if_objectisnotsmi);
- Node* map = assembler->LoadMap(object);
- Node* instance_type = assembler->LoadMapInstanceType(map);
+ Node* map = LoadMap(object);
+ Node* instance_type = LoadMapInstanceType(map);
- Variable var_index(assembler, MachineType::PointerRepresentation());
+ Variable var_index(this, MachineType::PointerRepresentation());
- Label keyisindex(assembler), if_iskeyunique(assembler);
- assembler->TryToName(key, &keyisindex, &var_index, &if_iskeyunique,
- &call_runtime);
+ Label keyisindex(this), if_iskeyunique(this);
+ TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &call_runtime);
- assembler->Bind(&if_iskeyunique);
- assembler->TryHasOwnProperty(object, map, instance_type, key, &return_true,
- &return_false, &call_runtime);
+ Bind(&if_iskeyunique);
+ TryHasOwnProperty(object, map, instance_type, key, &return_true,
+ &return_false, &call_runtime);
- assembler->Bind(&keyisindex);
+ Bind(&keyisindex);
// Handle negative keys in the runtime.
- assembler->GotoIf(assembler->IntPtrLessThan(var_index.value(),
- assembler->IntPtrConstant(0)),
- &call_runtime);
- assembler->TryLookupElement(object, map, instance_type, var_index.value(),
- &return_true, &return_false, &call_runtime);
-
- assembler->Bind(&return_true);
- assembler->Return(assembler->BooleanConstant(true));
-
- assembler->Bind(&return_false);
- assembler->Return(assembler->BooleanConstant(false));
-
- assembler->Bind(&call_runtime);
- assembler->Return(assembler->CallRuntime(Runtime::kObjectHasOwnProperty,
- context, object, key));
-}
-
-namespace {
-
-MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> to,
- Handle<Object> next_source) {
- // Non-empty strings are the only non-JSReceivers that need to be handled
- // explicitly by Object.assign.
- if (!next_source->IsJSReceiver()) {
- return Just(!next_source->IsString() ||
- String::cast(*next_source)->length() == 0);
- }
-
- // If the target is deprecated, the object will be updated on first store. If
- // the source for that store equals the target, this will invalidate the
- // cached representation of the source. Preventively upgrade the target.
- // Do this on each iteration since any property load could cause deprecation.
- if (to->map()->is_deprecated()) {
- JSObject::MigrateInstance(Handle<JSObject>::cast(to));
- }
-
- Isolate* isolate = to->GetIsolate();
- Handle<Map> map(JSReceiver::cast(*next_source)->map(), isolate);
+ GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
+ TryLookupElement(object, map, instance_type, var_index.value(), &return_true,
+ &return_false, &call_runtime);
- if (!map->IsJSObjectMap()) return Just(false);
- if (!map->OnlyHasSimpleProperties()) return Just(false);
+ Bind(&return_true);
+ Return(BooleanConstant(true));
- Handle<JSObject> from = Handle<JSObject>::cast(next_source);
- if (from->elements() != isolate->heap()->empty_fixed_array()) {
- return Just(false);
- }
+ Bind(&return_false);
+ Return(BooleanConstant(false));
- Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
- int length = map->NumberOfOwnDescriptors();
-
- bool stable = true;
-
- for (int i = 0; i < length; i++) {
- Handle<Name> next_key(descriptors->GetKey(i), isolate);
- Handle<Object> prop_value;
- // Directly decode from the descriptor array if |from| did not change shape.
- if (stable) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!details.IsEnumerable()) continue;
- if (details.kind() == kData) {
- if (details.location() == kDescriptor) {
- prop_value = handle(descriptors->GetValue(i), isolate);
- } else {
- Representation representation = details.representation();
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- prop_value = JSObject::FastPropertyAt(from, representation, index);
- }
- } else {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, JSReceiver::GetProperty(from, next_key),
- Nothing<bool>());
- stable = from->map() == *map;
- }
- } else {
- // If the map did change, do a slower lookup. We are still guaranteed that
- // the object has a simple shape, and that the key is a name.
- LookupIterator it(from, next_key, from,
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (!it.IsFound()) continue;
- DCHECK(it.state() == LookupIterator::DATA ||
- it.state() == LookupIterator::ACCESSOR);
- if (!it.IsEnumerable()) continue;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
- }
- LookupIterator it(to, next_key, to);
- bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
- Maybe<bool> result = Object::SetProperty(
- &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
- if (result.IsNothing()) return result;
- if (stable && call_to_js) stable = from->map() == *map;
- }
-
- return Just(true);
+ Bind(&call_runtime);
+ Return(CallRuntime(Runtime::kObjectHasOwnProperty, context, object, key));
}
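// The dispatch above, in rough pseudo-C++ (hypothetical helper names; the
// real code is the CSA above, with Runtime::kObjectHasOwnProperty as the
// catch-all slow path):
//
//   if (IsSmi(object)) return false;        // Smis have no own properties
//   if (KeyIsArrayIndex(key, &index)) {
//     if (index < 0) return SlowPath();     // negative indices -> runtime
//     return HasOwnElement(object, index);
//   }
//   if (KeyIsUniqueName(key)) return HasOwnNamedProperty(object, key);
//   return SlowPath();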
-} // namespace
-
// ES6 19.1.2.1 Object.assign
BUILTIN(ObjectAssign) {
HandleScope scope(isolate);
@@ -160,44 +79,10 @@ BUILTIN(ObjectAssign) {
// second argument.
// 4. For each element nextSource of sources, in ascending index order,
for (int i = 2; i < args.length(); ++i) {
- Handle<Object> next_source = args.at<Object>(i);
- Maybe<bool> fast_assign = FastAssign(to, next_source);
- if (fast_assign.IsNothing()) return isolate->heap()->exception();
- if (fast_assign.FromJust()) continue;
- // 4a. If nextSource is undefined or null, let keys be an empty List.
- // 4b. Else,
- // 4b i. Let from be ToObject(nextSource).
- // Only non-empty strings and JSReceivers have enumerable properties.
- Handle<JSReceiver> from =
- Object::ToObject(isolate, next_source).ToHandleChecked();
- // 4b ii. Let keys be ? from.[[OwnPropertyKeys]]().
- Handle<FixedArray> keys;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, KeyAccumulator::GetKeys(
- from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
- GetKeysConversion::kKeepNumbers));
- // 4c. Repeat for each element nextKey of keys in List order,
- for (int j = 0; j < keys->length(); ++j) {
- Handle<Object> next_key(keys->get(j), isolate);
- // 4c i. Let desc be ? from.[[GetOwnProperty]](nextKey).
- PropertyDescriptor desc;
- Maybe<bool> found =
- JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
- if (found.IsNothing()) return isolate->heap()->exception();
- // 4c ii. If desc is not undefined and desc.[[Enumerable]] is true, then
- if (found.FromJust() && desc.enumerable()) {
- // 4c ii 1. Let propValue be ? Get(from, nextKey).
- Handle<Object> prop_value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, prop_value,
- Runtime::GetObjectProperty(isolate, from, next_key));
- // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
- Handle<Object> status;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, status, Runtime::SetObjectProperty(isolate, to, next_key,
- prop_value, STRICT));
- }
- }
+ Handle<Object> next_source = args.at(i);
+ MAYBE_RETURN(
+ JSReceiver::SetOrCopyDataProperties(isolate, to, next_source, true),
+ isolate->heap()->exception());
}
// 5. Return to.
return *to;
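  // SetOrCopyDataProperties now centralizes both the FastAssign shortcut and
  // the open-coded spec steps 4a-4c deleted above. Per source, roughly
  // (sketch, not the helper's actual code):
  //
  //   from = ToObject(next_source);                        // 4b i
  //   for each enumerable own key k of from:               // 4b ii / 4c
  //     Set(to, k, Get(from, k), /* throw on failure */);  // 4c ii
  //
  // with a field-by-field fast path for simple-shaped JSObject sources.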
@@ -219,134 +104,90 @@ BUILTIN(ObjectPrototypePropertyIsEnumerable) {
return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
-namespace { // anonymous namespace for ObjectProtoToString()
-
-void IsString(CodeStubAssembler* assembler, compiler::Node* object,
- CodeStubAssembler::Label* if_string,
- CodeStubAssembler::Label* if_notstring) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
+void ObjectBuiltinsAssembler::IsString(Node* object, Label* if_string,
+ Label* if_notstring) {
+ Label if_notsmi(this);
+ Branch(TaggedIsSmi(object), if_notstring, &if_notsmi);
- Label if_notsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(object), if_notstring, &if_notsmi);
-
- assembler->Bind(&if_notsmi);
+ Bind(&if_notsmi);
{
- Node* instance_type = assembler->LoadInstanceType(object);
+ Node* instance_type = LoadInstanceType(object);
- assembler->Branch(assembler->IsStringInstanceType(instance_type), if_string,
- if_notstring);
+ Branch(IsStringInstanceType(instance_type), if_string, if_notstring);
}
}
-void ReturnToStringFormat(CodeStubAssembler* assembler, compiler::Node* context,
- compiler::Node* string) {
- typedef compiler::Node Node;
-
- Node* lhs = assembler->HeapConstant(
- assembler->factory()->NewStringFromStaticChars("[object "));
- Node* rhs = assembler->HeapConstant(
- assembler->factory()->NewStringFromStaticChars("]"));
+void ObjectBuiltinsAssembler::ReturnToStringFormat(Node* context,
+ Node* string) {
+ Node* lhs = HeapConstant(factory()->NewStringFromStaticChars("[object "));
+ Node* rhs = HeapConstant(factory()->NewStringFromStaticChars("]"));
- Callable callable = CodeFactory::StringAdd(
- assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+ Callable callable =
+ CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
- assembler->Return(assembler->CallStub(
- callable, context, assembler->CallStub(callable, context, lhs, string),
- rhs));
+ Return(CallStub(callable, context, CallStub(callable, context, lhs, string),
+ rhs));
}
-void ReturnIfPrimitive(CodeStubAssembler* assembler,
- compiler::Node* instance_type,
- CodeStubAssembler::Label* return_string,
- CodeStubAssembler::Label* return_boolean,
- CodeStubAssembler::Label* return_number) {
- assembler->GotoIf(assembler->IsStringInstanceType(instance_type),
- return_string);
-
- assembler->GotoIf(assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE)),
- return_boolean);
-
- assembler->GotoIf(
- assembler->Word32Equal(instance_type,
- assembler->Int32Constant(HEAP_NUMBER_TYPE)),
- return_number);
-}
-
-} // namespace
-
// ES6 section 19.1.3.6 Object.prototype.toString
-void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label return_undefined(assembler, Label::kDeferred),
- return_null(assembler, Label::kDeferred),
- return_arguments(assembler, Label::kDeferred), return_array(assembler),
- return_api(assembler, Label::kDeferred), return_object(assembler),
- return_regexp(assembler), return_function(assembler),
- return_error(assembler), return_date(assembler), return_string(assembler),
- return_boolean(assembler), return_jsvalue(assembler),
- return_jsproxy(assembler, Label::kDeferred), return_number(assembler);
+TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
+ Label return_undefined(this, Label::kDeferred),
+ return_null(this, Label::kDeferred),
+ return_arguments(this, Label::kDeferred), return_array(this),
+ return_api(this, Label::kDeferred), return_object(this),
+ return_regexp(this), return_function(this), return_error(this),
+ return_date(this), return_jsvalue(this),
+ return_jsproxy(this, Label::kDeferred);
- Label if_isproxy(assembler, Label::kDeferred);
+ Label if_isproxy(this, Label::kDeferred);
- Label checkstringtag(assembler);
- Label if_tostringtag(assembler), if_notostringtag(assembler);
+ Label checkstringtag(this);
+ Label if_tostringtag(this), if_notostringtag(this);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- assembler->GotoIf(
- assembler->WordEqual(receiver, assembler->UndefinedConstant()),
- &return_undefined);
+ GotoIf(WordEqual(receiver, UndefinedConstant()), &return_undefined);
- assembler->GotoIf(assembler->WordEqual(receiver, assembler->NullConstant()),
- &return_null);
+ GotoIf(WordEqual(receiver, NullConstant()), &return_null);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &return_number);
+ Callable to_object = CodeFactory::ToObject(isolate());
+ receiver = CallStub(to_object, context, receiver);
- Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
- ReturnIfPrimitive(assembler, receiver_instance_type, &return_string,
- &return_boolean, &return_number);
+ Node* receiver_instance_type = LoadInstanceType(receiver);
  // For proxies, check IsArray before getting @@toStringTag.
- Variable var_proxy_is_array(assembler, MachineRepresentation::kTagged);
- var_proxy_is_array.Bind(assembler->BooleanConstant(false));
+ Variable var_proxy_is_array(this, MachineRepresentation::kTagged);
+ var_proxy_is_array.Bind(BooleanConstant(false));
- assembler->Branch(
- assembler->Word32Equal(receiver_instance_type,
- assembler->Int32Constant(JS_PROXY_TYPE)),
- &if_isproxy, &checkstringtag);
+ Branch(Word32Equal(receiver_instance_type, Int32Constant(JS_PROXY_TYPE)),
+ &if_isproxy, &checkstringtag);
- assembler->Bind(&if_isproxy);
+ Bind(&if_isproxy);
{
    // This can throw.
var_proxy_is_array.Bind(
- assembler->CallRuntime(Runtime::kArrayIsArray, context, receiver));
- assembler->Goto(&checkstringtag);
+ CallRuntime(Runtime::kArrayIsArray, context, receiver));
+ Goto(&checkstringtag);
}
- assembler->Bind(&checkstringtag);
+ Bind(&checkstringtag);
{
- Node* to_string_tag_symbol = assembler->HeapConstant(
- assembler->isolate()->factory()->to_string_tag_symbol());
+ Node* to_string_tag_symbol =
+ HeapConstant(isolate()->factory()->to_string_tag_symbol());
- GetPropertyStub stub(assembler->isolate());
+ GetPropertyStub stub(isolate());
Callable get_property =
Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
- Node* to_string_tag_value = assembler->CallStub(
- get_property, context, receiver, to_string_tag_symbol);
+ Node* to_string_tag_value =
+ CallStub(get_property, context, receiver, to_string_tag_symbol);
- IsString(assembler, to_string_tag_value, &if_tostringtag,
- &if_notostringtag);
+ IsString(to_string_tag_value, &if_tostringtag, &if_notostringtag);
- assembler->Bind(&if_tostringtag);
- ReturnToStringFormat(assembler, context, to_string_tag_value);
+ Bind(&if_tostringtag);
+ ReturnToStringFormat(context, to_string_tag_value);
}
- assembler->Bind(&if_notostringtag);
+ Bind(&if_notostringtag);
{
size_t const kNumCases = 11;
Label* case_labels[kNumCases];
@@ -374,178 +215,164 @@ void Builtins::Generate_ObjectProtoToString(CodeStubAssembler* assembler) {
case_labels[10] = &return_jsproxy;
case_values[10] = JS_PROXY_TYPE;
- assembler->Switch(receiver_instance_type, &return_object, case_values,
- case_labels, arraysize(case_values));
+ Switch(receiver_instance_type, &return_object, case_values, case_labels,
+ arraysize(case_values));
- assembler->Bind(&return_undefined);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->undefined_to_string()));
+ Bind(&return_undefined);
+ Return(HeapConstant(isolate()->factory()->undefined_to_string()));
- assembler->Bind(&return_null);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->null_to_string()));
+ Bind(&return_null);
+ Return(HeapConstant(isolate()->factory()->null_to_string()));
- assembler->Bind(&return_number);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->number_to_string()));
+ Bind(&return_arguments);
+ Return(HeapConstant(isolate()->factory()->arguments_to_string()));
- assembler->Bind(&return_string);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->string_to_string()));
+ Bind(&return_array);
+ Return(HeapConstant(isolate()->factory()->array_to_string()));
- assembler->Bind(&return_boolean);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->boolean_to_string()));
+ Bind(&return_function);
+ Return(HeapConstant(isolate()->factory()->function_to_string()));
- assembler->Bind(&return_arguments);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->arguments_to_string()));
+ Bind(&return_error);
+ Return(HeapConstant(isolate()->factory()->error_to_string()));
- assembler->Bind(&return_array);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->array_to_string()));
+ Bind(&return_date);
+ Return(HeapConstant(isolate()->factory()->date_to_string()));
- assembler->Bind(&return_function);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->function_to_string()));
+ Bind(&return_regexp);
+ Return(HeapConstant(isolate()->factory()->regexp_to_string()));
- assembler->Bind(&return_error);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->error_to_string()));
-
- assembler->Bind(&return_date);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->date_to_string()));
-
- assembler->Bind(&return_regexp);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->regexp_to_string()));
-
- assembler->Bind(&return_api);
+ Bind(&return_api);
{
- Node* class_name =
- assembler->CallRuntime(Runtime::kClassOf, context, receiver);
- ReturnToStringFormat(assembler, context, class_name);
+ Node* class_name = CallRuntime(Runtime::kClassOf, context, receiver);
+ ReturnToStringFormat(context, class_name);
}
- assembler->Bind(&return_jsvalue);
+ Bind(&return_jsvalue);
{
- Node* value = assembler->LoadJSValueValue(receiver);
- assembler->GotoIf(assembler->TaggedIsSmi(value), &return_number);
+ Label return_boolean(this), return_number(this), return_string(this);
+
+ Node* value = LoadJSValueValue(receiver);
+ GotoIf(TaggedIsSmi(value), &return_number);
+ Node* instance_type = LoadInstanceType(value);
- ReturnIfPrimitive(assembler, assembler->LoadInstanceType(value),
- &return_string, &return_boolean, &return_number);
- assembler->Goto(&return_object);
+ GotoIf(IsStringInstanceType(instance_type), &return_string);
+ GotoIf(Word32Equal(instance_type, Int32Constant(HEAP_NUMBER_TYPE)),
+ &return_number);
+ GotoIf(Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE)),
+ &return_boolean);
+
+ CSA_ASSERT(this, Word32Equal(instance_type, Int32Constant(SYMBOL_TYPE)));
+ Goto(&return_object);
+
+ Bind(&return_string);
+ Return(HeapConstant(isolate()->factory()->string_to_string()));
+
+ Bind(&return_number);
+ Return(HeapConstant(isolate()->factory()->number_to_string()));
+
+ Bind(&return_boolean);
+ Return(HeapConstant(isolate()->factory()->boolean_to_string()));
}
- assembler->Bind(&return_jsproxy);
+ Bind(&return_jsproxy);
{
- assembler->GotoIf(assembler->WordEqual(var_proxy_is_array.value(),
- assembler->BooleanConstant(true)),
- &return_array);
+ GotoIf(WordEqual(var_proxy_is_array.value(), BooleanConstant(true)),
+ &return_array);
- Node* map = assembler->LoadMap(receiver);
+ Node* map = LoadMap(receiver);
// Return object if the proxy {receiver} is not callable.
- assembler->Branch(assembler->IsCallableMap(map), &return_function,
- &return_object);
+ Branch(IsCallableMap(map), &return_function, &return_object);
}
// Default
- assembler->Bind(&return_object);
- assembler->Return(assembler->HeapConstant(
- assembler->isolate()->factory()->object_to_string()));
+ Bind(&return_object);
+ Return(HeapConstant(isolate()->factory()->object_to_string()));
}
}
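// With the ToObject call added above, primitive receivers now reach the
// switch as JSValue wrappers and are classified in return_jsvalue, e.g.:
//
//   Object.prototype.toString.call(42)
//     -> ToObject(42) yields a JSValue wrapping the Smi 42
//     -> the switch dispatches to return_jsvalue
//     -> LoadJSValueValue(receiver) is a Smi -> "[object Number]"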
-void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(ObjectCreate, ObjectBuiltinsAssembler) {
+ Node* prototype = Parameter(1);
+ Node* properties = Parameter(2);
+ Node* context = Parameter(3 + 2);
- Node* prototype = a->Parameter(1);
- Node* properties = a->Parameter(2);
- Node* context = a->Parameter(3 + 2);
-
- Label call_runtime(a, Label::kDeferred), prototype_valid(a), no_properties(a);
+ Label call_runtime(this, Label::kDeferred), prototype_valid(this),
+ no_properties(this);
{
- a->Comment("Argument 1 check: prototype");
- a->GotoIf(a->WordEqual(prototype, a->NullConstant()), &prototype_valid);
- a->BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
+ Comment("Argument 1 check: prototype");
+ GotoIf(WordEqual(prototype, NullConstant()), &prototype_valid);
+ BranchIfJSReceiver(prototype, &prototype_valid, &call_runtime);
}
- a->Bind(&prototype_valid);
+ Bind(&prototype_valid);
{
- a->Comment("Argument 2 check: properties");
+ Comment("Argument 2 check: properties");
    // Check that we have a simple object.
- a->GotoIf(a->TaggedIsSmi(properties), &call_runtime);
+ GotoIf(TaggedIsSmi(properties), &call_runtime);
// Undefined implies no properties.
- a->GotoIf(a->WordEqual(properties, a->UndefinedConstant()), &no_properties);
- Node* properties_map = a->LoadMap(properties);
- a->GotoIf(a->IsSpecialReceiverMap(properties_map), &call_runtime);
+ GotoIf(WordEqual(properties, UndefinedConstant()), &no_properties);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(IsSpecialReceiverMap(properties_map), &call_runtime);
// Stay on the fast path only if there are no elements.
- a->GotoUnless(a->WordEqual(a->LoadElements(properties),
- a->LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
- &call_runtime);
+ GotoUnless(WordEqual(LoadElements(properties),
+ LoadRoot(Heap::kEmptyFixedArrayRootIndex)),
+ &call_runtime);
    // Handle dictionary objects or fast objects with properties in the
    // runtime.
- Node* bit_field3 = a->LoadMapBitField3(properties_map);
- a->GotoIf(a->IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
- a->Branch(a->IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
- &call_runtime, &no_properties);
+ Node* bit_field3 = LoadMapBitField3(properties_map);
+ GotoIf(IsSetWord32<Map::DictionaryMap>(bit_field3), &call_runtime);
+ Branch(IsSetWord32<Map::NumberOfOwnDescriptorsBits>(bit_field3),
+ &call_runtime, &no_properties);
}
// Create a new object with the given prototype.
- a->Bind(&no_properties);
+ Bind(&no_properties);
{
- Variable map(a, MachineRepresentation::kTagged);
- Variable properties(a, MachineRepresentation::kTagged);
- Label non_null_proto(a), instantiate_map(a), good(a);
+ Variable map(this, MachineRepresentation::kTagged);
+ Variable properties(this, MachineRepresentation::kTagged);
+ Label non_null_proto(this), instantiate_map(this), good(this);
- a->Branch(a->WordEqual(prototype, a->NullConstant()), &good,
- &non_null_proto);
+ Branch(WordEqual(prototype, NullConstant()), &good, &non_null_proto);
- a->Bind(&good);
+ Bind(&good);
{
- map.Bind(a->LoadContextElement(
+ map.Bind(LoadContextElement(
context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP));
- properties.Bind(
- a->AllocateNameDictionary(NameDictionary::kInitialCapacity));
- a->Goto(&instantiate_map);
+ properties.Bind(AllocateNameDictionary(NameDictionary::kInitialCapacity));
+ Goto(&instantiate_map);
}
- a->Bind(&non_null_proto);
+ Bind(&non_null_proto);
{
- properties.Bind(a->EmptyFixedArrayConstant());
+ properties.Bind(EmptyFixedArrayConstant());
Node* object_function =
- a->LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
- Node* object_function_map = a->LoadObjectField(
+ LoadContextElement(context, Context::OBJECT_FUNCTION_INDEX);
+ Node* object_function_map = LoadObjectField(
object_function, JSFunction::kPrototypeOrInitialMapOffset);
map.Bind(object_function_map);
- a->GotoIf(a->WordEqual(prototype, a->LoadMapPrototype(map.value())),
- &instantiate_map);
+ GotoIf(WordEqual(prototype, LoadMapPrototype(map.value())),
+ &instantiate_map);
// Try loading the prototype info.
Node* prototype_info =
- a->LoadMapPrototypeInfo(a->LoadMap(prototype), &call_runtime);
- a->Comment("Load ObjectCreateMap from PrototypeInfo");
+ LoadMapPrototypeInfo(LoadMap(prototype), &call_runtime);
+ Comment("Load ObjectCreateMap from PrototypeInfo");
Node* weak_cell =
- a->LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
- a->GotoIf(a->WordEqual(weak_cell, a->UndefinedConstant()), &call_runtime);
- map.Bind(a->LoadWeakCellValue(weak_cell, &call_runtime));
- a->Goto(&instantiate_map);
+ LoadObjectField(prototype_info, PrototypeInfo::kObjectCreateMap);
+ GotoIf(WordEqual(weak_cell, UndefinedConstant()), &call_runtime);
+ map.Bind(LoadWeakCellValue(weak_cell, &call_runtime));
+ Goto(&instantiate_map);
}
- a->Bind(&instantiate_map);
+ Bind(&instantiate_map);
{
- Node* instance =
- a->AllocateJSObjectFromMap(map.value(), properties.value());
- a->Return(instance);
+ Node* instance = AllocateJSObjectFromMap(map.value(), properties.value());
+ Return(instance);
}
}
- a->Bind(&call_runtime);
+ Bind(&call_runtime);
{
- a->Return(
- a->CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
+ Return(CallRuntime(Runtime::kObjectCreate, context, prototype, properties));
}
}
@@ -553,8 +380,8 @@ void Builtins::Generate_ObjectCreate(CodeStubAssembler* a) {
BUILTIN(ObjectDefineProperties) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> properties = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> properties = args.at(2);
RETURN_RESULT_OR_FAILURE(
isolate, JSReceiver::DefineProperties(isolate, target, properties));
@@ -564,9 +391,9 @@ BUILTIN(ObjectDefineProperties) {
BUILTIN(ObjectDefineProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> attributes = args.at<Object>(3);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
+ Handle<Object> attributes = args.at(3);
return JSReceiver::DefineProperty(isolate, target, key, attributes);
}
@@ -640,13 +467,33 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
- case LookupIterator::JSPROXY:
- return isolate->heap()->undefined_value();
+ case LookupIterator::JSPROXY: {
+ PropertyDescriptor desc;
+ Maybe<bool> found = JSProxy::GetOwnPropertyDescriptor(
+ isolate, it.GetHolder<JSProxy>(), it.GetName(), &desc);
+ MAYBE_RETURN(found, isolate->heap()->exception());
+ if (found.FromJust()) {
+ if (component == ACCESSOR_GETTER && desc.has_get()) {
+ return *desc.get();
+ }
+ if (component == ACCESSOR_SETTER && desc.has_set()) {
+ return *desc.set();
+ }
+ return isolate->heap()->undefined_value();
+ }
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype, JSProxy::GetPrototype(it.GetHolder<JSProxy>()));
+ if (prototype->IsNull(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+ return ObjectLookupAccessor(isolate, prototype, key, component);
+ }
case LookupIterator::INTEGER_INDEXED_EXOTIC:
- return isolate->heap()->undefined_value();
case LookupIterator::DATA:
- continue;
+ return isolate->heap()->undefined_value();
+
case LookupIterator::ACCESSOR: {
Handle<Object> maybe_pair = it.GetAccessors();
if (maybe_pair->IsAccessorPair()) {
@@ -666,9 +513,9 @@ Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
// https://tc39.github.io/ecma262/#sec-object.prototype.__defineGetter__
BUILTIN(ObjectDefineGetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0); // Receiver.
- Handle<Object> name = args.at<Object>(1);
- Handle<Object> getter = args.at<Object>(2);
+ Handle<Object> object = args.at(0); // Receiver.
+ Handle<Object> name = args.at(1);
+ Handle<Object> getter = args.at(2);
return ObjectDefineAccessor<ACCESSOR_GETTER>(isolate, object, name, getter);
}
@@ -676,9 +523,9 @@ BUILTIN(ObjectDefineGetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__defineSetter__
BUILTIN(ObjectDefineSetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0); // Receiver.
- Handle<Object> name = args.at<Object>(1);
- Handle<Object> setter = args.at<Object>(2);
+ Handle<Object> object = args.at(0); // Receiver.
+ Handle<Object> name = args.at(1);
+ Handle<Object> setter = args.at(2);
return ObjectDefineAccessor<ACCESSOR_SETTER>(isolate, object, name, setter);
}
@@ -686,8 +533,8 @@ BUILTIN(ObjectDefineSetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupGetter__
BUILTIN(ObjectLookupGetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> name = args.at<Object>(1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> name = args.at(1);
return ObjectLookupAccessor(isolate, object, name, ACCESSOR_GETTER);
}
@@ -695,8 +542,8 @@ BUILTIN(ObjectLookupGetter) {
// https://tc39.github.io/ecma262/#sec-object.prototype.__lookupSetter__
BUILTIN(ObjectLookupSetter) {
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> name = args.at<Object>(1);
+ Handle<Object> object = args.at(0);
+ Handle<Object> name = args.at(1);
return ObjectLookupAccessor(isolate, object, name, ACCESSOR_SETTER);
}
@@ -731,7 +578,7 @@ BUILTIN(ObjectSetPrototypeOf) {
// 1. Let O be ? RequireObjectCoercible(O).
Handle<Object> object = args.atOrUndefined(isolate, 1);
- if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -777,7 +624,7 @@ BUILTIN(ObjectPrototypeSetProto) {
HandleScope scope(isolate);
// 1. Let O be ? RequireObjectCoercible(this value).
Handle<Object> object = args.receiver();
- if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -785,7 +632,7 @@ BUILTIN(ObjectPrototypeSetProto) {
}
// 2. If Type(proto) is neither Object nor Null, return undefined.
- Handle<Object> proto = args.at<Object>(1);
+ Handle<Object> proto = args.at(1);
if (!proto->IsNull(isolate) && !proto->IsJSReceiver()) {
return isolate->heap()->undefined_value();
}
@@ -860,8 +707,8 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
BUILTIN(ObjectIs) {
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> value1 = args.at<Object>(1);
- Handle<Object> value2 = args.at<Object>(2);
+ Handle<Object> value1 = args.at(1);
+ Handle<Object> value2 = args.at(2);
return isolate->heap()->ToBoolean(value1->SameValue(*value2));
}
@@ -1022,50 +869,73 @@ BUILTIN(ObjectSeal) {
return *object;
}
-void Builtins::Generate_HasProperty(CodeStubAssembler* assembler) {
+TF_BUILTIN(CreateIterResultObject, ObjectBuiltinsAssembler) {
+ typedef CreateIterResultObjectDescriptor Descriptor;
+
+ Node* const value = Parameter(Descriptor::kValue);
+ Node* const done = Parameter(Descriptor::kDone);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+
+ Node* const result = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset, value);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset, done);
+
+ Return(result);
+}
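+
+// Roughly equivalent JS, as a sketch (assumes the context's unmodified
+// iterator result map):
+//   function CreateIterResultObject(value, done) { return {value, done}; }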
+
+TF_BUILTIN(HasProperty, ObjectBuiltinsAssembler) {
typedef HasPropertyDescriptor Descriptor;
- typedef compiler::Node Node;
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(
- assembler->HasProperty(object, key, context, Runtime::kHasProperty));
+ Return(HasProperty(object, key, context, Runtime::kHasProperty));
}
-void Builtins::Generate_ForInFilter(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(ForInFilter, ObjectBuiltinsAssembler) {
typedef ForInFilterDescriptor Descriptor;
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(assembler->ForInFilter(key, object, context));
+ Return(ForInFilter(key, object, context));
}
-void Builtins::Generate_InstanceOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) {
typedef CompareDescriptor Descriptor;
- Node* object = assembler->Parameter(Descriptor::kLeft);
- Node* callable = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Return(assembler->InstanceOf(object, callable, context));
+ Node* object = Parameter(Descriptor::kLeft);
+ Node* callable = Parameter(Descriptor::kRight);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Return(InstanceOf(object, callable, context));
}
// ES6 section 7.3.19 OrdinaryHasInstance ( C, O )
-void Builtins::Generate_OrdinaryHasInstance(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) {
typedef CompareDescriptor Descriptor;
- Node* constructor = assembler->Parameter(Descriptor::kLeft);
- Node* object = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* constructor = Parameter(Descriptor::kLeft);
+ Node* object = Parameter(Descriptor::kRight);
+ Node* context = Parameter(Descriptor::kContext);
+
+ Return(OrdinaryHasInstance(context, constructor, object));
+}
+
+TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
+ typedef TypeofDescriptor Descriptor;
+
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
- assembler->Return(
- assembler->OrdinaryHasInstance(context, constructor, object));
+ Return(GetSuperConstructor(object, context));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-promise.cc b/deps/v8/src/builtins/builtins-promise.cc
index 9f5d7c88d7..8a2eab06fc 100644
--- a/deps/v8/src/builtins/builtins-promise.cc
+++ b/deps/v8/src/builtins/builtins-promise.cc
@@ -2,82 +2,1554 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-promise.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-
-#include "src/promise-utils.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions
-BUILTIN(PromiseResolveClosure) {
- HandleScope scope(isolate);
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+Node* PromiseBuiltinsAssembler::AllocateJSPromise(Node* context) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const instance = AllocateJSObjectFromMap(initial_map);
+ return instance;
+}
+
+void PromiseBuiltinsAssembler::PromiseInit(Node* promise) {
+ StoreObjectField(promise, JSPromise::kStatusOffset,
+ SmiConstant(v8::Promise::kPending));
+ StoreObjectField(promise, JSPromise::kFlagsOffset, SmiConstant(0));
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context) {
+ return AllocateAndInitJSPromise(context, UndefinedConstant());
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndInitJSPromise(Node* context,
+ Node* parent) {
+ Node* const instance = AllocateJSPromise(context);
+ PromiseInit(instance);
+
+ Label out(this);
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance, parent);
+ Goto(&out);
- Handle<Context> context(isolate->context(), isolate);
+ Bind(&out);
+ return instance;
+}
+
+Node* PromiseBuiltinsAssembler::AllocateAndSetJSPromise(Node* context,
+ Node* status,
+ Node* result) {
+ CSA_ASSERT(this, TaggedIsSmi(status));
+
+ Node* const instance = AllocateJSPromise(context);
- if (PromiseUtils::HasAlreadyVisited(context)) {
- return isolate->heap()->undefined_value();
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kStatusOffset, status);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kResultOffset, result);
+ StoreObjectFieldNoWriteBarrier(instance, JSPromise::kFlagsOffset,
+ SmiConstant(0));
+
+ Label out(this);
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance,
+ UndefinedConstant());
+ Goto(&out);
+
+ Bind(&out);
+ return instance;
+}
+
+std::pair<Node*, Node*>
+PromiseBuiltinsAssembler::CreatePromiseResolvingFunctions(
+ Node* promise, Node* debug_event, Node* native_context) {
+ Node* const promise_context = CreatePromiseResolvingFunctionsContext(
+ promise, debug_event, native_context);
+ Node* const map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* const resolve_info =
+ LoadContextElement(native_context, Context::PROMISE_RESOLVE_SHARED_FUN);
+ Node* const resolve =
+ AllocateFunctionWithMapAndContext(map, resolve_info, promise_context);
+ Node* const reject_info =
+ LoadContextElement(native_context, Context::PROMISE_REJECT_SHARED_FUN);
+ Node* const reject =
+ AllocateFunctionWithMapAndContext(map, reject_info, promise_context);
+ return std::make_pair(resolve, reject);
+}
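+
+// The pair above is the (resolve, reject) closures handed to a promise
+// executor, e.g. `new Promise((resolve, reject) => ...)` (illustrative).
+// Both share one context whose kAlreadyVisitedSlot plays the role of the
+// spec's [[AlreadyResolved]] record.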
+
+Node* PromiseBuiltinsAssembler::NewPromiseCapability(Node* context,
+ Node* constructor,
+ Node* debug_event) {
+ if (debug_event == nullptr) {
+ debug_event = TrueConstant();
}
- PromiseUtils::SetAlreadyVisited(context);
- Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
+ Node* native_context = LoadNativeContext(context);
+
+ Node* map = LoadRoot(Heap::kJSPromiseCapabilityMapRootIndex);
+ Node* capability = AllocateJSObjectFromMap(map);
+
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kPromiseOffset, UndefinedConstant());
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kResolveOffset, UndefinedConstant());
+ StoreObjectFieldNoWriteBarrier(capability, JSPromiseCapability::kRejectOffset,
+ UndefinedConstant());
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(capability);
+
+ Label if_builtin_promise(this), if_custom_promise(this, Label::kDeferred),
+ out(this);
+ Branch(WordEqual(constructor,
+ LoadContextElement(native_context,
+ Context::PROMISE_FUNCTION_INDEX)),
+ &if_builtin_promise, &if_custom_promise);
+
+ Bind(&if_builtin_promise);
+ {
+ Node* promise = AllocateJSPromise(context);
+ PromiseInit(promise);
+ StoreObjectFieldNoWriteBarrier(
+ capability, JSPromiseCapability::kPromiseOffset, promise);
+
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+
+ std::tie(resolve, reject) =
+ CreatePromiseResolvingFunctions(promise, debug_event, native_context);
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+ GotoUnless(IsPromiseHookEnabled(), &out);
+ CallRuntime(Runtime::kPromiseHookInit, context, promise,
+ UndefinedConstant());
+ Goto(&out);
+ }
+
+ Bind(&if_custom_promise);
+ {
+ Label if_notcallable(this, Label::kDeferred);
+ Node* executor_context =
+ CreatePromiseGetCapabilitiesExecutorContext(capability, native_context);
+ Node* executor_info = LoadContextElement(
+ native_context, Context::PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN);
+ Node* function_map = LoadContextElement(
+ native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
+ Node* executor = AllocateFunctionWithMapAndContext(
+ function_map, executor_info, executor_context);
+
+ Node* promise = ConstructJS(CodeFactory::Construct(isolate()), context,
+ constructor, executor);
+
+ Node* resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ GotoIf(TaggedIsSmi(resolve), &if_notcallable);
+ GotoUnless(IsCallableMap(LoadMap(resolve)), &if_notcallable);
+
+ Node* reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ GotoIf(TaggedIsSmi(reject), &if_notcallable);
+ GotoUnless(IsCallableMap(LoadMap(reject)), &if_notcallable);
+
+ StoreObjectField(capability, JSPromiseCapability::kPromiseOffset, promise);
+
+ Goto(&out);
+
+ Bind(&if_notcallable);
+ Node* message = SmiConstant(MessageTemplate::kPromiseNonCallable);
+ StoreObjectField(capability, JSPromiseCapability::kPromiseOffset,
+ UndefinedConstant());
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset,
+ UndefinedConstant());
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset,
+ UndefinedConstant());
+ CallRuntime(Runtime::kThrowTypeError, context, message);
+ var_result.Bind(UndefinedConstant());
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
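+
+// Spec-level sketch of what NewPromiseCapability(C) computes (illustrative):
+//   let resolve, reject;
+//   const promise = new C((res, rej) => { resolve = res; reject = rej; });
+//   // TypeError unless both resolve and reject are callable
+//   return {promise, resolve, reject};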
+
+Node* PromiseBuiltinsAssembler::CreatePromiseContext(Node* native_context,
+ int slots) {
+ DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS);
+
+ Node* const context = Allocate(FixedArray::SizeFor(slots));
+ StoreMapNoWriteBarrier(context, Heap::kFunctionContextMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(context, FixedArray::kLengthOffset,
+ SmiConstant(slots));
+
+ Node* const empty_fn =
+ LoadContextElement(native_context, Context::CLOSURE_INDEX);
+ StoreContextElementNoWriteBarrier(context, Context::CLOSURE_INDEX, empty_fn);
+ StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX,
+ UndefinedConstant());
+ StoreContextElementNoWriteBarrier(context, Context::EXTENSION_INDEX,
+ TheHoleConstant());
+ StoreContextElementNoWriteBarrier(context, Context::NATIVE_CONTEXT_INDEX,
+ native_context);
+ return context;
+}
- MaybeHandle<Object> maybe_result;
- Handle<Object> argv[] = {promise, value};
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Execution::Call(isolate, isolate->promise_resolve(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv));
- return isolate->heap()->undefined_value();
+Node* PromiseBuiltinsAssembler::CreatePromiseResolvingFunctionsContext(
+ Node* promise, Node* debug_event, Node* native_context) {
+ Node* const context =
+ CreatePromiseContext(native_context, kPromiseContextLength);
+ StoreContextElementNoWriteBarrier(context, kAlreadyVisitedSlot,
+ SmiConstant(0));
+ StoreContextElementNoWriteBarrier(context, kPromiseSlot, promise);
+ StoreContextElementNoWriteBarrier(context, kDebugEventSlot, debug_event);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::CreatePromiseGetCapabilitiesExecutorContext(
+ Node* promise_capability, Node* native_context) {
+ int kContextLength = kCapabilitiesContextLength;
+ Node* context = CreatePromiseContext(native_context, kContextLength);
+ StoreContextElementNoWriteBarrier(context, kCapabilitySlot,
+ promise_capability);
+ return context;
+}
+
+Node* PromiseBuiltinsAssembler::ThrowIfNotJSReceiver(
+ Node* context, Node* value, MessageTemplate::Template msg_template,
+ const char* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Variable var_value_map(this, MachineRepresentation::kTagged);
+
+ GotoIf(TaggedIsSmi(value), &throw_exception);
+
+ // Load the instance type of the {value}.
+ var_value_map.Bind(LoadMap(value));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
+
+ Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
+
+ // The {value} is not a compatible receiver for this method.
+ Bind(&throw_exception);
+ {
+ Node* const method =
+ method_name == nullptr
+ ? UndefinedConstant()
+ : HeapConstant(
+ isolate()->factory()->NewStringFromAsciiChecked(method_name));
+ Node* const message_id = SmiConstant(msg_template);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, method);
+ var_value_map.Bind(UndefinedConstant());
+ Goto(&out); // Never reached.
+ }
+
+ Bind(&out);
+ return var_value_map.value();
+}
+
+Node* PromiseBuiltinsAssembler::PromiseHasHandler(Node* promise) {
+ Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+ return IsSetWord(SmiUntag(flags), 1 << JSPromise::kHasHandlerBit);
+}
+
+void PromiseBuiltinsAssembler::PromiseSetHasHandler(Node* promise) {
+ Node* const flags = LoadObjectField(promise, JSPromise::kFlagsOffset);
+ Node* const new_flags =
+ SmiOr(flags, SmiConstant(1 << JSPromise::kHasHandlerBit));
+ StoreObjectFieldNoWriteBarrier(promise, JSPromise::kFlagsOffset, new_flags);
+}
+
+Node* PromiseBuiltinsAssembler::SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor) {
+ Isolate* isolate = this->isolate();
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(default_constructor);
+
+ // 2. Let C be ? Get(O, "constructor").
+ Node* const constructor_str =
+ HeapConstant(isolate->factory()->constructor_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const constructor =
+ CallStub(getproperty_callable, context, object, constructor_str);
+
+ // 3. If C is undefined, return defaultConstructor.
+ Label out(this);
+ GotoIf(IsUndefined(constructor), &out);
+
+ // 4. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, constructor,
+ MessageTemplate::kConstructorNotReceiver);
+
+ // 5. Let S be ? Get(C, @@species).
+ Node* const species_symbol =
+ HeapConstant(isolate->factory()->species_symbol());
+ Node* const species =
+ CallStub(getproperty_callable, context, constructor, species_symbol);
+
+ // 6. If S is either undefined or null, return defaultConstructor.
+ GotoIf(IsUndefined(species), &out);
+ GotoIf(WordEqual(species, NullConstant()), &out);
+
+ // 7. If IsConstructor(S) is true, return S.
+ Label throw_error(this);
+ Node* species_bitfield = LoadMapBitField(LoadMap(species));
+ GotoUnless(Word32Equal(Word32And(species_bitfield,
+ Int32Constant((1 << Map::kIsConstructor))),
+ Int32Constant(1 << Map::kIsConstructor)),
+ &throw_error);
+ var_result.Bind(species);
+ Goto(&out);
+
+ // 8. Throw a TypeError exception.
+ Bind(&throw_error);
+ {
+ Node* const message_id =
+ SmiConstant(MessageTemplate::kSpeciesNotConstructor);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id);
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
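+
+// E.g. (sketch) a Promise subclass can redirect derived promises back to the
+// parent constructor via @@species:
+//   class MyPromise extends Promise {
+//     static get [Symbol.species]() { return Promise; }
+//   }
+//   MyPromise.resolve(1).then(x => x) instanceof MyPromise;  // false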
+
+void PromiseBuiltinsAssembler::AppendPromiseCallback(int offset, Node* promise,
+ Node* value) {
+ Node* elements = LoadObjectField(promise, offset);
+ Node* length = LoadFixedArrayBaseLength(elements);
+ CodeStubAssembler::ParameterMode mode = OptimalParameterMode();
+ length = TaggedToParameter(length, mode);
+
+ Node* delta = IntPtrOrSmiConstant(1, mode);
+ Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+ const ElementsKind kind = FAST_ELEMENTS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ const CodeStubAssembler::AllocationFlags flags =
+ CodeStubAssembler::kAllowLargeObjectAllocation;
+ int additional_offset = 0;
+
+ Node* new_elements = AllocateFixedArray(kind, new_capacity, mode, flags);
+
+ CopyFixedArrayElements(kind, elements, new_elements, length, barrier_mode,
+ mode);
+ StoreFixedArrayElement(new_elements, length, value, barrier_mode,
+ additional_offset, mode);
+
+ StoreObjectField(promise, offset, new_elements);
+}
+
+Node* PromiseBuiltinsAssembler::InternalPromiseThen(Node* context,
+ Node* promise,
+ Node* on_resolve,
+ Node* on_reject) {
+ Isolate* isolate = this->isolate();
+
+ // 2. If IsPromise(promise) is false, throw a TypeError exception.
+ ThrowIfNotInstanceType(context, promise, JS_PROMISE_TYPE,
+ "Promise.prototype.then");
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+
+ // 3. Let C be ? SpeciesConstructor(promise, %Promise%).
+ Node* constructor = SpeciesConstructor(context, promise, promise_fun);
+
+ // 4. Let resultCapability be ? NewPromiseCapability(C).
+ Callable call_callable = CodeFactory::Call(isolate);
+ Label fast_promise_capability(this), promise_capability(this),
+ perform_promise_then(this);
+ Variable var_deferred_promise(this, MachineRepresentation::kTagged),
+ var_deferred_on_resolve(this, MachineRepresentation::kTagged),
+ var_deferred_on_reject(this, MachineRepresentation::kTagged);
+
+ Branch(WordEqual(promise_fun, constructor), &fast_promise_capability,
+ &promise_capability);
+
+ Bind(&fast_promise_capability);
+ {
+ Node* const deferred_promise = AllocateAndInitJSPromise(context, promise);
+ var_deferred_promise.Bind(deferred_promise);
+ var_deferred_on_resolve.Bind(UndefinedConstant());
+ var_deferred_on_reject.Bind(UndefinedConstant());
+ Goto(&perform_promise_then);
+ }
+
+ Bind(&promise_capability);
+ {
+ Node* const capability = NewPromiseCapability(context, constructor);
+ var_deferred_promise.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset));
+ var_deferred_on_resolve.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset));
+ var_deferred_on_reject.Bind(
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset));
+ Goto(&perform_promise_then);
+ }
+
+ // 5. Return PerformPromiseThen(promise, onFulfilled, onRejected,
+ // resultCapability).
+ Bind(&perform_promise_then);
+ Node* const result = InternalPerformPromiseThen(
+ context, promise, on_resolve, on_reject, var_deferred_promise.value(),
+ var_deferred_on_resolve.value(), var_deferred_on_reject.value());
+ return result;
+}
+
+Node* PromiseBuiltinsAssembler::InternalPerformPromiseThen(
+ Node* context, Node* promise, Node* on_resolve, Node* on_reject,
+ Node* deferred_promise, Node* deferred_on_resolve,
+ Node* deferred_on_reject) {
+ Node* const native_context = LoadNativeContext(context);
+
+ Variable var_on_resolve(this, MachineRepresentation::kTagged),
+ var_on_reject(this, MachineRepresentation::kTagged);
+
+ var_on_resolve.Bind(on_resolve);
+ var_on_reject.Bind(on_reject);
+
+ Label out(this), if_onresolvenotcallable(this), onrejectcheck(this),
+ append_callbacks(this);
+ GotoIf(TaggedIsSmi(on_resolve), &if_onresolvenotcallable);
+
+ Node* const on_resolve_map = LoadMap(on_resolve);
+ Branch(IsCallableMap(on_resolve_map), &onrejectcheck,
+ &if_onresolvenotcallable);
+
+ Bind(&if_onresolvenotcallable);
+ {
+ var_on_resolve.Bind(LoadContextElement(
+ native_context, Context::PROMISE_ID_RESOLVE_HANDLER_INDEX));
+ Goto(&onrejectcheck);
+ }
+
+ Bind(&onrejectcheck);
+ {
+ Label if_onrejectnotcallable(this);
+ GotoIf(TaggedIsSmi(on_reject), &if_onrejectnotcallable);
+
+ Node* const on_reject_map = LoadMap(on_reject);
+ Branch(IsCallableMap(on_reject_map), &append_callbacks,
+ &if_onrejectnotcallable);
+
+ Bind(&if_onrejectnotcallable);
+ {
+ var_on_reject.Bind(LoadContextElement(
+ native_context, Context::PROMISE_ID_REJECT_HANDLER_INDEX));
+ Goto(&append_callbacks);
+ }
+ }
+
+ Bind(&append_callbacks);
+ {
+ Label fulfilled_check(this);
+ Node* const status = LoadObjectField(promise, JSPromise::kStatusOffset);
+ GotoUnless(SmiEqual(status, SmiConstant(v8::Promise::kPending)),
+ &fulfilled_check);
+
+ Node* const existing_deferred_promise =
+ LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+ Label if_noexistingcallbacks(this), if_existingcallbacks(this);
+ Branch(IsUndefined(existing_deferred_promise), &if_noexistingcallbacks,
+ &if_existingcallbacks);
+
+ Bind(&if_noexistingcallbacks);
+ {
+ // Store callbacks directly in the slots.
+ StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+ deferred_promise);
+ StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+ deferred_on_resolve);
+ StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+ deferred_on_reject);
+ StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+ var_on_resolve.value());
+ StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+ var_on_reject.value());
+ Goto(&out);
+ }
+
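+    // Storage scheme assumed above and below (sketch): a single subscriber
+    // keeps its callbacks directly in the promise's slots; a second
+    // subscriber migrates each slot to a two-element FixedArray, and any
+    // further subscribers append to those arrays.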
+ Bind(&if_existingcallbacks);
+ {
+ Label if_singlecallback(this), if_multiplecallbacks(this);
+ BranchIfJSObject(existing_deferred_promise, &if_singlecallback,
+ &if_multiplecallbacks);
+
+ Bind(&if_singlecallback);
+ {
+ // Create new FixedArrays to store callbacks, and migrate
+ // existing callbacks.
+ Node* const deferred_promise_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(deferred_promise_arr, 0,
+ existing_deferred_promise);
+ StoreFixedArrayElement(deferred_promise_arr, 1, deferred_promise);
+
+ Node* const deferred_on_resolve_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ deferred_on_resolve_arr, 0,
+ LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset));
+ StoreFixedArrayElement(deferred_on_resolve_arr, 1, deferred_on_resolve);
+
+ Node* const deferred_on_reject_arr =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ deferred_on_reject_arr, 0,
+ LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset));
+ StoreFixedArrayElement(deferred_on_reject_arr, 1, deferred_on_reject);
+
+ Node* const fulfill_reactions =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ fulfill_reactions, 0,
+ LoadObjectField(promise, JSPromise::kFulfillReactionsOffset));
+ StoreFixedArrayElement(fulfill_reactions, 1, var_on_resolve.value());
+
+ Node* const reject_reactions =
+ AllocateFixedArray(FAST_ELEMENTS, IntPtrConstant(2));
+ StoreFixedArrayElement(
+ reject_reactions, 0,
+ LoadObjectField(promise, JSPromise::kRejectReactionsOffset));
+ StoreFixedArrayElement(reject_reactions, 1, var_on_reject.value());
+
+ // Store new FixedArrays in promise.
+ StoreObjectField(promise, JSPromise::kDeferredPromiseOffset,
+ deferred_promise_arr);
+ StoreObjectField(promise, JSPromise::kDeferredOnResolveOffset,
+ deferred_on_resolve_arr);
+ StoreObjectField(promise, JSPromise::kDeferredOnRejectOffset,
+ deferred_on_reject_arr);
+ StoreObjectField(promise, JSPromise::kFulfillReactionsOffset,
+ fulfill_reactions);
+ StoreObjectField(promise, JSPromise::kRejectReactionsOffset,
+ reject_reactions);
+ Goto(&out);
+ }
+
+ Bind(&if_multiplecallbacks);
+ {
+ AppendPromiseCallback(JSPromise::kDeferredPromiseOffset, promise,
+ deferred_promise);
+ AppendPromiseCallback(JSPromise::kDeferredOnResolveOffset, promise,
+ deferred_on_resolve);
+ AppendPromiseCallback(JSPromise::kDeferredOnRejectOffset, promise,
+ deferred_on_reject);
+ AppendPromiseCallback(JSPromise::kFulfillReactionsOffset, promise,
+ var_on_resolve.value());
+ AppendPromiseCallback(JSPromise::kRejectReactionsOffset, promise,
+ var_on_reject.value());
+ Goto(&out);
+ }
+ }
+
+ Bind(&fulfilled_check);
+ {
+ Label reject(this);
+ Node* const result = LoadObjectField(promise, JSPromise::kResultOffset);
+ GotoUnless(WordEqual(status, SmiConstant(v8::Promise::kFulfilled)),
+ &reject);
+
+ Node* info = AllocatePromiseReactionJobInfo(
+ result, var_on_resolve.value(), deferred_promise, deferred_on_resolve,
+ deferred_on_reject, context);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
+ SmiConstant(v8::Promise::kFulfilled));
+ Goto(&out);
+
+ Bind(&reject);
+ {
+ Node* const has_handler = PromiseHasHandler(promise);
+ Label enqueue(this);
+
+ // TODO(gsathya): Fold these runtime calls and move to TF.
+ GotoIf(has_handler, &enqueue);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, promise);
+ Goto(&enqueue);
+
+ Bind(&enqueue);
+ {
+ Node* info = AllocatePromiseReactionJobInfo(
+ result, var_on_reject.value(), deferred_promise,
+ deferred_on_resolve, deferred_on_reject, context);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise,
+ info, SmiConstant(v8::Promise::kRejected));
+ Goto(&out);
+ }
+ }
+ }
+ }
+
+ Bind(&out);
+ PromiseSetHasHandler(promise);
+ return deferred_promise;
+}
+
+// Promise fast path implementations rely on unmodified JSPromise instances.
+// We use a fairly coarse granularity for this and simply check whether both
+// the promise itself is unmodified (i.e. its map has not changed) and its
+// prototype is unmodified.
+// TODO(gsathya): Refactor this out to prevent code dupe with builtins-regexp
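+// A JS-level sketch of what invalidates the fast path (illustrative):
+//   const p = Promise.resolve(1);
+//   p.extra = 1;                    // transitions the promise's map
+//   Promise.prototype.extra = 1;    // transitions the prototype's map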
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* context, Node* promise,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, promise, if_isunmodified,
+ if_ismodified);
+}
+
+void PromiseBuiltinsAssembler::BranchIfFastPath(Node* native_context,
+ Node* promise_fun,
+ Node* promise,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ CSA_ASSERT(this, IsNativeContext(native_context));
+ CSA_ASSERT(this,
+ WordEqual(promise_fun,
+ LoadContextElement(native_context,
+ Context::PROMISE_FUNCTION_INDEX)));
+
+ Node* const map = LoadMap(promise);
+ Node* const initial_map =
+ LoadObjectField(promise_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
+
+ GotoUnless(has_initialmap, if_ismodified);
+
+ Node* const initial_proto_initial_map =
+ LoadContextElement(native_context, Context::PROMISE_PROTOTYPE_MAP_INDEX);
+ Node* const proto_map = LoadMap(LoadMapPrototype(map));
+ Node* const proto_has_initialmap =
+ WordEqual(proto_map, initial_proto_initial_map);
+
+ Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+}
+
+Node* PromiseBuiltinsAssembler::AllocatePromiseResolveThenableJobInfo(
+ Node* thenable, Node* then, Node* resolve, Node* reject, Node* context) {
+ Node* const info = Allocate(PromiseResolveThenableJobInfo::kSize);
+ StoreMapNoWriteBarrier(info,
+ Heap::kPromiseResolveThenableJobInfoMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kThenableOffset, thenable);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kThenOffset, then);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kResolveOffset, resolve);
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kRejectOffset, reject);
+ StoreObjectFieldNoWriteBarrier(info,
+ PromiseResolveThenableJobInfo::kDebugIdOffset,
+ SmiConstant(kDebugPromiseNoID));
+ StoreObjectFieldNoWriteBarrier(
+ info, PromiseResolveThenableJobInfo::kContextOffset, context);
+ return info;
+}
+
+void PromiseBuiltinsAssembler::InternalResolvePromise(Node* context,
+ Node* promise,
+ Node* result) {
+ Isolate* isolate = this->isolate();
+
+ Variable var_reason(this, MachineRepresentation::kTagged),
+ var_then(this, MachineRepresentation::kTagged);
+
+ Label do_enqueue(this), fulfill(this), if_cycle(this, Label::kDeferred),
+ if_rejectpromise(this, Label::kDeferred), out(this);
+
+ Label cycle_check(this);
+ GotoUnless(IsPromiseHookEnabled(), &cycle_check);
+ CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+ Goto(&cycle_check);
+
+ Bind(&cycle_check);
+ // 6. If SameValue(resolution, promise) is true, then
+ GotoIf(SameValue(promise, result, context), &if_cycle);
+
+ // 7. If Type(resolution) is not Object, then
+ GotoIf(TaggedIsSmi(result), &fulfill);
+ GotoUnless(IsJSReceiver(result), &fulfill);
+
+ Label if_nativepromise(this), if_notnativepromise(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, result, &if_nativepromise,
+ &if_notnativepromise);
+
+  // Resolution is a native promise; if it's already resolved or
+  // rejected, short-circuit the resolution procedure by directly
+  // reusing the value from the promise.
+ Bind(&if_nativepromise);
+ {
+ Node* const thenable_status =
+ LoadObjectField(result, JSPromise::kStatusOffset);
+ Node* const thenable_value =
+ LoadObjectField(result, JSPromise::kResultOffset);
+
+ Label if_isnotpending(this);
+ GotoUnless(SmiEqual(SmiConstant(v8::Promise::kPending), thenable_status),
+ &if_isnotpending);
+
+ // TODO(gsathya): Use a marker here instead of the actual then
+ // callback, and check for the marker in PromiseResolveThenableJob
+ // and perform PromiseThen.
+ Node* const then =
+ LoadContextElement(native_context, Context::PROMISE_THEN_INDEX);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+
+ Bind(&if_isnotpending);
+ {
+ Label if_fulfilled(this), if_rejected(this);
+ Branch(SmiEqual(SmiConstant(v8::Promise::kFulfilled), thenable_status),
+ &if_fulfilled, &if_rejected);
+
+ Bind(&if_fulfilled);
+ {
+ PromiseFulfill(context, promise, thenable_value,
+ v8::Promise::kFulfilled);
+ PromiseSetHasHandler(promise);
+ Goto(&out);
+ }
+
+ Bind(&if_rejected);
+ {
+ Label reject(this);
+ Node* const has_handler = PromiseHasHandler(result);
+
+ // Promise has already been rejected, but had no handler.
+ // Revoke previously triggered reject event.
+ GotoIf(has_handler, &reject);
+ CallRuntime(Runtime::kPromiseRevokeReject, context, result);
+ Goto(&reject);
+
+ Bind(&reject);
+        // Don't cause a debug event as this case is forwarding a rejection.
+ InternalPromiseReject(context, promise, thenable_value, false);
+ PromiseSetHasHandler(result);
+ Goto(&out);
+ }
+ }
+ }
+
+ Bind(&if_notnativepromise);
+ {
+ // 8. Let then be Get(resolution, "then").
+ Node* const then_str = HeapConstant(isolate->factory()->then_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const then =
+ CallStub(getproperty_callable, context, result, then_str);
+
+ // 9. If then is an abrupt completion, then
+ GotoIfException(then, &if_rejectpromise, &var_reason);
+
+ // 11. If IsCallable(thenAction) is false, then
+ GotoIf(TaggedIsSmi(then), &fulfill);
+ Node* const then_map = LoadMap(then);
+ GotoUnless(IsCallableMap(then_map), &fulfill);
+ var_then.Bind(then);
+ Goto(&do_enqueue);
+ }
+
+ Bind(&do_enqueue);
+ {
+ // TODO(gsathya): Add fast path for native promises with unmodified
+ // PromiseThen (which don't need these resolving functions, but
+ // instead can just call resolve/reject directly).
+ Node* resolve = nullptr;
+ Node* reject = nullptr;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ promise, FalseConstant(), native_context);
+
+ Node* const info = AllocatePromiseResolveThenableJobInfo(
+ result, var_then.value(), resolve, reject, context);
+
+ Label enqueue(this);
+ GotoUnless(IsDebugActive(), &enqueue);
+
+ Node* const debug_id =
+ CallRuntime(Runtime::kDebugNextAsyncTaskId, context, promise);
+ StoreObjectField(info, PromiseResolveThenableJobInfo::kDebugIdOffset,
+ debug_id);
+
+ GotoIf(TaggedIsSmi(result), &enqueue);
+ GotoUnless(HasInstanceType(result, JS_PROMISE_TYPE), &enqueue);
+
+ // Mark the dependency of the new promise on the resolution
+ Node* const key =
+ HeapConstant(isolate->factory()->promise_handled_by_symbol());
+ CallRuntime(Runtime::kSetProperty, context, result, key, promise,
+ SmiConstant(STRICT));
+ Goto(&enqueue);
+
+ // 12. Perform EnqueueJob("PromiseJobs",
+    //        PromiseResolveThenableJob, « promise, resolution, thenAction »).
+ Bind(&enqueue);
+ // TODO(gsathya): Move this to TF
+ CallRuntime(Runtime::kEnqueuePromiseResolveThenableJob, context, info);
+ Goto(&out);
+ }
+
+ // 7.b Return FulfillPromise(promise, resolution).
+ Bind(&fulfill);
+ {
+ PromiseFulfill(context, promise, result, v8::Promise::kFulfilled);
+ Goto(&out);
+ }
+
+ Bind(&if_cycle);
+ {
+ // 6.a Let selfResolutionError be a newly created TypeError object.
+ Node* const message_id = SmiConstant(MessageTemplate::kPromiseCyclic);
+ Node* const error =
+ CallRuntime(Runtime::kNewTypeError, context, message_id, result);
+ var_reason.Bind(error);
+
+ // 6.b Return RejectPromise(promise, selfResolutionError).
+ Goto(&if_rejectpromise);
+ }
+
+ // 9.a Return RejectPromise(promise, then.[[Value]]).
+ Bind(&if_rejectpromise);
+ {
+ InternalPromiseReject(context, promise, var_reason.value(), true);
+ Goto(&out);
+ }
+
+ Bind(&out);
+}
+
+void PromiseBuiltinsAssembler::PromiseFulfill(
+ Node* context, Node* promise, Node* result,
+ v8::Promise::PromiseState status) {
+ Label do_promisereset(this), debug_async_event_enqueue_recurring(this);
+
+ Node* const status_smi = SmiConstant(static_cast<int>(status));
+ Node* const deferred_promise =
+ LoadObjectField(promise, JSPromise::kDeferredPromiseOffset);
+
+ GotoIf(IsUndefined(deferred_promise), &debug_async_event_enqueue_recurring);
+
+ Node* const tasks =
+ status == v8::Promise::kFulfilled
+ ? LoadObjectField(promise, JSPromise::kFulfillReactionsOffset)
+ : LoadObjectField(promise, JSPromise::kRejectReactionsOffset);
+
+ Node* const deferred_on_resolve =
+ LoadObjectField(promise, JSPromise::kDeferredOnResolveOffset);
+ Node* const deferred_on_reject =
+ LoadObjectField(promise, JSPromise::kDeferredOnRejectOffset);
+
+ Node* const info = AllocatePromiseReactionJobInfo(
+ result, tasks, deferred_promise, deferred_on_resolve, deferred_on_reject,
+ context);
+
+ CallRuntime(Runtime::kEnqueuePromiseReactionJob, context, promise, info,
+ status_smi);
+ Goto(&debug_async_event_enqueue_recurring);
+
+ Bind(&debug_async_event_enqueue_recurring);
+ {
+ GotoUnless(IsDebugActive(), &do_promisereset);
+ CallRuntime(Runtime::kDebugAsyncEventEnqueueRecurring, context, promise,
+ status_smi);
+ Goto(&do_promisereset);
+ }
+
+ Bind(&do_promisereset);
+ {
+ StoreObjectField(promise, JSPromise::kStatusOffset, status_smi);
+ StoreObjectField(promise, JSPromise::kResultOffset, result);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredPromiseOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredOnResolveOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kDeferredOnRejectOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kFulfillReactionsOffset,
+ Heap::kUndefinedValueRootIndex);
+ StoreObjectFieldRoot(promise, JSPromise::kRejectReactionsOffset,
+ Heap::kUndefinedValueRootIndex);
+ }
+}
+
+void PromiseBuiltinsAssembler::BranchIfAccessCheckFailed(
+ Node* context, Node* native_context, Node* promise_constructor,
+ Node* executor, Label* if_noaccess) {
+ Variable var_executor(this, MachineRepresentation::kTagged);
+ var_executor.Bind(executor);
+ Label has_access(this), call_runtime(this, Label::kDeferred);
+
+  // If the executor is a bound function, follow the bound target function
+  // chain until we reach an actual function.
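+  // E.g. an executor such as `new Promise(fn.bind(null))` (illustrative) is
+  // unwrapped to its [[BoundTargetFunction]] before the context check below.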
+ Label found_function(this), loop_over_bound_function(this, &var_executor);
+ Goto(&loop_over_bound_function);
+ Bind(&loop_over_bound_function);
+ {
+ Node* executor_type = LoadInstanceType(var_executor.value());
+ GotoIf(InstanceTypeEqual(executor_type, JS_FUNCTION_TYPE), &found_function);
+ GotoUnless(InstanceTypeEqual(executor_type, JS_BOUND_FUNCTION_TYPE),
+ &call_runtime);
+ var_executor.Bind(LoadObjectField(
+ var_executor.value(), JSBoundFunction::kBoundTargetFunctionOffset));
+ Goto(&loop_over_bound_function);
+ }
+
+  // Load the context from the function and compare it to the Promise
+  // constructor's context. If they match, everything is fine; otherwise, bail
+  // out to the runtime.
+ Bind(&found_function);
+ {
+ Node* function_context =
+ LoadObjectField(var_executor.value(), JSFunction::kContextOffset);
+ Node* native_function_context = LoadNativeContext(function_context);
+ Branch(WordEqual(native_context, native_function_context), &has_access,
+ &call_runtime);
+ }
+
+ Bind(&call_runtime);
+ {
+ Branch(WordEqual(CallRuntime(Runtime::kAllowDynamicFunction, context,
+ promise_constructor),
+ BooleanConstant(true)),
+ &has_access, if_noaccess);
+ }
+
+ Bind(&has_access);
+}
+
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+ Node* promise, Node* value,
+ Node* debug_event) {
+ Label out(this);
+ GotoUnless(IsDebugActive(), &out);
+ GotoUnless(WordEqual(TrueConstant(), debug_event), &out);
+ CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+ Goto(&out);
+
+ Bind(&out);
+ InternalPromiseReject(context, promise, value, false);
+}
+
+// This duplicates a lot of logic from PromiseRejectEvent in
+// runtime-promise.cc
+void PromiseBuiltinsAssembler::InternalPromiseReject(Node* context,
+ Node* promise, Node* value,
+ bool debug_event) {
+ Label fulfill(this), report_unhandledpromise(this), run_promise_hook(this);
+
+ if (debug_event) {
+ GotoUnless(IsDebugActive(), &run_promise_hook);
+ CallRuntime(Runtime::kDebugPromiseReject, context, promise, value);
+ Goto(&run_promise_hook);
+ } else {
+ Goto(&run_promise_hook);
+ }
+
+ Bind(&run_promise_hook);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &report_unhandledpromise);
+ CallRuntime(Runtime::kPromiseHookResolve, context, promise);
+ Goto(&report_unhandledpromise);
+ }
+
+ Bind(&report_unhandledpromise);
+ {
+ GotoIf(PromiseHasHandler(promise), &fulfill);
+ CallRuntime(Runtime::kReportPromiseReject, context, promise, value);
+ Goto(&fulfill);
+ }
+
+ Bind(&fulfill);
+ PromiseFulfill(context, promise, value, v8::Promise::kRejected);
}
// ES#sec-promise-reject-functions
// Promise Reject Functions
-BUILTIN(PromiseRejectClosure) {
- HandleScope scope(isolate);
+TF_BUILTIN(PromiseRejectClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label out(this);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ int has_already_visited_slot = kAlreadyVisitedSlot;
+
+ Node* const has_already_visited =
+ LoadContextElement(context, has_already_visited_slot);
+
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+  // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+ SmiConstant(1));
+
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise =
+ LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+ Node* const debug_event =
+ LoadContextElement(context, IntPtrConstant(kDebugEventSlot));
+
+ InternalPromiseReject(context, promise, value, debug_event);
+ Return(UndefinedConstant());
+
+ Bind(&out);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(PromiseConstructor, PromiseBuiltinsAssembler) {
+ Node* const executor = Parameter(1);
+ Node* const new_target = Parameter(2);
+ Node* const context = Parameter(4);
+ Isolate* isolate = this->isolate();
+
+ Label if_targetisundefined(this, Label::kDeferred);
+
+ GotoIf(IsUndefined(new_target), &if_targetisundefined);
+
+ Label if_notcallable(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(executor), &if_notcallable);
+
+ Node* const executor_map = LoadMap(executor);
+ GotoUnless(IsCallableMap(executor_map), &if_notcallable);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Node* const is_debug_active = IsDebugActive();
+ Label if_targetisnotmodified(this),
+ if_targetismodified(this, Label::kDeferred), run_executor(this),
+ debug_push(this), if_noaccess(this, Label::kDeferred);
+
+ BranchIfAccessCheckFailed(context, native_context, promise_fun, executor,
+ &if_noaccess);
+
+ Branch(WordEqual(promise_fun, new_target), &if_targetisnotmodified,
+ &if_targetismodified);
+
+ Variable var_result(this, MachineRepresentation::kTagged),
+ var_reject_call(this, MachineRepresentation::kTagged),
+ var_reason(this, MachineRepresentation::kTagged);
+
+ Bind(&if_targetisnotmodified);
+ {
+ Node* const instance = AllocateAndInitJSPromise(context);
+ var_result.Bind(instance);
+ Goto(&debug_push);
+ }
+
+ Bind(&if_targetismodified);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const instance = constructor_assembler.EmitFastNewObject(
+ context, promise_fun, new_target);
+ PromiseInit(instance);
+ var_result.Bind(instance);
+
+ GotoUnless(IsPromiseHookEnabled(), &debug_push);
+ CallRuntime(Runtime::kPromiseHookInit, context, instance,
+ UndefinedConstant());
+ Goto(&debug_push);
+ }
+
+ Bind(&debug_push);
+ {
+ GotoUnless(is_debug_active, &run_executor);
+ CallRuntime(Runtime::kDebugPushPromise, context, var_result.value());
+ Goto(&run_executor);
+ }
+
+ Bind(&run_executor);
+ {
+ Label out(this), if_rejectpromise(this), debug_pop(this, Label::kDeferred);
+
+ Node *resolve, *reject;
+ std::tie(resolve, reject) = CreatePromiseResolvingFunctions(
+ var_result.value(), TrueConstant(), native_context);
+ Callable call_callable = CodeFactory::Call(isolate);
+
+ Node* const maybe_exception = CallJS(call_callable, context, executor,
+ UndefinedConstant(), resolve, reject);
+
+ GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+ Branch(is_debug_active, &debug_pop, &out);
+
+ Bind(&if_rejectpromise);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ CallJS(call_callable, context, reject, UndefinedConstant(),
+ var_reason.value());
+ Branch(is_debug_active, &debug_pop, &out);
+ }
+
+ Bind(&debug_pop);
+ {
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Goto(&out);
+ }
+ Bind(&out);
+ Return(var_result.value());
+ }
+
+ // 1. If NewTarget is undefined, throw a TypeError exception.
+ Bind(&if_targetisundefined);
+ {
+ Node* const message_id = SmiConstant(MessageTemplate::kNotAPromise);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, new_target);
+ Return(UndefinedConstant()); // Never reached.
+ }
+
+ // 2. If IsCallable(executor) is false, throw a TypeError exception.
+ Bind(&if_notcallable);
+ {
+ Node* const message_id =
+ SmiConstant(MessageTemplate::kResolverNotAFunction);
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, executor);
+ Return(UndefinedConstant()); // Never reached.
+ }
+
+ // Silently fail if the stack looks fishy.
+ Bind(&if_noaccess);
+ {
+ Node* const counter_id =
+ SmiConstant(v8::Isolate::kPromiseConstructorReturnedUndefined);
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_id);
+ Return(UndefinedConstant());
+ }
+}
+
+TF_BUILTIN(PromiseInternalConstructor, PromiseBuiltinsAssembler) {
+ Node* const parent = Parameter(1);
+ Node* const context = Parameter(4);
+ Return(AllocateAndInitJSPromise(context, parent));
+}
+
+TF_BUILTIN(IsPromise, PromiseBuiltinsAssembler) {
+ Node* const maybe_promise = Parameter(1);
+ Label if_notpromise(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(maybe_promise), &if_notpromise);
+
+ Node* const result =
+ SelectBooleanConstant(HasInstanceType(maybe_promise, JS_PROMISE_TYPE));
+ Return(result);
+
+ Bind(&if_notpromise);
+ Return(FalseConstant());
+}
+
+TF_BUILTIN(PerformPromiseThen, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const on_resolve = Parameter(2);
+ Node* const on_reject = Parameter(3);
+ Node* const deferred_promise = Parameter(4);
+ Node* const context = Parameter(7);
+
+ // No deferred_on_resolve/deferred_on_reject because this is just an
+ // internal promise created by async-await.
+ Node* const result = InternalPerformPromiseThen(
+ context, promise, on_resolve, on_reject, deferred_promise,
+ UndefinedConstant(), UndefinedConstant());
+
+ // TODO(gsathya): This is unused, but value is returned according to spec.
+ Return(result);
+}
+
+// ES#sec-promise.prototype.then
+// Promise.prototype.then ( onFulfilled, onRejected )
+TF_BUILTIN(PromiseThen, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const promise = Parameter(0);
+ Node* const on_resolve = Parameter(1);
+ Node* const on_reject = Parameter(2);
+ Node* const context = Parameter(5);
+
+ Node* const result =
+ InternalPromiseThen(context, promise, on_resolve, on_reject);
+ Return(result);
+}
+
+// ES#sec-promise-resolve-functions
+// Promise Resolve Functions
+TF_BUILTIN(PromiseResolveClosure, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label out(this);
+
+ // 3. Let alreadyResolved be F.[[AlreadyResolved]].
+ int has_already_visited_slot = kAlreadyVisitedSlot;
+
+ Node* const has_already_visited =
+ LoadContextElement(context, has_already_visited_slot);
+
+ // 4. If alreadyResolved.[[Value]] is true, return undefined.
+ GotoIf(SmiEqual(has_already_visited, SmiConstant(1)), &out);
+
+  // 5. Set alreadyResolved.[[Value]] to true.
+ StoreContextElementNoWriteBarrier(context, has_already_visited_slot,
+ SmiConstant(1));
+
+ // 2. Let promise be F.[[Promise]].
+ Node* const promise =
+ LoadContextElement(context, IntPtrConstant(kPromiseSlot));
+
+ InternalResolvePromise(context, promise, value);
+ Return(UndefinedConstant());
+
+ Bind(&out);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(ResolvePromise, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const result = Parameter(2);
+ Node* const context = Parameter(5);
+
+ InternalResolvePromise(context, promise, result);
+ Return(UndefinedConstant());
+}
+
+TF_BUILTIN(PromiseHandleReject, PromiseBuiltinsAssembler) {
+ typedef PromiseHandleRejectDescriptor Descriptor;
+
+ Node* const promise = Parameter(Descriptor::kPromise);
+ Node* const on_reject = Parameter(Descriptor::kOnReject);
+ Node* const exception = Parameter(Descriptor::kException);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Callable call_callable = CodeFactory::Call(isolate());
+ Variable var_unused(this, MachineRepresentation::kTagged);
+
+ Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+ Branch(IsUndefined(on_reject), &if_internalhandler, &if_customhandler);
+
+ Bind(&if_internalhandler);
+ {
+ InternalPromiseReject(context, promise, exception, false);
+ Return(UndefinedConstant());
+ }
+
+ Bind(&if_customhandler);
+ {
+ CallJS(call_callable, context, on_reject, UndefinedConstant(), exception);
+ Return(UndefinedConstant());
+ }
+}
+
+TF_BUILTIN(PromiseHandle, PromiseBuiltinsAssembler) {
+ Node* const value = Parameter(1);
+ Node* const handler = Parameter(2);
+ Node* const deferred_promise = Parameter(3);
+ Node* const deferred_on_resolve = Parameter(4);
+ Node* const deferred_on_reject = Parameter(5);
+ Node* const context = Parameter(8);
+ Isolate* isolate = this->isolate();
+
+ Variable var_reason(this, MachineRepresentation::kTagged);
+
+ Node* const is_debug_active = IsDebugActive();
+ Label run_handler(this), if_rejectpromise(this), promisehook_before(this),
+ promisehook_after(this), debug_pop(this);
+
+ GotoUnless(is_debug_active, &promisehook_before);
+ CallRuntime(Runtime::kDebugPushPromise, context, deferred_promise);
+ Goto(&promisehook_before);
+
+ Bind(&promisehook_before);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &run_handler);
+ CallRuntime(Runtime::kPromiseHookBefore, context, deferred_promise);
+ Goto(&run_handler);
+ }
+
+ Bind(&run_handler);
+ {
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ CallJS(call_callable, context, handler, UndefinedConstant(), value);
+
+ GotoIfException(result, &if_rejectpromise, &var_reason);
+
+ Label if_internalhandler(this), if_customhandler(this, Label::kDeferred);
+ Branch(IsUndefined(deferred_on_resolve), &if_internalhandler,
+ &if_customhandler);
+
+ Bind(&if_internalhandler);
+ InternalResolvePromise(context, deferred_promise, result);
+ Goto(&promisehook_after);
+
+ Bind(&if_customhandler);
+ {
+ Node* const maybe_exception =
+ CallJS(call_callable, context, deferred_on_resolve,
+ UndefinedConstant(), result);
+ GotoIfException(maybe_exception, &if_rejectpromise, &var_reason);
+ Goto(&promisehook_after);
+ }
+ }
+
+ Bind(&if_rejectpromise);
+ {
+ Callable promise_handle_reject = CodeFactory::PromiseHandleReject(isolate);
+ CallStub(promise_handle_reject, context, deferred_promise,
+ deferred_on_reject, var_reason.value());
+ Goto(&promisehook_after);
+ }
+
+ Bind(&promisehook_after);
+ {
+ GotoUnless(IsPromiseHookEnabled(), &debug_pop);
+ CallRuntime(Runtime::kPromiseHookAfter, context, deferred_promise);
+ Goto(&debug_pop);
+ }
+
+ Bind(&debug_pop);
+ {
+ Label out(this);
+
+ GotoUnless(is_debug_active, &out);
+ CallRuntime(Runtime::kDebugPopPromise, context);
+ Goto(&out);
+
+ Bind(&out);
+ Return(UndefinedConstant());
+ }
+}
+
+// ES#sec-promise.prototype.catch
+// Promise.prototype.catch ( onRejected )
+TF_BUILTIN(PromiseCatch, PromiseBuiltinsAssembler) {
+ // 1. Let promise be the this value.
+ Node* const promise = Parameter(0);
+ Node* const on_resolve = UndefinedConstant();
+ Node* const on_reject = Parameter(1);
+ Node* const context = Parameter(4);
+
+ Label if_internalthen(this), if_customthen(this, Label::kDeferred);
+ GotoIf(TaggedIsSmi(promise), &if_customthen);
+ BranchIfFastPath(context, promise, &if_internalthen, &if_customthen);
+
+ Bind(&if_internalthen);
+ {
+ Node* const result =
+ InternalPromiseThen(context, promise, on_resolve, on_reject);
+ Return(result);
+ }
+
+ Bind(&if_customthen);
+ {
+ Isolate* isolate = this->isolate();
+ Node* const then_str = HeapConstant(isolate->factory()->then_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const then =
+ CallStub(getproperty_callable, context, promise, then_str);
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const result =
+ CallJS(call_callable, context, then, promise, on_resolve, on_reject);
+ Return(result);
+ }
+}
+
+TF_BUILTIN(PromiseResolve, PromiseBuiltinsAssembler) {
+ // 1. Let C be the this value.
+ Node* receiver = Parameter(0);
+ Node* value = Parameter(1);
+ Node* context = Parameter(4);
+ Isolate* isolate = this->isolate();
+
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "PromiseResolve");
+
+ Label if_valueisnativepromise(this), if_valueisnotnativepromise(this),
+ if_valueisnotpromise(this);
+
+ // 3. If IsPromise(x) is true, then
+ GotoIf(TaggedIsSmi(value), &if_valueisnotpromise);
+
+ // This short-circuits the constructor lookups.
+ GotoUnless(HasInstanceType(value, JS_PROMISE_TYPE), &if_valueisnotpromise);
+
+ // This adds a fast path as non-subclassed native promises don't have
+ // an observable constructor lookup.
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ BranchIfFastPath(native_context, promise_fun, value, &if_valueisnativepromise,
+ &if_valueisnotnativepromise);
+
+ Bind(&if_valueisnativepromise);
+ {
+ GotoUnless(WordEqual(promise_fun, receiver), &if_valueisnotnativepromise);
+ Return(value);
+ }
+
+ // At this point, value and/or receiver are not native promises, but
+ // they could be of the same subclass.
+ Bind(&if_valueisnotnativepromise);
+ {
+ // 3.a Let xConstructor be ? Get(x, "constructor").
+ // The constructor lookup is observable.
+ Node* const constructor_str =
+ HeapConstant(isolate->factory()->constructor_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const constructor =
+ CallStub(getproperty_callable, context, value, constructor_str);
+
+ // 3.b If SameValue(xConstructor, C) is true, return x.
+ GotoUnless(SameValue(constructor, receiver, context),
+ &if_valueisnotpromise);
+
+ Return(value);
+ }
- Handle<Context> context(isolate->context(), isolate);
+ Bind(&if_valueisnotpromise);
+ {
+ Label if_nativepromise(this), if_notnativepromise(this);
+ BranchIfFastPath(context, receiver, &if_nativepromise,
+ &if_notnativepromise);
- if (PromiseUtils::HasAlreadyVisited(context)) {
- return isolate->heap()->undefined_value();
+ // This adds a fast path for native promises that don't need to
+ // create NewPromiseCapability.
+ Bind(&if_nativepromise);
+ {
+ Label do_resolve(this);
+
+ Node* const result = AllocateAndInitJSPromise(context);
+ InternalResolvePromise(context, result, value);
+ Return(result);
+ }
+
+ Bind(&if_notnativepromise);
+ {
+ // 4. Let promiseCapability be ? NewPromiseCapability(C).
+ Node* const capability = NewPromiseCapability(context, receiver);
+
+ // 5. Perform ? Call(promiseCapability.[[Resolve]], undefined, « x »).
+ Callable call_callable = CodeFactory::Call(isolate);
+ Node* const resolve =
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset);
+ CallJS(call_callable, context, resolve, UndefinedConstant(), value);
+
+ // 6. Return promiseCapability.[[Promise]].
+ Node* const result =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(result);
+ }
}
+}
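
Promise.resolve above short-circuits twice: a native promise created by the unmodified native constructor is returned as-is, and any promise whose observable `constructor` equals the receiver is likewise returned unchanged; everything else is wrapped through a capability. A rough C++ sketch of just that decision tree (the types are illustrative):

#include <iostream>
#include <string>

struct ValueSketch {
  bool is_promise;           // has the promise internal slots
  std::string constructor;   // Get(x, "constructor"), simplified to a name
};

// Sketch of the PromiseResolve(C, x) decision: reuse x when it is a promise
// constructed by C, otherwise mint a fresh promise for C and resolve it with x.
std::string ResolveDecision(const std::string& receiver, const ValueSketch& x) {
  if (x.is_promise && x.constructor == receiver) return "return x unchanged";
  return "NewPromiseCapability(" + receiver + "), resolve it with x";
}

int main() {
  std::cout << ResolveDecision("Promise", {true, "Promise"}) << "\n";
  std::cout << ResolveDecision("MyPromise", {true, "Promise"}) << "\n";
}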
+
+TF_BUILTIN(PromiseGetCapabilitiesExecutor, PromiseBuiltinsAssembler) {
+ Node* const resolve = Parameter(1);
+ Node* const reject = Parameter(2);
+ Node* const context = Parameter(5);
- PromiseUtils::SetAlreadyVisited(context);
- Handle<Object> value = args.atOrUndefined(isolate, 1);
- Handle<JSObject> promise = handle(PromiseUtils::GetPromise(context), isolate);
- Handle<Object> debug_event =
- handle(PromiseUtils::GetDebugEvent(context), isolate);
- MaybeHandle<Object> maybe_result;
- Handle<Object> argv[] = {promise, value, debug_event};
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, Execution::Call(isolate, isolate->promise_internal_reject(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv));
- return isolate->heap()->undefined_value();
+ Node* const capability = LoadContextElement(context, kCapabilitySlot);
+
+ Label if_alreadyinvoked(this, Label::kDeferred);
+ GotoIf(WordNotEqual(
+ LoadObjectField(capability, JSPromiseCapability::kResolveOffset),
+ UndefinedConstant()),
+ &if_alreadyinvoked);
+ GotoIf(WordNotEqual(
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset),
+ UndefinedConstant()),
+ &if_alreadyinvoked);
+
+ StoreObjectField(capability, JSPromiseCapability::kResolveOffset, resolve);
+ StoreObjectField(capability, JSPromiseCapability::kRejectOffset, reject);
+
+ Return(UndefinedConstant());
+
+ Bind(&if_alreadyinvoked);
+ Node* message = SmiConstant(MessageTemplate::kPromiseExecutorAlreadyInvoked);
+ Return(CallRuntime(Runtime::kThrowTypeError, context, message));
}
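
The executor above is the capability-extraction closure used by NewPromiseCapability: it may run at most once, and finding either slot already populated is a TypeError. The same once-only contract in standalone C++ (illustrative types, not V8's):

#include <functional>
#include <iostream>
#include <stdexcept>

struct CapabilitySketch {
  std::function<void(int)> resolve, reject;
};

// Sketch: record resolve/reject into the capability exactly once; a second
// call (either slot already set) throws, mirroring
// MessageTemplate::kPromiseExecutorAlreadyInvoked.
void ExecutorSketch(CapabilitySketch& cap, std::function<void(int)> resolve,
                    std::function<void(int)> reject) {
  if (cap.resolve || cap.reject)
    throw std::runtime_error("promise executor already invoked");
  cap.resolve = std::move(resolve);
  cap.reject = std::move(reject);
}

int main() {
  CapabilitySketch cap;
  ExecutorSketch(cap, [](int) {}, [](int) {});
  try {
    ExecutorSketch(cap, [](int) {}, [](int) {});
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";
  }
}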
-// ES#sec-createresolvingfunctions
-// CreateResolvingFunctions ( promise )
-BUILTIN(CreateResolvingFunctions) {
- HandleScope scope(isolate);
- DCHECK_EQ(3, args.length());
+TF_BUILTIN(NewPromiseCapability, PromiseBuiltinsAssembler) {
+ Node* constructor = Parameter(1);
+ Node* debug_event = Parameter(2);
+ Node* context = Parameter(5);
- Handle<JSObject> promise = args.at<JSObject>(1);
- Handle<Object> debug_event = args.at<Object>(2);
- Handle<JSFunction> resolve, reject;
+ CSA_ASSERT_JS_ARGC_EQ(this, 2);
- PromiseUtils::CreateResolvingFunctions(isolate, promise, debug_event,
- &resolve, &reject);
+ Return(NewPromiseCapability(context, constructor, debug_event));
+}
+
+TF_BUILTIN(PromiseReject, PromiseBuiltinsAssembler) {
+ // 1. Let C be the this value.
+ Node* const receiver = Parameter(0);
+ Node* const reason = Parameter(1);
+ Node* const context = Parameter(4);
+
+ // 2. If Type(C) is not Object, throw a TypeError exception.
+ ThrowIfNotJSReceiver(context, receiver, MessageTemplate::kCalledOnNonObject,
+ "PromiseReject");
+
+ Label if_nativepromise(this), if_custompromise(this, Label::kDeferred);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const promise_fun =
+ LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX);
+ Branch(WordEqual(promise_fun, receiver), &if_nativepromise,
+ &if_custompromise);
+
+ Bind(&if_nativepromise);
+ {
+ Node* const promise = AllocateAndSetJSPromise(
+ context, SmiConstant(v8::Promise::kRejected), reason);
+ CallRuntime(Runtime::kPromiseRejectEventFromStack, context, promise,
+ reason);
+ Return(promise);
+ }
+
+ Bind(&if_custompromise);
+ {
+ // 3. Let promiseCapability be ? NewPromiseCapability(C).
+ Node* const capability = NewPromiseCapability(context, receiver);
+
+ // 4. Perform ? Call(promiseCapability.[[Reject]], undefined, « r »).
+ Node* const reject =
+ LoadObjectField(capability, JSPromiseCapability::kRejectOffset);
+ Callable call_callable = CodeFactory::Call(isolate());
+ CallJS(call_callable, context, reject, UndefinedConstant(), reason);
+
+ // 5. Return promiseCapability.[[Promise]].
+ Node* const promise =
+ LoadObjectField(capability, JSPromiseCapability::kPromiseOffset);
+ Return(promise);
+ }
+}
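
As with Promise.resolve, the fast path here skips the capability machinery entirely when the receiver is the unmodified native constructor: the promise is allocated directly in the rejected state. A thin C++ sketch of that shortcut (types illustrative):

#include <iostream>
#include <string>

enum class State { kPending, kFulfilled, kRejected };

struct PromiseSketch { State state; std::string value; };

// Sketch: for the native constructor there is no observable executor or
// capability, so the promise can be born already settled.
PromiseSketch RejectFastSketch(const std::string& reason) {
  return PromiseSketch{State::kRejected, reason};
}

int main() {
  const PromiseSketch p = RejectFastSketch("boom");
  std::cout << (p.state == State::kRejected) << " " << p.value << "\n";
}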
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(2);
- result->set(0, *resolve);
- result->set(1, *reject);
+TF_BUILTIN(InternalPromiseReject, PromiseBuiltinsAssembler) {
+ Node* const promise = Parameter(1);
+ Node* const reason = Parameter(2);
+ Node* const debug_event = Parameter(3);
+ Node* const context = Parameter(6);
- return *isolate->factory()->NewJSArrayWithElements(result, FAST_ELEMENTS, 2,
- NOT_TENURED);
+ InternalPromiseReject(context, promise, reason, debug_event);
+ Return(UndefinedConstant());
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h
new file mode 100644
index 0000000000..dee9a075a2
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-promise.h
@@ -0,0 +1,120 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-stub-assembler.h"
+#include "src/contexts.h"
+
+namespace v8 {
+namespace internal {
+
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class PromiseBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ enum PromiseResolvingFunctionContextSlot {
+ // Whether the resolve/reject callback was already called.
+ kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
+
+ // The promise which resolve/reject callbacks fulfill.
+ kPromiseSlot,
+
+ // Whether to trigger a debug event or not. Used in catch
+ // prediction.
+ kDebugEventSlot,
+ kPromiseContextLength,
+ };
+
+ enum FunctionContextSlot {
+ kCapabilitySlot = Context::MIN_CONTEXT_SLOTS,
+
+ kCapabilitiesContextLength,
+ };
+
+ explicit PromiseBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+ // These allocate and initialize a promise with pending state and
+ // undefined fields.
+ //
+ // This uses undefined as the parent promise for the promise init
+ // hook.
+ Node* AllocateAndInitJSPromise(Node* context);
+ // This uses the given parent as the parent promise for the promise
+ // init hook.
+ Node* AllocateAndInitJSPromise(Node* context, Node* parent);
+
+ // This allocates and initializes a promise with the given state and
+ // fields.
+ Node* AllocateAndSetJSPromise(Node* context, Node* status, Node* result);
+
+ Node* AllocatePromiseResolveThenableJobInfo(Node* result, Node* then,
+ Node* resolve, Node* reject,
+ Node* context);
+
+ std::pair<Node*, Node*> CreatePromiseResolvingFunctions(
+ Node* promise, Node* native_context, Node* promise_context);
+
+ Node* PromiseHasHandler(Node* promise);
+
+ Node* CreatePromiseResolvingFunctionsContext(Node* promise, Node* debug_event,
+ Node* native_context);
+
+ Node* CreatePromiseGetCapabilitiesExecutorContext(Node* native_context,
+ Node* promise_capability);
+
+ Node* NewPromiseCapability(Node* context, Node* constructor,
+ Node* debug_event = nullptr);
+
+ protected:
+ void PromiseInit(Node* promise);
+
+ Node* ThrowIfNotJSReceiver(Node* context, Node* value,
+ MessageTemplate::Template msg_template,
+ const char* method_name = nullptr);
+
+ Node* SpeciesConstructor(Node* context, Node* object,
+ Node* default_constructor);
+
+ void PromiseSetHasHandler(Node* promise);
+
+ void AppendPromiseCallback(int offset, compiler::Node* promise,
+ compiler::Node* value);
+
+ Node* InternalPromiseThen(Node* context, Node* promise, Node* on_resolve,
+ Node* on_reject);
+
+ Node* InternalPerformPromiseThen(Node* context, Node* promise,
+ Node* on_resolve, Node* on_reject,
+ Node* deferred_promise,
+ Node* deferred_on_resolve,
+ Node* deferred_on_reject);
+
+ void InternalResolvePromise(Node* context, Node* promise, Node* result);
+
+ void BranchIfFastPath(Node* context, Node* promise, Label* if_isunmodified,
+ Label* if_ismodified);
+
+ void BranchIfFastPath(Node* native_context, Node* promise_fun, Node* promise,
+ Label* if_isunmodified, Label* if_ismodified);
+
+ Node* CreatePromiseContext(Node* native_context, int slots);
+ void PromiseFulfill(Node* context, Node* promise, Node* result,
+ v8::Promise::PromiseState status);
+
+ void BranchIfAccessCheckFailed(Node* context, Node* native_context,
+ Node* promise_constructor, Node* executor,
+ Label* if_noaccess);
+
+ void InternalPromiseReject(Node* context, Node* promise, Node* value,
+ bool debug_event);
+ void InternalPromiseReject(Node* context, Node* promise, Node* value,
+ Node* debug_event);
+
+ private:
+ Node* AllocateJSPromise(Node* context);
+};
+
+} // namespace internal
+} // namespace v8
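
The builtins-reflect.cc hunks below shorten args.at<Object>(i) to args.at(i). That reads as a defaulted template parameter on the arguments accessor; a minimal sketch of the pattern under that assumption (the real BuiltinArguments declaration may differ):

#include <iostream>

template <class T>
struct Handle { T value; };

struct ArgsSketch {
  int raw[4] = {10, 20, 30, 40};
  // Defaulting S lets call sites write args.at(1) instead of args.at<Object>(1),
  // while an explicit type stays available where a more specific handle is needed.
  template <class S = int>  // stand-in: V8 would default to Object
  Handle<S> at(int index) const { return Handle<S>{static_cast<S>(raw[index])}; }
};

int main() {
  ArgsSketch args;
  std::cout << args.at(1).value << "\n";        // uses the default
  std::cout << args.at<long>(2).value << "\n";  // explicit, as before
}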
diff --git a/deps/v8/src/builtins/builtins-reflect.cc b/deps/v8/src/builtins/builtins-reflect.cc
index b4d16c4a7b..64947b1f77 100644
--- a/deps/v8/src/builtins/builtins-reflect.cc
+++ b/deps/v8/src/builtins/builtins-reflect.cc
@@ -17,9 +17,9 @@ namespace internal {
BUILTIN(ReflectDefineProperty) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
- Handle<Object> attributes = args.at<Object>(3);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
+ Handle<Object> attributes = args.at(3);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -48,8 +48,8 @@ BUILTIN(ReflectDefineProperty) {
BUILTIN(ReflectDeleteProperty) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -73,7 +73,7 @@ BUILTIN(ReflectGet) {
HandleScope scope(isolate);
Handle<Object> target = args.atOrUndefined(isolate, 1);
Handle<Object> key = args.atOrUndefined(isolate, 2);
- Handle<Object> receiver = args.length() > 3 ? args.at<Object>(3) : target;
+ Handle<Object> receiver = args.length() > 3 ? args.at(3) : target;
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -95,8 +95,8 @@ BUILTIN(ReflectGet) {
BUILTIN(ReflectGetOwnPropertyDescriptor) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -121,7 +121,7 @@ BUILTIN(ReflectGetOwnPropertyDescriptor) {
BUILTIN(ReflectGetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -138,8 +138,8 @@ BUILTIN(ReflectGetPrototypeOf) {
BUILTIN(ReflectHas) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> key = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> key = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -162,7 +162,7 @@ BUILTIN(ReflectHas) {
BUILTIN(ReflectIsExtensible) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -181,7 +181,7 @@ BUILTIN(ReflectIsExtensible) {
BUILTIN(ReflectOwnKeys) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -203,7 +203,7 @@ BUILTIN(ReflectOwnKeys) {
BUILTIN(ReflectPreventExtensions) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- Handle<Object> target = args.at<Object>(1);
+ Handle<Object> target = args.at(1);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -224,7 +224,7 @@ BUILTIN(ReflectSet) {
Handle<Object> target = args.atOrUndefined(isolate, 1);
Handle<Object> key = args.atOrUndefined(isolate, 2);
Handle<Object> value = args.atOrUndefined(isolate, 3);
- Handle<Object> receiver = args.length() > 4 ? args.at<Object>(4) : target;
+ Handle<Object> receiver = args.length() > 4 ? args.at(4) : target;
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -249,8 +249,8 @@ BUILTIN(ReflectSet) {
BUILTIN(ReflectSetPrototypeOf) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- Handle<Object> target = args.at<Object>(1);
- Handle<Object> proto = args.at<Object>(2);
+ Handle<Object> target = args.at(1);
+ Handle<Object> proto = args.at(2);
if (!target->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
diff --git a/deps/v8/src/builtins/builtins-regexp.cc b/deps/v8/src/builtins/builtins-regexp.cc
index 5f8d18be43..2191268441 100644
--- a/deps/v8/src/builtins/builtins-regexp.cc
+++ b/deps/v8/src/builtins/builtins-regexp.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-utils.h"
#include "src/string-builder.h"
@@ -13,541 +14,400 @@
namespace v8 {
namespace internal {
+typedef compiler::Node Node;
+typedef CodeStubAssembler::ParameterMode ParameterMode;
+typedef compiler::CodeAssemblerState CodeAssemblerState;
+
+class RegExpBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit RegExpBuiltinsAssembler(CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* FastLoadLastIndex(Node* regexp);
+ Node* SlowLoadLastIndex(Node* context, Node* regexp);
+ Node* LoadLastIndex(Node* context, Node* regexp, bool is_fastpath);
+
+ void FastStoreLastIndex(Node* regexp, Node* value);
+ void SlowStoreLastIndex(Node* context, Node* regexp, Node* value);
+ void StoreLastIndex(Node* context, Node* regexp, Node* value,
+ bool is_fastpath);
+
+ Node* ConstructNewResultFromMatchInfo(Node* context, Node* match_info,
+ Node* string);
+
+ Node* RegExpPrototypeExecBodyWithoutResult(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Label* if_didnotmatch,
+ const bool is_fastpath);
+ Node* RegExpPrototypeExecBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ Node* ThrowIfNotJSReceiver(Node* context, Node* maybe_receiver,
+ MessageTemplate::Template msg_template,
+ char const* method_name);
+
+ Node* IsInitialRegExpMap(Node* context, Node* map);
+ void BranchIfFastRegExp(Node* context, Node* map, Label* if_isunmodified,
+ Label* if_ismodified);
+ void BranchIfFastRegExpResult(Node* context, Node* map,
+ Label* if_isunmodified, Label* if_ismodified);
+
+ Node* FlagsGetter(Node* const context, Node* const regexp, bool is_fastpath);
+
+ Node* FastFlagGetter(Node* const regexp, JSRegExp::Flag flag);
+ Node* SlowFlagGetter(Node* const context, Node* const regexp,
+ JSRegExp::Flag flag);
+ Node* FlagGetter(Node* const context, Node* const regexp, JSRegExp::Flag flag,
+ bool is_fastpath);
+ void FlagGetter(JSRegExp::Flag flag, v8::Isolate::UseCounterFeature counter,
+ const char* method_name);
+
+ Node* IsRegExp(Node* const context, Node* const maybe_receiver);
+ Node* RegExpInitialize(Node* const context, Node* const regexp,
+ Node* const maybe_pattern, Node* const maybe_flags);
+
+ Node* RegExpExec(Node* context, Node* regexp, Node* string);
+
+ Node* AdvanceStringIndex(Node* const string, Node* const index,
+ Node* const is_unicode);
+
+ void RegExpPrototypeMatchBody(Node* const context, Node* const regexp,
+ Node* const string, const bool is_fastpath);
+
+ void RegExpPrototypeSearchBodyFast(Node* const context, Node* const regexp,
+ Node* const string);
+ void RegExpPrototypeSearchBodySlow(Node* const context, Node* const regexp,
+ Node* const string);
+
+ void RegExpPrototypeSplitBody(Node* const context, Node* const regexp,
+ Node* const string, Node* const limit);
+
+ Node* ReplaceGlobalCallableFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_callable);
+ Node* ReplaceSimpleStringFastPath(Node* context, Node* regexp, Node* string,
+ Node* replace_string);
+};
+
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
-namespace {
-
-Handle<String> PatternFlags(Isolate* isolate, Handle<JSRegExp> regexp) {
- static const int kMaxFlagsLength = 5 + 1; // 5 flags and '\0';
- char flags_string[kMaxFlagsLength];
- int i = 0;
-
- const JSRegExp::Flags flags = regexp->GetFlags();
-
- if ((flags & JSRegExp::kGlobal) != 0) flags_string[i++] = 'g';
- if ((flags & JSRegExp::kIgnoreCase) != 0) flags_string[i++] = 'i';
- if ((flags & JSRegExp::kMultiline) != 0) flags_string[i++] = 'm';
- if ((flags & JSRegExp::kUnicode) != 0) flags_string[i++] = 'u';
- if ((flags & JSRegExp::kSticky) != 0) flags_string[i++] = 'y';
-
- DCHECK_LT(i, kMaxFlagsLength);
- memset(&flags_string[i], '\0', kMaxFlagsLength - i);
-
- return isolate->factory()->NewStringFromAsciiChecked(flags_string);
-}
-
-// ES#sec-regexpinitialize
-// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
-MUST_USE_RESULT MaybeHandle<JSRegExp> RegExpInitialize(Isolate* isolate,
- Handle<JSRegExp> regexp,
- Handle<Object> pattern,
- Handle<Object> flags) {
- Handle<String> pattern_string;
- if (pattern->IsUndefined(isolate)) {
- pattern_string = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, pattern_string,
- Object::ToString(isolate, pattern), JSRegExp);
- }
-
- Handle<String> flags_string;
- if (flags->IsUndefined(isolate)) {
- flags_string = isolate->factory()->empty_string();
- } else {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, flags_string,
- Object::ToString(isolate, flags), JSRegExp);
- }
-
- // TODO(jgruber): We could avoid the flags back and forth conversions.
- return JSRegExp::Initialize(regexp, pattern_string, flags_string);
-}
-
-} // namespace
-
-// ES#sec-regexp-pattern-flags
-// RegExp ( pattern, flags )
-BUILTIN(RegExpConstructor) {
- HandleScope scope(isolate);
-
- Handle<HeapObject> new_target = args.new_target();
- Handle<Object> pattern = args.atOrUndefined(isolate, 1);
- Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
- Handle<JSFunction> target = isolate->regexp_function();
-
- bool pattern_is_regexp;
- {
- Maybe<bool> maybe_pattern_is_regexp =
- RegExpUtils::IsRegExp(isolate, pattern);
- if (maybe_pattern_is_regexp.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
- pattern_is_regexp = maybe_pattern_is_regexp.FromJust();
- }
-
- if (new_target->IsUndefined(isolate)) {
- new_target = target;
-
- // ES6 section 21.2.3.1 step 3.b
- if (pattern_is_regexp && flags->IsUndefined(isolate)) {
- Handle<Object> pattern_constructor;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern_constructor,
- Object::GetProperty(pattern,
- isolate->factory()->constructor_string()));
-
- if (pattern_constructor.is_identical_to(new_target)) {
- return *pattern;
- }
- }
- }
-
- if (pattern->IsJSRegExp()) {
- Handle<JSRegExp> regexp_pattern = Handle<JSRegExp>::cast(pattern);
-
- if (flags->IsUndefined(isolate)) {
- flags = PatternFlags(isolate, regexp_pattern);
- }
- pattern = handle(regexp_pattern->source(), isolate);
- } else if (pattern_is_regexp) {
- Handle<Object> pattern_source;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern_source,
- Object::GetProperty(pattern, isolate->factory()->source_string()));
-
- if (flags->IsUndefined(isolate)) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, flags,
- Object::GetProperty(pattern, isolate->factory()->flags_string()));
- }
- pattern = pattern_source;
- }
-
- Handle<JSReceiver> new_target_receiver = Handle<JSReceiver>::cast(new_target);
-
- // TODO(jgruber): Fast-path for target == new_target == unmodified JSRegExp.
-
- Handle<JSObject> object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object, JSObject::New(target, new_target_receiver));
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(object);
-
- RETURN_RESULT_OR_FAILURE(isolate,
- RegExpInitialize(isolate, regexp, pattern, flags));
-}
-
-BUILTIN(RegExpPrototypeCompile) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSRegExp, regexp, "RegExp.prototype.compile");
-
- Handle<Object> pattern = args.atOrUndefined(isolate, 1);
- Handle<Object> flags = args.atOrUndefined(isolate, 2);
-
- if (pattern->IsJSRegExp()) {
- Handle<JSRegExp> pattern_regexp = Handle<JSRegExp>::cast(pattern);
-
- if (!flags->IsUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRegExpFlags));
- }
-
- flags = PatternFlags(isolate, pattern_regexp);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, pattern,
- Object::GetProperty(pattern, isolate->factory()->source_string()));
- }
-
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, regexp, RegExpInitialize(isolate, regexp, pattern, flags));
-
- // Return undefined for compatibility with JSC.
- // See http://crbug.com/585775 for web compat details.
-
- return isolate->heap()->undefined_value();
-}
-
-namespace {
-
-compiler::Node* FastLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::FastLoadLastIndex(Node* regexp) {
// Load the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- return a->LoadObjectField(regexp, field_offset);
+ return LoadObjectField(regexp, field_offset);
}
-compiler::Node* SlowLoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp) {
+Node* RegExpBuiltinsAssembler::SlowLoadLastIndex(Node* context, Node* regexp) {
// Load through the GetProperty stub.
- typedef compiler::Node Node;
-
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->lastIndex_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- return a->CallStub(getproperty_callable, context, regexp, name);
+ Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ return CallStub(getproperty_callable, context, regexp, name);
}
-compiler::Node* LoadLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* has_initialmap,
- compiler::Node* regexp) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
-
- Variable var_value(a, MachineRepresentation::kTagged);
-
- Label out(a), if_unmodified(a), if_modified(a);
- a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
- a->Bind(&if_unmodified);
- {
- var_value.Bind(FastLoadLastIndex(a, context, regexp));
- a->Goto(&out);
- }
-
- a->Bind(&if_modified);
- {
- var_value.Bind(SlowLoadLastIndex(a, context, regexp));
- a->Goto(&out);
- }
-
- a->Bind(&out);
- return var_value.value();
+Node* RegExpBuiltinsAssembler::LoadLastIndex(Node* context, Node* regexp,
+ bool is_fastpath) {
+ return is_fastpath ? FastLoadLastIndex(regexp)
+ : SlowLoadLastIndex(context, regexp);
}
// The fast-path of StoreLastIndex when regexp is guaranteed to be an unmodified
// JSRegExp instance.
-void FastStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::FastStoreLastIndex(Node* regexp, Node* value) {
// Store the in-object field.
static const int field_offset =
JSRegExp::kSize + JSRegExp::kLastIndexFieldIndex * kPointerSize;
- a->StoreObjectField(regexp, field_offset, value);
+ StoreObjectField(regexp, field_offset, value);
}
-void SlowStoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* regexp, compiler::Node* value) {
+void RegExpBuiltinsAssembler::SlowStoreLastIndex(Node* context, Node* regexp,
+ Node* value) {
// Store through runtime.
// TODO(ishell): Use SetPropertyStub here once available.
- typedef compiler::Node Node;
-
- Node* const name =
- a->HeapConstant(a->isolate()->factory()->lastIndex_string());
- Node* const language_mode = a->SmiConstant(Smi::FromInt(STRICT));
- a->CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
- language_mode);
+ Node* const name = HeapConstant(isolate()->factory()->lastIndex_string());
+ Node* const language_mode = SmiConstant(Smi::FromInt(STRICT));
+ CallRuntime(Runtime::kSetProperty, context, regexp, name, value,
+ language_mode);
}
-void StoreLastIndex(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* has_initialmap, compiler::Node* regexp,
- compiler::Node* value) {
- typedef CodeStubAssembler::Label Label;
-
- Label out(a), if_unmodified(a), if_modified(a);
- a->Branch(has_initialmap, &if_unmodified, &if_modified);
-
- a->Bind(&if_unmodified);
- {
- FastStoreLastIndex(a, context, regexp, value);
- a->Goto(&out);
- }
-
- a->Bind(&if_modified);
- {
- SlowStoreLastIndex(a, context, regexp, value);
- a->Goto(&out);
+void RegExpBuiltinsAssembler::StoreLastIndex(Node* context, Node* regexp,
+ Node* value, bool is_fastpath) {
+ if (is_fastpath) {
+ FastStoreLastIndex(regexp, value);
+ } else {
+ SlowStoreLastIndex(context, regexp, value);
}
-
- a->Bind(&out);
}
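
Note that is_fastpath in these helpers is a plain C++ bool consumed while the stub graph is being built, so two specialized builtin bodies get generated instead of a runtime branch. The same build-time specialization expressed with an ordinary template parameter:

#include <iostream>

// Sketch: the fast/slow choice is fixed per instantiation and constant-folded,
// mirroring how each CSA body is generated once per is_fastpath value.
template <bool kFastPath>
int LoadLastIndexSketch(int in_object_field, int via_getter) {
  if (kFastPath) return in_object_field;  // fast: direct in-object field load
  return via_getter;                      // slow: generic property lookup
}

int main() {
  std::cout << LoadLastIndexSketch<true>(7, 8) << "\n";   // 7
  std::cout << LoadLastIndexSketch<false>(7, 8) << "\n";  // 8
}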
-compiler::Node* ConstructNewResultFromMatchInfo(Isolate* isolate,
- CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* match_info,
- compiler::Node* string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Label out(a);
-
- CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const num_indices = a->SmiUntag(a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0,
- mode));
- Node* const num_results = a->SmiTag(a->WordShr(num_indices, 1));
- Node* const start = a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), 0,
- mode);
- Node* const end = a->LoadFixedArrayElement(
- match_info, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0,
- mode);
+Node* RegExpBuiltinsAssembler::ConstructNewResultFromMatchInfo(Node* context,
+ Node* match_info,
+ Node* string) {
+ Label out(this);
+
+ Node* const num_indices = SmiUntag(LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kNumberOfCapturesIndex));
+ Node* const num_results = SmiTag(WordShr(num_indices, 1));
+ Node* const start =
+ LoadFixedArrayElement(match_info, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const end = LoadFixedArrayElement(
+ match_info, RegExpMatchInfo::kFirstCaptureIndex + 1);
// Calculate the substring of the first match before creating the result array
// to avoid an unnecessary write barrier storing the first result.
- Node* const first = a->SubString(context, string, start, end);
+ Node* const first = SubString(context, string, start, end);
Node* const result =
- a->AllocateRegExpResult(context, num_results, start, string);
- Node* const result_elements = a->LoadElements(result);
+ AllocateRegExpResult(context, num_results, start, string);
+ Node* const result_elements = LoadElements(result);
- a->StoreFixedArrayElement(result_elements, a->IntPtrConstant(0), first,
- SKIP_WRITE_BARRIER);
+ StoreFixedArrayElement(result_elements, 0, first, SKIP_WRITE_BARRIER);
- a->GotoIf(a->SmiEqual(num_results, a->SmiConstant(Smi::FromInt(1))), &out);
+ GotoIf(SmiEqual(num_results, SmiConstant(Smi::FromInt(1))), &out);
// Store all remaining captures.
- Node* const limit = a->IntPtrAdd(
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
+ Node* const limit = IntPtrAdd(
+ IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex), num_indices);
- Variable var_from_cursor(a, MachineType::PointerRepresentation());
- Variable var_to_cursor(a, MachineType::PointerRepresentation());
+ Variable var_from_cursor(this, MachineType::PointerRepresentation());
+ Variable var_to_cursor(this, MachineType::PointerRepresentation());
- var_from_cursor.Bind(
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
- var_to_cursor.Bind(a->IntPtrConstant(1));
+ var_from_cursor.Bind(IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 2));
+ var_to_cursor.Bind(IntPtrConstant(1));
Variable* vars[] = {&var_from_cursor, &var_to_cursor};
- Label loop(a, 2, vars);
+ Label loop(this, 2, vars);
- a->Goto(&loop);
- a->Bind(&loop);
+ Goto(&loop);
+ Bind(&loop);
{
Node* const from_cursor = var_from_cursor.value();
Node* const to_cursor = var_to_cursor.value();
- Node* const start = a->LoadFixedArrayElement(match_info, from_cursor);
+ Node* const start = LoadFixedArrayElement(match_info, from_cursor);
- Label next_iter(a);
- a->GotoIf(a->SmiEqual(start, a->SmiConstant(Smi::FromInt(-1))), &next_iter);
+ Label next_iter(this);
+ GotoIf(SmiEqual(start, SmiConstant(Smi::FromInt(-1))), &next_iter);
- Node* const from_cursor_plus1 =
- a->IntPtrAdd(from_cursor, a->IntPtrConstant(1));
- Node* const end = a->LoadFixedArrayElement(match_info, from_cursor_plus1);
+ Node* const from_cursor_plus1 = IntPtrAdd(from_cursor, IntPtrConstant(1));
+ Node* const end = LoadFixedArrayElement(match_info, from_cursor_plus1);
- Node* const capture = a->SubString(context, string, start, end);
- a->StoreFixedArrayElement(result_elements, to_cursor, capture);
- a->Goto(&next_iter);
+ Node* const capture = SubString(context, string, start, end);
+ StoreFixedArrayElement(result_elements, to_cursor, capture);
+ Goto(&next_iter);
- a->Bind(&next_iter);
- var_from_cursor.Bind(a->IntPtrAdd(from_cursor, a->IntPtrConstant(2)));
- var_to_cursor.Bind(a->IntPtrAdd(to_cursor, a->IntPtrConstant(1)));
- a->Branch(a->UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
+ Bind(&next_iter);
+ var_from_cursor.Bind(IntPtrAdd(from_cursor, IntPtrConstant(2)));
+ var_to_cursor.Bind(IntPtrAdd(to_cursor, IntPtrConstant(1)));
+ Branch(UintPtrLessThan(var_from_cursor.value(), limit), &loop, &out);
}
- a->Bind(&out);
+ Bind(&out);
return result;
}
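
ConstructNewResultFromMatchInfo walks a flat array shaped like [num_indices, ..., start0, end0, start1, end1, ...], where the first pair spans the whole match, later pairs are captures, and a start of -1 marks an unmatched capture. A standalone C++ sketch of that extraction (offsets simplified, not V8's exact RegExpMatchInfo constants):

#include <iostream>
#include <string>
#include <vector>

// Sketch: turn flat (start, end) index pairs into substrings; pair 0 is the
// full match, subsequent pairs are captures, start == -1 means "did not match".
std::vector<std::string> ExtractCaptures(const std::string& subject,
                                         const std::vector<int>& indices) {
  std::vector<std::string> out;
  for (size_t i = 0; i + 1 < indices.size(); i += 2) {
    const int start = indices[i], end = indices[i + 1];
    out.push_back(start < 0 ? "<unmatched>"
                            : subject.substr(start, end - start));
  }
  return out;
}

int main() {
  for (const auto& s : ExtractCaptures("2017-03-21", {0, 10, 0, 4, 5, 7}))
    std::cout << s << "\n";  // "2017-03-21", "2017", "03"
}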
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-compiler::Node* RegExpPrototypeExecInternal(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* maybe_receiver,
- compiler::Node* maybe_string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+// Implements the core of RegExp.prototype.exec but without actually
+// constructing the JSRegExpResult. Returns either null (if the RegExp did not
+// match) or a fixed array containing match indices as returned by
+// RegExpExecStub.
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
+ Node* const context, Node* const regexp, Node* const string,
+ Label* if_didnotmatch, const bool is_fastpath) {
+ Isolate* const isolate = this->isolate();
+
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+
+ if (!is_fastpath) {
+ ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
+ }
- Isolate* const isolate = a->isolate();
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(string)));
+ CSA_ASSERT(this, HasInstanceType(regexp, JS_REGEXP_TYPE));
- Node* const null = a->NullConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label out(this);
- Variable var_result(a, MachineRepresentation::kTagged);
- Label out(a);
-
- // Ensure {maybe_receiver} is a JSRegExp.
- Node* const regexp_map = a->ThrowIfNotInstanceType(
- context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
- Node* const regexp = maybe_receiver;
-
- // Check whether the regexp instance is unmodified.
- Node* const native_context = a->LoadNativeContext(context);
- Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(regexp_map, initial_map);
-
- // Convert {maybe_string} to a string.
- Callable tostring_callable = CodeFactory::ToString(isolate);
- Node* const string = a->CallStub(tostring_callable, context, maybe_string);
- Node* const string_length = a->LoadStringLength(string);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const string_length = LoadStringLength(string);
// Check whether the regexp is global or sticky, which determines whether we
// update last index later on.
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const is_global_or_sticky =
- a->WordAnd(a->SmiUntag(flags),
- a->IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const is_global_or_sticky = WordAnd(
+ SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal | JSRegExp::kSticky));
Node* const should_update_last_index =
- a->WordNotEqual(is_global_or_sticky, int_zero);
+ WordNotEqual(is_global_or_sticky, int_zero);
// Grab and possibly update last index.
- Label run_exec(a);
- Variable var_lastindex(a, MachineRepresentation::kTagged);
+ Label run_exec(this);
+ Variable var_lastindex(this, MachineRepresentation::kTagged);
{
- Label if_doupdate(a), if_dontupdate(a);
- a->Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
+ Label if_doupdate(this), if_dontupdate(this);
+ Branch(should_update_last_index, &if_doupdate, &if_dontupdate);
- a->Bind(&if_doupdate);
+ Bind(&if_doupdate);
{
Node* const regexp_lastindex =
- LoadLastIndex(a, context, has_initialmap, regexp);
+ LoadLastIndex(context, regexp, is_fastpath);
+ var_lastindex.Bind(regexp_lastindex);
+
+ // Omit ToLength if lastindex is a non-negative smi.
+ {
+ Label call_tolength(this, Label::kDeferred), next(this);
+ Branch(TaggedIsPositiveSmi(regexp_lastindex), &next, &call_tolength);
- Callable tolength_callable = CodeFactory::ToLength(isolate);
- Node* const lastindex =
- a->CallStub(tolength_callable, context, regexp_lastindex);
- var_lastindex.Bind(lastindex);
+ Bind(&call_tolength);
+ {
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ var_lastindex.Bind(
+ CallStub(tolength_callable, context, regexp_lastindex));
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
- Label if_isoob(a, Label::kDeferred);
- a->GotoUnless(a->TaggedIsSmi(lastindex), &if_isoob);
- a->GotoUnless(a->SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
- a->Goto(&run_exec);
+ Node* const lastindex = var_lastindex.value();
- a->Bind(&if_isoob);
+ Label if_isoob(this, Label::kDeferred);
+ GotoUnless(TaggedIsSmi(lastindex), &if_isoob);
+ GotoUnless(SmiLessThanOrEqual(lastindex, string_length), &if_isoob);
+ Goto(&run_exec);
+
+ Bind(&if_isoob);
{
- StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
var_result.Bind(null);
- a->Goto(&out);
+ Goto(if_didnotmatch);
}
}
- a->Bind(&if_dontupdate);
+ Bind(&if_dontupdate);
{
var_lastindex.Bind(smi_zero);
- a->Goto(&run_exec);
+ Goto(&run_exec);
}
}
Node* match_indices;
- Label successful_match(a);
- a->Bind(&run_exec);
+ Label successful_match(this);
+ Bind(&run_exec);
{
// Get last match info from the context.
- Node* const last_match_info = a->LoadContextElement(
+ Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
// Call the exec stub.
Callable exec_callable = CodeFactory::RegExpExec(isolate);
- match_indices = a->CallStub(exec_callable, context, regexp, string,
- var_lastindex.value(), last_match_info);
+ match_indices = CallStub(exec_callable, context, regexp, string,
+ var_lastindex.value(), last_match_info);
+ var_result.Bind(match_indices);
// {match_indices} is either null or the RegExpMatchInfo array.
// Return early if exec failed, possibly updating last index.
- a->GotoUnless(a->WordEqual(match_indices, null), &successful_match);
-
- Label return_null(a);
- a->GotoUnless(should_update_last_index, &return_null);
+ GotoUnless(WordEqual(match_indices, null), &successful_match);
- StoreLastIndex(a, context, has_initialmap, regexp, smi_zero);
- a->Goto(&return_null);
+ GotoUnless(should_update_last_index, if_didnotmatch);
- a->Bind(&return_null);
- var_result.Bind(null);
- a->Goto(&out);
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
+ Goto(if_didnotmatch);
}
- Label construct_result(a);
- a->Bind(&successful_match);
+ Bind(&successful_match);
{
- a->GotoUnless(should_update_last_index, &construct_result);
+ GotoUnless(should_update_last_index, &out);
// Update the new last index from {match_indices}.
- Node* const new_lastindex = a->LoadFixedArrayElement(
- match_indices,
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1));
-
- StoreLastIndex(a, context, has_initialmap, regexp, new_lastindex);
- a->Goto(&construct_result);
+ Node* const new_lastindex = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
- a->Bind(&construct_result);
- {
- Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
- match_indices, string);
- var_result.Bind(result);
- a->Goto(&out);
- }
+ StoreLastIndex(context, regexp, new_lastindex, is_fastpath);
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype.exec
// RegExp.prototype.exec ( string )
-void Builtins::Generate_RegExpPrototypeExec(CodeStubAssembler* a) {
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ const bool is_fastpath) {
+ Node* const null = NullConstant();
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+ Variable var_result(this, MachineRepresentation::kTagged);
- Node* const result =
- RegExpPrototypeExecInternal(a, context, maybe_receiver, maybe_string);
- a->Return(result);
-}
+ Label if_didnotmatch(this), out(this);
+ Node* const indices_or_null = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, is_fastpath);
-namespace {
+ // Successful match.
+ {
+ Node* const match_indices = indices_or_null;
+ Node* const result =
+ ConstructNewResultFromMatchInfo(context, match_indices, string);
+ var_result.Bind(result);
+ Goto(&out);
+ }
-compiler::Node* ThrowIfNotJSReceiver(CodeStubAssembler* a, Isolate* isolate,
- compiler::Node* context,
- compiler::Node* value,
- MessageTemplate::Template msg_template,
- char const* method_name) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
+ Bind(&if_didnotmatch);
+ {
+ var_result.Bind(null);
+ Goto(&out);
+ }
- Label out(a), throw_exception(a, Label::kDeferred);
- Variable var_value_map(a, MachineRepresentation::kTagged);
+ Bind(&out);
+ return var_result.value();
+}
- a->GotoIf(a->TaggedIsSmi(value), &throw_exception);
+Node* RegExpBuiltinsAssembler::ThrowIfNotJSReceiver(
+ Node* context, Node* maybe_receiver, MessageTemplate::Template msg_template,
+ char const* method_name) {
+ Label out(this), throw_exception(this, Label::kDeferred);
+ Variable var_value_map(this, MachineRepresentation::kTagged);
+
+ GotoIf(TaggedIsSmi(maybe_receiver), &throw_exception);
// Load the instance type of the {value}.
- var_value_map.Bind(a->LoadMap(value));
- Node* const value_instance_type =
- a->LoadMapInstanceType(var_value_map.value());
+ var_value_map.Bind(LoadMap(maybe_receiver));
+ Node* const value_instance_type = LoadMapInstanceType(var_value_map.value());
- a->Branch(a->IsJSReceiverInstanceType(value_instance_type), &out,
- &throw_exception);
+ Branch(IsJSReceiverInstanceType(value_instance_type), &out, &throw_exception);
// The {value} is not a compatible receiver for this method.
- a->Bind(&throw_exception);
+ Bind(&throw_exception);
{
- Node* const message_id = a->SmiConstant(Smi::FromInt(msg_template));
- Node* const method_name_str = a->HeapConstant(
- isolate->factory()->NewStringFromAsciiChecked(method_name, TENURED));
+ Node* const message_id = SmiConstant(Smi::FromInt(msg_template));
+ Node* const method_name_str = HeapConstant(
+ isolate()->factory()->NewStringFromAsciiChecked(method_name, TENURED));
- Callable callable = CodeFactory::ToString(isolate);
- Node* const value_str = a->CallStub(callable, context, value);
+ Callable callable = CodeFactory::ToString(isolate());
+ Node* const value_str = CallStub(callable, context, maybe_receiver);
- a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str, value_str);
- var_value_map.Bind(a->UndefinedConstant());
- a->Goto(&out); // Never reached.
+ CallRuntime(Runtime::kThrowTypeError, context, message_id, method_name_str,
+ value_str);
+ var_value_map.Bind(UndefinedConstant());
+ Goto(&out); // Never reached.
}
- a->Bind(&out);
+ Bind(&out);
return var_value_map.value();
}
-compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* map) {
- typedef compiler::Node Node;
-
- Node* const native_context = a->LoadNativeContext(context);
+Node* RegExpBuiltinsAssembler::IsInitialRegExpMap(Node* context, Node* map) {
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(map, initial_map);
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
return has_initialmap;
}
@@ -556,192 +416,499 @@ compiler::Node* IsInitialRegExpMap(CodeStubAssembler* a,
// We use a fairly coarse granularity for this and simply check whether both
// the regexp itself is unmodified (i.e. its map has not changed) and its
// prototype is unmodified.
-void BranchIfFastPath(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* map,
- CodeStubAssembler::Label* if_isunmodified,
- CodeStubAssembler::Label* if_ismodified) {
- typedef compiler::Node Node;
-
- Node* const native_context = a->LoadNativeContext(context);
+void RegExpBuiltinsAssembler::BranchIfFastRegExp(Node* context, Node* map,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
Node* const initial_map =
- a->LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const has_initialmap = a->WordEqual(map, initial_map);
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const has_initialmap = WordEqual(map, initial_map);
- a->GotoUnless(has_initialmap, if_ismodified);
+ GotoUnless(has_initialmap, if_ismodified);
- Node* const initial_proto_initial_map = a->LoadContextElement(
- native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
- Node* const proto_map = a->LoadMap(a->LoadMapPrototype(map));
+ Node* const initial_proto_initial_map =
+ LoadContextElement(native_context, Context::REGEXP_PROTOTYPE_MAP_INDEX);
+ Node* const proto_map = LoadMap(LoadMapPrototype(map));
Node* const proto_has_initialmap =
- a->WordEqual(proto_map, initial_proto_initial_map);
+ WordEqual(proto_map, initial_proto_initial_map);
// TODO(ishell): Update this check once map changes for constant field
// tracking are landing.
- a->Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
+ Branch(proto_has_initialmap, if_isunmodified, if_ismodified);
}
-} // namespace
+void RegExpBuiltinsAssembler::BranchIfFastRegExpResult(Node* context, Node* map,
+ Label* if_isunmodified,
+ Label* if_ismodified) {
+ Node* const native_context = LoadNativeContext(context);
+ Node* const initial_regexp_result_map =
+ LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
-void Builtins::Generate_RegExpPrototypeFlagsGetter(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ Branch(WordEqual(map, initial_regexp_result_map), if_isunmodified,
+ if_ismodified);
+}
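
Both branch helpers above reduce the question "is this object still vanilla?" to pointer identity between the object's map and an initial map cached on the native context, so the whole fast-path test is one or two compares. A tiny C++ sketch of why that is cheap (Map here is a stand-in, not V8's class):

#include <iostream>

struct Map {};  // stand-in for a hidden class / object shape

// Sketch: an object counts as unmodified while its map pointer still equals
// the initial map recorded at context creation; no property lookups needed.
bool IsUnmodifiedSketch(const Map* object_map, const Map* initial_map) {
  return object_map == initial_map;
}

int main() {
  Map initial, other;
  std::cout << IsUnmodifiedSketch(&initial, &initial) << "\n";  // 1: fast path
  std::cout << IsUnmodifiedSketch(&other, &initial) << "\n";    // 0: slow path
}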
- Node* const receiver = a->Parameter(0);
- Node* const context = a->Parameter(3);
+// ES#sec-regexp.prototype.exec
+// RegExp.prototype.exec ( string )
+TF_BUILTIN(RegExpPrototypeExec, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
- Isolate* isolate = a->isolate();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const int_one = a->IntPtrConstant(1);
+ // Ensure {maybe_receiver} is a JSRegExp.
+ Node* const regexp_map = ThrowIfNotInstanceType(
+ context, maybe_receiver, JS_REGEXP_TYPE, "RegExp.prototype.exec");
+ Node* const receiver = maybe_receiver;
- Node* const map = ThrowIfNotJSReceiver(a, isolate, context, receiver,
- MessageTemplate::kRegExpNonObject,
- "RegExp.prototype.flags");
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- Variable var_length(a, MachineType::PointerRepresentation());
- Variable var_flags(a, MachineType::PointerRepresentation());
+ Label if_isfastpath(this), if_isslowpath(this);
+ Branch(IsInitialRegExpMap(context, regexp_map), &if_isfastpath,
+ &if_isslowpath);
+
+ Bind(&if_isfastpath);
+ {
+ Node* const result =
+ RegExpPrototypeExecBody(context, receiver, string, true);
+ Return(result);
+ }
+
+ Bind(&if_isslowpath);
+ {
+ Node* const result =
+ RegExpPrototypeExecBody(context, receiver, string, false);
+ Return(result);
+ }
+}
+
+Node* RegExpBuiltinsAssembler::FlagsGetter(Node* const context,
+ Node* const regexp,
+ bool is_fastpath) {
+ Isolate* isolate = this->isolate();
+
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_one = IntPtrConstant(1);
+ Variable var_length(this, MachineType::PointerRepresentation());
+ Variable var_flags(this, MachineType::PointerRepresentation());
// First, count the number of characters we will need and check which flags
// are set.
var_length.Bind(int_zero);
- Label if_isunmodifiedjsregexp(a),
- if_isnotunmodifiedjsregexp(a, Label::kDeferred);
- a->Branch(IsInitialRegExpMap(a, context, map), &if_isunmodifiedjsregexp,
- &if_isnotunmodifiedjsregexp);
-
- Label construct_string(a);
- a->Bind(&if_isunmodifiedjsregexp);
- {
+ if (is_fastpath) {
// Refer to JSRegExp's flag property on the fast-path.
- Node* const flags_smi =
- a->LoadObjectField(receiver, JSRegExp::kFlagsOffset);
- Node* const flags_intptr = a->SmiUntag(flags_smi);
+ Node* const flags_smi = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const flags_intptr = SmiUntag(flags_smi);
var_flags.Bind(flags_intptr);
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a);
-
-#define CASE_FOR_FLAG(FLAG, LABEL, NEXT_LABEL) \
- do { \
- a->Bind(&LABEL); \
- Node* const mask = a->IntPtrConstant(FLAG); \
- a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
- &NEXT_LABEL); \
- var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
- a->Goto(&NEXT_LABEL); \
+#define CASE_FOR_FLAG(FLAG) \
+ do { \
+ Label next(this); \
+ GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG(JSRegExp::kGlobal, label_global, label_ignorecase);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, label_ignorecase, label_multiline);
- CASE_FOR_FLAG(JSRegExp::kMultiline, label_multiline, label_unicode);
- CASE_FOR_FLAG(JSRegExp::kUnicode, label_unicode, label_sticky);
- CASE_FOR_FLAG(JSRegExp::kSticky, label_sticky, construct_string);
+ CASE_FOR_FLAG(JSRegExp::kGlobal);
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG(JSRegExp::kMultiline);
+ CASE_FOR_FLAG(JSRegExp::kUnicode);
+ CASE_FOR_FLAG(JSRegExp::kSticky);
#undef CASE_FOR_FLAG
- }
+ } else {
+ DCHECK(!is_fastpath);
- a->Bind(&if_isnotunmodifiedjsregexp);
- {
// Fall back to GetProperty stub on the slow-path.
var_flags.Bind(int_zero);
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a);
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
-#define CASE_FOR_FLAG(NAME, FLAG, LABEL, NEXT_LABEL) \
+#define CASE_FOR_FLAG(NAME, FLAG) \
do { \
- a->Bind(&LABEL); \
+ Label next(this); \
Node* const name = \
- a->HeapConstant(isolate->factory()->NewStringFromAsciiChecked(NAME)); \
- Node* const flag = \
- a->CallStub(getproperty_callable, context, receiver, name); \
- Label if_isflagset(a); \
- a->BranchIfToBooleanIsTrue(flag, &if_isflagset, &NEXT_LABEL); \
- a->Bind(&if_isflagset); \
- var_length.Bind(a->IntPtrAdd(var_length.value(), int_one)); \
- var_flags.Bind(a->WordOr(var_flags.value(), a->IntPtrConstant(FLAG))); \
- a->Goto(&NEXT_LABEL); \
+ HeapConstant(isolate->factory()->InternalizeUtf8String(NAME)); \
+ Node* const flag = CallStub(getproperty_callable, context, regexp, name); \
+ Label if_isflagset(this); \
+ BranchIfToBooleanIsTrue(flag, &if_isflagset, &next); \
+ Bind(&if_isflagset); \
+ var_length.Bind(IntPtrAdd(var_length.value(), int_one)); \
+ var_flags.Bind(WordOr(var_flags.value(), IntPtrConstant(FLAG))); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG("global", JSRegExp::kGlobal, label_global, label_ignorecase);
- CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase, label_ignorecase,
- label_multiline);
- CASE_FOR_FLAG("multiline", JSRegExp::kMultiline, label_multiline,
- label_unicode);
- CASE_FOR_FLAG("unicode", JSRegExp::kUnicode, label_unicode, label_sticky);
- CASE_FOR_FLAG("sticky", JSRegExp::kSticky, label_sticky, construct_string);
+ CASE_FOR_FLAG("global", JSRegExp::kGlobal);
+ CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
+ CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
+ CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
+ CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
#undef CASE_FOR_FLAG
}
// Allocate a string of the required length and fill it with the corresponding
// char for each set flag.
- a->Bind(&construct_string);
{
- Node* const result =
- a->AllocateSeqOneByteString(context, var_length.value());
+ Node* const result = AllocateSeqOneByteString(context, var_length.value());
Node* const flags_intptr = var_flags.value();
- Variable var_offset(a, MachineType::PointerRepresentation());
+ Variable var_offset(this, MachineType::PointerRepresentation());
var_offset.Bind(
- a->IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-
- Label label_global(a), label_ignorecase(a), label_multiline(a),
- label_unicode(a), label_sticky(a), out(a);
-
-#define CASE_FOR_FLAG(FLAG, CHAR, LABEL, NEXT_LABEL) \
- do { \
- a->Bind(&LABEL); \
- Node* const mask = a->IntPtrConstant(FLAG); \
- a->GotoIf(a->WordEqual(a->WordAnd(flags_intptr, mask), int_zero), \
- &NEXT_LABEL); \
- Node* const value = a->IntPtrConstant(CHAR); \
- a->StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
- var_offset.value(), value); \
- var_offset.Bind(a->IntPtrAdd(var_offset.value(), int_one)); \
- a->Goto(&NEXT_LABEL); \
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+#define CASE_FOR_FLAG(FLAG, CHAR) \
+ do { \
+ Label next(this); \
+ GotoUnless(IsSetWord(flags_intptr, FLAG), &next); \
+ Node* const value = Int32Constant(CHAR); \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \
+ var_offset.value(), value); \
+ var_offset.Bind(IntPtrAdd(var_offset.value(), int_one)); \
+ Goto(&next); \
+ Bind(&next); \
} while (false)
- a->Goto(&label_global);
- CASE_FOR_FLAG(JSRegExp::kGlobal, 'g', label_global, label_ignorecase);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i', label_ignorecase,
- label_multiline);
- CASE_FOR_FLAG(JSRegExp::kMultiline, 'm', label_multiline, label_unicode);
- CASE_FOR_FLAG(JSRegExp::kUnicode, 'u', label_unicode, label_sticky);
- CASE_FOR_FLAG(JSRegExp::kSticky, 'y', label_sticky, out);
+ CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
+ CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
+ CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
+ CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
+ CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
#undef CASE_FOR_FLAG
- a->Bind(&out);
- a->Return(result);
+ return result;
}
}
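
FlagsGetter sizes its output exactly with a counting pass and then writes one character per set flag in a second pass, which is what the two CASE_FOR_FLAG macro groups implement. The same two-pass scheme as standalone C++ (flag values illustrative):

#include <iostream>
#include <string>

enum FlagsSketch {
  kGlobal = 1 << 0, kIgnoreCase = 1 << 1, kMultiline = 1 << 2,
  kUnicode = 1 << 3, kSticky = 1 << 4,
};

// Sketch: pass 1 counts set flags so the buffer is allocated at final size,
// pass 2 appends the corresponding character for each set flag, in order.
std::string FlagsToString(int flags) {
  static constexpr struct { int bit; char ch; } kTable[] = {
      {kGlobal, 'g'}, {kIgnoreCase, 'i'}, {kMultiline, 'm'},
      {kUnicode, 'u'}, {kSticky, 'y'},
  };
  size_t length = 0;
  for (const auto& f : kTable) length += (flags & f.bit) ? 1 : 0;  // pass 1
  std::string result;
  result.reserve(length);
  for (const auto& f : kTable)                                     // pass 2
    if (flags & f.bit) result.push_back(f.ch);
  return result;
}

int main() { std::cout << FlagsToString(kGlobal | kSticky) << "\n"; }  // "gy"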
-// ES6 21.2.5.10.
-BUILTIN(RegExpPrototypeSourceGetter) {
- HandleScope scope(isolate);
+// ES#sec-isregexp IsRegExp ( argument )
+Node* RegExpBuiltinsAssembler::IsRegExp(Node* const context,
+ Node* const maybe_receiver) {
+ Label out(this), if_isregexp(this);
+
+ Variable var_result(this, MachineRepresentation::kWord32);
+ var_result.Bind(Int32Constant(0));
+
+ GotoIf(TaggedIsSmi(maybe_receiver), &out);
+ GotoUnless(IsJSReceiver(maybe_receiver), &out);
+
+ Node* const receiver = maybe_receiver;
+
+ // Check @@match.
+ {
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ Node* const name = HeapConstant(isolate()->factory()->match_symbol());
+ Node* const value = CallStub(getproperty_callable, context, receiver, name);
+
+ Label match_isundefined(this), match_isnotundefined(this);
+ Branch(IsUndefined(value), &match_isundefined, &match_isnotundefined);
+
+ Bind(&match_isundefined);
+ Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isregexp, &out);
+
+ Bind(&match_isnotundefined);
+ BranchIfToBooleanIsTrue(value, &if_isregexp, &out);
+ }
+
+ Bind(&if_isregexp);
+ var_result.Bind(Int32Constant(1));
+ Goto(&out);
+
+ Bind(&out);
+ return var_result.value();
+}
+
+// ES#sec-regexpinitialize
+// Runtime Semantics: RegExpInitialize ( obj, pattern, flags )
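+//
+// Normalizes {maybe_pattern} and {maybe_flags} to strings (undefined becomes
+// the empty string), then delegates parsing, flag validation, and the
+// lastIndex reset to the RegExpInitializeAndCompile runtime function.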
+Node* RegExpBuiltinsAssembler::RegExpInitialize(Node* const context,
+ Node* const regexp,
+ Node* const maybe_pattern,
+ Node* const maybe_flags) {
+ // Normalize pattern.
+ Node* const pattern =
+ Select(IsUndefined(maybe_pattern), [=] { return EmptyStringConstant(); },
+ [=] { return ToString(context, maybe_pattern); },
+ MachineRepresentation::kTagged);
+
+ // Normalize flags.
+ Node* const flags =
+ Select(IsUndefined(maybe_flags), [=] { return EmptyStringConstant(); },
+ [=] { return ToString(context, maybe_flags); },
+ MachineRepresentation::kTagged);
+
+  // Initialize by delegating to the runtime.
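+  // Parsing, flag validation, and compilation all happen in the runtime.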
+
+ return CallRuntime(Runtime::kRegExpInitializeAndCompile, context, regexp,
+ pattern, flags);
+}
+
+TF_BUILTIN(RegExpPrototypeFlagsGetter, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const context = Parameter(3);
+
+ Node* const map = ThrowIfNotJSReceiver(context, maybe_receiver,
+ MessageTemplate::kRegExpNonObject,
+ "RegExp.prototype.flags");
+ Node* const receiver = maybe_receiver;
+
+ Label if_isfastpath(this), if_isslowpath(this, Label::kDeferred);
+ Branch(IsInitialRegExpMap(context, map), &if_isfastpath, &if_isslowpath);
+
+ Bind(&if_isfastpath);
+ Return(FlagsGetter(context, receiver, true));
+
+ Bind(&if_isslowpath);
+ Return(FlagsGetter(context, receiver, false));
+}
+
+// ES#sec-regexp-pattern-flags
+// RegExp ( pattern, flags )
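+//
+// When called as a function (new.target is undefined) with a pattern for
+// which IsRegExp holds, undefined flags, and pattern.constructor === RegExp,
+// the pattern is returned unchanged. Otherwise source and flags are unpacked
+// from regexp-like patterns, a receiver is allocated, and RegExpInitialize
+// runs on it.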
+TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) {
+ Node* const pattern = Parameter(1);
+ Node* const flags = Parameter(2);
+ Node* const new_target = Parameter(3);
+ Node* const context = Parameter(5);
+
+ Isolate* isolate = this->isolate();
+
+ Variable var_flags(this, MachineRepresentation::kTagged);
+ Variable var_pattern(this, MachineRepresentation::kTagged);
+ Variable var_new_target(this, MachineRepresentation::kTagged);
+
+ var_flags.Bind(flags);
+ var_pattern.Bind(pattern);
+ var_new_target.Bind(new_target);
+
+ Node* const native_context = LoadNativeContext(context);
+ Node* const regexp_function =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+
+ Node* const pattern_is_regexp = IsRegExp(context, pattern);
+
+ {
+ Label next(this);
+
+ GotoUnless(IsUndefined(new_target), &next);
+ var_new_target.Bind(regexp_function);
+
+ GotoUnless(pattern_is_regexp, &next);
+ GotoUnless(IsUndefined(flags), &next);
+
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const name = HeapConstant(isolate->factory()->constructor_string());
+ Node* const value = CallStub(getproperty_callable, context, pattern, name);
+
+ GotoUnless(WordEqual(value, regexp_function), &next);
+ Return(pattern);
+
+ Bind(&next);
+ }
+
+ {
+ Label next(this), if_patternisfastregexp(this),
+ if_patternisslowregexp(this);
+ GotoIf(TaggedIsSmi(pattern), &next);
+
+ GotoIf(HasInstanceType(pattern, JS_REGEXP_TYPE), &if_patternisfastregexp);
+
+ Branch(pattern_is_regexp, &if_patternisslowregexp, &next);
+
+ Bind(&if_patternisfastregexp);
+ {
+ Node* const source = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+ var_pattern.Bind(source);
+
+ {
+ Label inner_next(this);
+ GotoUnless(IsUndefined(flags), &inner_next);
+
+ Node* const value = FlagsGetter(context, pattern, true);
+ var_flags.Bind(value);
+ Goto(&inner_next);
+
+ Bind(&inner_next);
+ }
+
+ Goto(&next);
+ }
+
+ Bind(&if_patternisslowregexp);
+ {
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+
+ {
+ Node* const name = HeapConstant(isolate->factory()->source_string());
+ Node* const value =
+ CallStub(getproperty_callable, context, pattern, name);
+ var_pattern.Bind(value);
+ }
+
+ {
+ Label inner_next(this);
+ GotoUnless(IsUndefined(flags), &inner_next);
+
+ Node* const name = HeapConstant(isolate->factory()->flags_string());
+ Node* const value =
+ CallStub(getproperty_callable, context, pattern, name);
+ var_flags.Bind(value);
+ Goto(&inner_next);
+
+ Bind(&inner_next);
+ }
+
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
+
+  // Allocate the receiver object.
+
+ Variable var_regexp(this, MachineRepresentation::kTagged);
+ {
+ Label allocate_jsregexp(this), allocate_generic(this, Label::kDeferred),
+ next(this);
+ Branch(WordEqual(var_new_target.value(), regexp_function),
+ &allocate_jsregexp, &allocate_generic);
+
+ Bind(&allocate_jsregexp);
+ {
+ Node* const initial_map = LoadObjectField(
+ regexp_function, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const regexp = AllocateJSObjectFromMap(initial_map);
+ var_regexp.Bind(regexp);
+ Goto(&next);
+ }
+
+ Bind(&allocate_generic);
+ {
+ ConstructorBuiltinsAssembler constructor_assembler(this->state());
+ Node* const regexp = constructor_assembler.EmitFastNewObject(
+ context, regexp_function, var_new_target.value());
+ var_regexp.Bind(regexp);
+ Goto(&next);
+ }
+
+ Bind(&next);
+ }
+
+ Node* const result = RegExpInitialize(context, var_regexp.value(),
+ var_pattern.value(), var_flags.value());
+ Return(result);
+}
+
+// ES#sec-regexp.prototype.compile
+// RegExp.prototype.compile ( pattern, flags )
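+//
+// Reinitializes the receiver in place. If {pattern} is itself a JSRegExp,
+// {flags} must be undefined (otherwise a TypeError is thrown), and both
+// pattern and flags are copied from it before calling RegExpInitialize.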
+TF_BUILTIN(RegExpPrototypeCompile, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_pattern = Parameter(1);
+ Node* const maybe_flags = Parameter(2);
+ Node* const context = Parameter(5);
+
+ ThrowIfNotInstanceType(context, maybe_receiver, JS_REGEXP_TYPE,
+ "RegExp.prototype.compile");
+ Node* const receiver = maybe_receiver;
+
+ Variable var_flags(this, MachineRepresentation::kTagged);
+ Variable var_pattern(this, MachineRepresentation::kTagged);
+
+ var_flags.Bind(maybe_flags);
+ var_pattern.Bind(maybe_pattern);
+
+ // Handle a JSRegExp pattern.
+ {
+ Label next(this);
+
+ GotoIf(TaggedIsSmi(maybe_pattern), &next);
+ GotoUnless(HasInstanceType(maybe_pattern, JS_REGEXP_TYPE), &next);
+
+ Node* const pattern = maybe_pattern;
+
+ // {maybe_flags} must be undefined in this case, otherwise throw.
+ {
+ Label next(this);
+ GotoIf(IsUndefined(maybe_flags), &next);
- Handle<Object> recv = args.receiver();
- if (!recv->IsJSRegExp()) {
- Handle<JSFunction> regexp_fun = isolate->regexp_function();
- if (*recv == regexp_fun->prototype()) {
- isolate->CountUsage(v8::Isolate::kRegExpPrototypeSourceGetter);
- return *isolate->factory()->NewStringFromAsciiChecked("(?:)");
+ Node* const message_id = SmiConstant(MessageTemplate::kRegExpFlags);
+ TailCallRuntime(Runtime::kThrowTypeError, context, message_id);
+
+ Bind(&next);
}
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kRegExpNonRegExp,
- isolate->factory()->NewStringFromAsciiChecked(
- "RegExp.prototype.source")));
+
+ Node* const new_flags = FlagsGetter(context, pattern, true);
+ Node* const new_pattern = LoadObjectField(pattern, JSRegExp::kSourceOffset);
+
+ var_flags.Bind(new_flags);
+ var_pattern.Bind(new_pattern);
+
+ Goto(&next);
+ Bind(&next);
}
- Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(recv);
- return regexp->source();
+ Node* const result = RegExpInitialize(context, receiver, var_pattern.value(),
+ var_flags.value());
+ Return(result);
+}
+
+// ES6 21.2.5.10.
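+//
+// Reading "source" on a JSRegExp instance returns its source field. As a
+// web-compatibility special case, reading it on the initial RegExp.prototype
+// returns "(?:)" (and increments a use counter); any other receiver throws.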
+TF_BUILTIN(RegExpPrototypeSourceGetter, RegExpBuiltinsAssembler) {
+ Node* const receiver = Parameter(0);
+ Node* const context = Parameter(3);
+
+  // Check whether the receiver is a JSRegExp instance.
+ Label if_isjsregexp(this), if_isnotjsregexp(this, Label::kDeferred);
+
+ GotoIf(TaggedIsSmi(receiver), &if_isnotjsregexp);
+ Branch(HasInstanceType(receiver, JS_REGEXP_TYPE), &if_isjsregexp,
+ &if_isnotjsregexp);
+
+ Bind(&if_isjsregexp);
+ {
+ Node* const source = LoadObjectField(receiver, JSRegExp::kSourceOffset);
+ Return(source);
+ }
+
+ Bind(&if_isnotjsregexp);
+ {
+ Isolate* isolate = this->isolate();
+ Node* const native_context = LoadNativeContext(context);
+ Node* const regexp_fun =
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const initial_prototype = LoadMapPrototype(initial_map);
+
+ Label if_isprototype(this), if_isnotprototype(this);
+ Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+ &if_isnotprototype);
+
+ Bind(&if_isprototype);
+ {
+ const int counter = v8::Isolate::kRegExpPrototypeSourceGetter;
+ Node* const counter_smi = SmiConstant(counter);
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+
+ Node* const result =
+ HeapConstant(isolate->factory()->NewStringFromAsciiChecked("(?:)"));
+ Return(result);
+ }
+
+ Bind(&if_isnotprototype);
+ {
+ Node* const message_id =
+ SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const method_name_str =
+ HeapConstant(isolate->factory()->NewStringFromAsciiChecked(
+ "RegExp.prototype.source"));
+ TailCallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str);
+ }
+ }
}
BUILTIN(RegExpPrototypeToString) {
@@ -781,126 +948,166 @@ BUILTIN(RegExpPrototypeToString) {
RETURN_RESULT_OR_FAILURE(isolate, builder.Finish());
}
-// ES6 21.2.4.2.
-BUILTIN(RegExpPrototypeSpeciesGetter) {
- HandleScope scope(isolate);
- return *args.receiver();
+// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
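+// The flags live in a Smi bitfield on the JSRegExp, so a single SmiAnd
+// against the flag's mask suffices and no user-observable code can run.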
+Node* RegExpBuiltinsAssembler::FastFlagGetter(Node* const regexp,
+ JSRegExp::Flag flag) {
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Node* const mask = SmiConstant(Smi::FromInt(flag));
+ Node* const is_flag_set = WordNotEqual(SmiAnd(flags, mask), smi_zero);
+
+ return is_flag_set;
}
-namespace {
+// Load through the GetProperty stub.
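+// Unlike the fast path, the property load can run arbitrary user code
+// (flag getters may have been redefined); the result is coerced to a
+// boolean via ToBoolean semantics.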
+Node* RegExpBuiltinsAssembler::SlowFlagGetter(Node* const context,
+ Node* const regexp,
+ JSRegExp::Flag flag) {
+ Factory* factory = isolate()->factory();
-// Fast-path implementation for flag checks on an unmodified JSRegExp instance.
-compiler::Node* FastFlagGetter(CodeStubAssembler* a,
- compiler::Node* const regexp,
- JSRegExp::Flag flag) {
- typedef compiler::Node Node;
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
- Node* const mask = a->SmiConstant(Smi::FromInt(flag));
- Node* const is_flag_set = a->WordNotEqual(a->WordAnd(flags, mask), smi_zero);
+ Node* name;
- return is_flag_set;
+ switch (flag) {
+ case JSRegExp::kGlobal:
+ name = HeapConstant(factory->global_string());
+ break;
+ case JSRegExp::kIgnoreCase:
+ name = HeapConstant(factory->ignoreCase_string());
+ break;
+ case JSRegExp::kMultiline:
+ name = HeapConstant(factory->multiline_string());
+ break;
+ case JSRegExp::kSticky:
+ name = HeapConstant(factory->sticky_string());
+ break;
+ case JSRegExp::kUnicode:
+ name = HeapConstant(factory->unicode_string());
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate());
+ Node* const value = CallStub(getproperty_callable, context, regexp, name);
+
+ Label if_true(this), if_false(this);
+ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
+
+ Bind(&if_true);
+ {
+ var_result.Bind(Int32Constant(1));
+ Goto(&out);
+ }
+
+ Bind(&if_false);
+ {
+ var_result.Bind(Int32Constant(0));
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return var_result.value();
}
-void Generate_FlagGetter(CodeStubAssembler* a, JSRegExp::Flag flag,
- v8::Isolate::UseCounterFeature counter,
- const char* method_name) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::FlagGetter(Node* const context,
+ Node* const regexp,
+ JSRegExp::Flag flag,
+ bool is_fastpath) {
+ return is_fastpath ? FastFlagGetter(regexp, flag)
+ : SlowFlagGetter(context, regexp, flag);
+}
- Node* const receiver = a->Parameter(0);
- Node* const context = a->Parameter(3);
+void RegExpBuiltinsAssembler::FlagGetter(JSRegExp::Flag flag,
+ v8::Isolate::UseCounterFeature counter,
+ const char* method_name) {
+ Node* const receiver = Parameter(0);
+ Node* const context = Parameter(3);
- Isolate* isolate = a->isolate();
+ Isolate* isolate = this->isolate();
// Check whether we have an unmodified regexp instance.
- Label if_isunmodifiedjsregexp(a),
- if_isnotunmodifiedjsregexp(a, Label::kDeferred);
+ Label if_isunmodifiedjsregexp(this),
+ if_isnotunmodifiedjsregexp(this, Label::kDeferred);
- a->GotoIf(a->TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
+ GotoIf(TaggedIsSmi(receiver), &if_isnotunmodifiedjsregexp);
- Node* const receiver_map = a->LoadMap(receiver);
- Node* const instance_type = a->LoadMapInstanceType(receiver_map);
+ Node* const receiver_map = LoadMap(receiver);
+ Node* const instance_type = LoadMapInstanceType(receiver_map);
- a->Branch(a->Word32Equal(instance_type, a->Int32Constant(JS_REGEXP_TYPE)),
- &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
+ Branch(Word32Equal(instance_type, Int32Constant(JS_REGEXP_TYPE)),
+ &if_isunmodifiedjsregexp, &if_isnotunmodifiedjsregexp);
- a->Bind(&if_isunmodifiedjsregexp);
+ Bind(&if_isunmodifiedjsregexp);
{
// Refer to JSRegExp's flag property on the fast-path.
- Node* const is_flag_set = FastFlagGetter(a, receiver, flag);
- a->Return(a->Select(is_flag_set, a->TrueConstant(), a->FalseConstant()));
+ Node* const is_flag_set = FastFlagGetter(receiver, flag);
+ Return(SelectBooleanConstant(is_flag_set));
}
- a->Bind(&if_isnotunmodifiedjsregexp);
+ Bind(&if_isnotunmodifiedjsregexp);
{
- Node* const native_context = a->LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
Node* const regexp_fun =
- a->LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
- Node* const initial_map = a->LoadObjectField(
- regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
- Node* const initial_prototype = a->LoadMapPrototype(initial_map);
+ LoadContextElement(native_context, Context::REGEXP_FUNCTION_INDEX);
+ Node* const initial_map =
+ LoadObjectField(regexp_fun, JSFunction::kPrototypeOrInitialMapOffset);
+ Node* const initial_prototype = LoadMapPrototype(initial_map);
- Label if_isprototype(a), if_isnotprototype(a);
- a->Branch(a->WordEqual(receiver, initial_prototype), &if_isprototype,
- &if_isnotprototype);
+ Label if_isprototype(this), if_isnotprototype(this);
+ Branch(WordEqual(receiver, initial_prototype), &if_isprototype,
+ &if_isnotprototype);
- a->Bind(&if_isprototype);
+ Bind(&if_isprototype);
{
- Node* const counter_smi = a->SmiConstant(Smi::FromInt(counter));
- a->CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
- a->Return(a->UndefinedConstant());
+ Node* const counter_smi = SmiConstant(Smi::FromInt(counter));
+ CallRuntime(Runtime::kIncrementUseCounter, context, counter_smi);
+ Return(UndefinedConstant());
}
- a->Bind(&if_isnotprototype);
+ Bind(&if_isnotprototype);
{
Node* const message_id =
- a->SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
- Node* const method_name_str = a->HeapConstant(
+ SmiConstant(Smi::FromInt(MessageTemplate::kRegExpNonRegExp));
+ Node* const method_name_str = HeapConstant(
isolate->factory()->NewStringFromAsciiChecked(method_name));
- a->CallRuntime(Runtime::kThrowTypeError, context, message_id,
- method_name_str);
- a->Return(a->UndefinedConstant()); // Never reached.
+ CallRuntime(Runtime::kThrowTypeError, context, message_id,
+ method_name_str);
+ Return(UndefinedConstant()); // Never reached.
}
}
}
-} // namespace
-
// ES6 21.2.5.4.
-void Builtins::Generate_RegExpPrototypeGlobalGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kGlobal,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.global");
+TF_BUILTIN(RegExpPrototypeGlobalGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kGlobal, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.global");
}
// ES6 21.2.5.5.
-void Builtins::Generate_RegExpPrototypeIgnoreCaseGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kIgnoreCase,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.ignoreCase");
+TF_BUILTIN(RegExpPrototypeIgnoreCaseGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kIgnoreCase, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.ignoreCase");
}
// ES6 21.2.5.7.
-void Builtins::Generate_RegExpPrototypeMultilineGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kMultiline,
- v8::Isolate::kRegExpPrototypeOldFlagGetter,
- "RegExp.prototype.multiline");
+TF_BUILTIN(RegExpPrototypeMultilineGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kMultiline, v8::Isolate::kRegExpPrototypeOldFlagGetter,
+ "RegExp.prototype.multiline");
}
// ES6 21.2.5.12.
-void Builtins::Generate_RegExpPrototypeStickyGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kSticky,
- v8::Isolate::kRegExpPrototypeStickyGetter,
- "RegExp.prototype.sticky");
+TF_BUILTIN(RegExpPrototypeStickyGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kSticky, v8::Isolate::kRegExpPrototypeStickyGetter,
+ "RegExp.prototype.sticky");
}
// ES6 21.2.5.15.
-void Builtins::Generate_RegExpPrototypeUnicodeGetter(CodeStubAssembler* a) {
- Generate_FlagGetter(a, JSRegExp::kUnicode,
- v8::Isolate::kRegExpPrototypeUnicodeGetter,
- "RegExp.prototype.unicode");
+TF_BUILTIN(RegExpPrototypeUnicodeGetter, RegExpBuiltinsAssembler) {
+ FlagGetter(JSRegExp::kUnicode, v8::Isolate::kRegExpPrototypeUnicodeGetter,
+ "RegExp.prototype.unicode");
}
// The properties $1..$9 are the first nine capturing substrings of the last
@@ -986,722 +1193,977 @@ BUILTIN(RegExpRightContextGetter) {
return *isolate->factory()->NewSubString(last_subject, start_index, len);
}
-namespace {
-
// ES#sec-regexpexec Runtime Semantics: RegExpExec ( R, S )
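+// Spec steps:
+//   1. Let exec be ? Get(R, "exec").
+//   2. If IsCallable(exec), let result be ? Call(exec, R, « S »); throw a
+//      TypeError unless result is an Object or null, and return it.
+//   3. Otherwise, require R to be a JSRegExp and use the builtin exec.
+// The fast path below skips the "exec" property lookup entirely for
+// unmodified instances.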
-compiler::Node* RegExpExec(CodeStubAssembler* a, compiler::Node* context,
- compiler::Node* recv, compiler::Node* string) {
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+Node* RegExpBuiltinsAssembler::RegExpExec(Node* context, Node* regexp,
+ Node* string) {
+ Isolate* isolate = this->isolate();
- Isolate* isolate = a->isolate();
+ Node* const null = NullConstant();
- Node* const null = a->NullConstant();
+ Variable var_result(this, MachineRepresentation::kTagged);
+ Label out(this), if_isfastpath(this), if_isslowpath(this);
- Variable var_result(a, MachineRepresentation::kTagged);
- Label out(a), call_builtin_exec(a), slow_path(a, Label::kDeferred);
+ Node* const map = LoadMap(regexp);
+ BranchIfFastRegExp(context, map, &if_isfastpath, &if_isslowpath);
- Node* const map = a->LoadMap(recv);
- BranchIfFastPath(a, context, map, &call_builtin_exec, &slow_path);
-
- a->Bind(&call_builtin_exec);
+ Bind(&if_isfastpath);
{
- Node* const result = RegExpPrototypeExecInternal(a, context, recv, string);
+ Node* const result = RegExpPrototypeExecBody(context, regexp, string, true);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&slow_path);
+ Bind(&if_isslowpath);
{
// Take the slow path of fetching the exec property, calling it, and
// verifying its return value.
// Get the exec property.
- Node* const name = a->HeapConstant(isolate->factory()->exec_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Node* const exec = a->CallStub(getproperty_callable, context, recv, name);
+ Node* const name = HeapConstant(isolate->factory()->exec_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const exec = CallStub(getproperty_callable, context, regexp, name);
// Is {exec} callable?
- Label if_iscallable(a), if_isnotcallable(a);
+ Label if_iscallable(this), if_isnotcallable(this);
- a->GotoIf(a->TaggedIsSmi(exec), &if_isnotcallable);
+ GotoIf(TaggedIsSmi(exec), &if_isnotcallable);
- Node* const exec_map = a->LoadMap(exec);
- a->Branch(a->IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
+ Node* const exec_map = LoadMap(exec);
+ Branch(IsCallableMap(exec_map), &if_iscallable, &if_isnotcallable);
- a->Bind(&if_iscallable);
+ Bind(&if_iscallable);
{
Callable call_callable = CodeFactory::Call(isolate);
- Node* const result =
- a->CallJS(call_callable, context, exec, recv, string);
+ Node* const result = CallJS(call_callable, context, exec, regexp, string);
var_result.Bind(result);
- a->GotoIf(a->WordEqual(result, null), &out);
+ GotoIf(WordEqual(result, null), &out);
- ThrowIfNotJSReceiver(a, isolate, context, result,
+ ThrowIfNotJSReceiver(context, result,
MessageTemplate::kInvalidRegExpExecResult, "unused");
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isnotcallable);
+ Bind(&if_isnotcallable);
{
- a->ThrowIfNotInstanceType(context, recv, JS_REGEXP_TYPE,
- "RegExp.prototype.exec");
- a->Goto(&call_builtin_exec);
+ ThrowIfNotInstanceType(context, regexp, JS_REGEXP_TYPE,
+ "RegExp.prototype.exec");
+
+ Node* const result =
+ RegExpPrototypeExecBody(context, regexp, string, false);
+ var_result.Bind(result);
+ Goto(&out);
}
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype.test
// RegExp.prototype.test ( S )
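+// test() returns true iff exec produces a match; on the fast path the
+// result object is never materialized.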
-void Builtins::Generate_RegExpPrototypeTest(CodeStubAssembler* a) {
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+TF_BUILTIN(RegExpPrototypeTest, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
// Ensure {maybe_receiver} is a JSReceiver.
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.test");
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.test");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = a->ToString(context, maybe_string);
+ Node* const string = ToString(context, maybe_string);
- // Call exec.
- Node* const match_indices = RegExpExec(a, context, receiver, string);
-
- // Return true iff exec matched successfully.
- Node* const result = a->Select(a->WordEqual(match_indices, a->NullConstant()),
- a->FalseConstant(), a->TrueConstant());
- a->Return(result);
-}
-
-// ES#sec-regexp.prototype-@@match
-// RegExp.prototype [ @@match ] ( string )
-BUILTIN(RegExpPrototypeMatch) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@match");
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
+ Bind(&fast_path);
+ {
+ Label if_didnotmatch(this);
+ RegExpPrototypeExecBodyWithoutResult(context, receiver, string,
+ &if_didnotmatch, true);
+ Return(TrueConstant());
- Handle<String> string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
- Object::ToString(isolate, string_obj));
+ Bind(&if_didnotmatch);
+ Return(FalseConstant());
+ }
- Handle<Object> global_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, global_obj,
- JSReceiver::GetProperty(recv, isolate->factory()->global_string()));
- const bool global = global_obj->BooleanValue();
+ Bind(&slow_path);
+ {
+ // Call exec.
+ Node* const match_indices = RegExpExec(context, receiver, string);
- if (!global) {
- RETURN_RESULT_OR_FAILURE(
- isolate,
- RegExpUtils::RegExpExec(isolate, recv, string,
- isolate->factory()->undefined_value()));
+ // Return true iff exec matched successfully.
+ Node* const result =
+ SelectBooleanConstant(WordNotEqual(match_indices, NullConstant()));
+ Return(result);
}
+}
- Handle<Object> unicode_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, unicode_obj,
- JSReceiver::GetProperty(recv, isolate->factory()->unicode_string()));
- const bool unicode = unicode_obj->BooleanValue();
-
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- RegExpUtils::SetLastIndex(isolate, recv, 0));
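+// ES#sec-advancestringindex
+// AdvanceStringIndex ( S, index, unicode )
+//
+// Returns {index} + 1, or {index} + 2 when {is_unicode} is true and the
+// code units at {index} and {index} + 1 form a lead/trail surrogate pair.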
+Node* RegExpBuiltinsAssembler::AdvanceStringIndex(Node* const string,
+ Node* const index,
+ Node* const is_unicode) {
+ Variable var_result(this, MachineRepresentation::kTagged);
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems =
- isolate->factory()->NewFixedArrayWithHoles(kInitialArraySize);
+ // Default to last_index + 1.
+ Node* const index_plus_one = SmiAdd(index, SmiConstant(1));
+ var_result.Bind(index_plus_one);
- int n = 0;
- for (;; n++) {
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- RegExpUtils::RegExpExec(isolate, recv, string,
- isolate->factory()->undefined_value()));
+ Label if_isunicode(this), out(this);
+ Branch(is_unicode, &if_isunicode, &out);
- if (result->IsNull(isolate)) {
- if (n == 0) return isolate->heap()->null_value();
- break;
- }
+ Bind(&if_isunicode);
+ {
+ Node* const string_length = LoadStringLength(string);
+ GotoUnless(SmiLessThan(index_plus_one, string_length), &out);
- Handle<Object> match_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
- Object::GetElement(isolate, result, 0));
+ Node* const lead = StringCharCodeAt(string, index);
+ GotoUnless(Word32Equal(Word32And(lead, Int32Constant(0xFC00)),
+ Int32Constant(0xD800)),
+ &out);
- Handle<String> match;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match,
- Object::ToString(isolate, match_obj));
+ Node* const trail = StringCharCodeAt(string, index_plus_one);
+ GotoUnless(Word32Equal(Word32And(trail, Int32Constant(0xFC00)),
+ Int32Constant(0xDC00)),
+ &out);
- elems = FixedArray::SetAndGrow(elems, n, match);
+ // At a surrogate pair, return index + 2.
+ Node* const index_plus_two = SmiAdd(index, SmiConstant(2));
+ var_result.Bind(index_plus_two);
- if (match->length() == 0) {
- RETURN_FAILURE_ON_EXCEPTION(isolate, RegExpUtils::SetAdvancedStringIndex(
- isolate, recv, string, unicode));
- }
+ Goto(&out);
}
- elems->Shrink(n);
- return *isolate->factory()->NewJSArrayWithElements(elems);
+ Bind(&out);
+ return var_result.value();
}
namespace {
-void Generate_RegExpPrototypeSearchBody(CodeStubAssembler* a,
- compiler::Node* const receiver,
- compiler::Node* const string,
- compiler::Node* const context,
- bool is_fastpath) {
+// Utility class implementing a growable fixed array through CSA.
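+// The backing FixedArray starts at a small capacity and grows geometrically
+// on Push; the array, its length, and its capacity are kept in Variables so
+// they can be threaded through labels and loop back-edges.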
+class GrowableFixedArray {
typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
- Isolate* const isolate = a->isolate();
+ public:
+ explicit GrowableFixedArray(CodeStubAssembler* a)
+ : assembler_(a),
+ var_array_(a, MachineRepresentation::kTagged),
+ var_length_(a, MachineType::PointerRepresentation()),
+ var_capacity_(a, MachineType::PointerRepresentation()) {
+ Initialize();
+ }
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* length() const { return var_length_.value(); }
- // Grab the initial value of last index.
- Node* const previous_last_index =
- is_fastpath ? FastLoadLastIndex(a, context, receiver)
- : SlowLoadLastIndex(a, context, receiver);
+ Variable* var_array() { return &var_array_; }
+ Variable* var_length() { return &var_length_; }
+ Variable* var_capacity() { return &var_capacity_; }
- // Ensure last index is 0.
- if (is_fastpath) {
- FastStoreLastIndex(a, context, receiver, smi_zero);
- } else {
- Label next(a);
- a->GotoIf(a->SameValue(previous_last_index, smi_zero, context), &next);
+ void Push(Node* const value) {
+ CodeStubAssembler* a = assembler_;
- SlowStoreLastIndex(a, context, receiver, smi_zero);
- a->Goto(&next);
- a->Bind(&next);
- }
+ Node* const length = var_length_.value();
+ Node* const capacity = var_capacity_.value();
- // Call exec.
- Node* const match_indices =
- is_fastpath ? RegExpPrototypeExecInternal(a, context, receiver, string)
- : RegExpExec(a, context, receiver, string);
+ Label grow(a), store(a);
+ a->Branch(a->IntPtrEqual(capacity, length), &grow, &store);
- // Reset last index if necessary.
- if (is_fastpath) {
- FastStoreLastIndex(a, context, receiver, previous_last_index);
- } else {
- Label next(a);
- Node* const current_last_index = SlowLoadLastIndex(a, context, receiver);
+ a->Bind(&grow);
+ {
+ Node* const new_capacity = NewCapacity(a, capacity);
+ Node* const new_array = ResizeFixedArray(length, new_capacity);
- a->GotoIf(a->SameValue(current_last_index, previous_last_index, context),
- &next);
+ var_capacity_.Bind(new_capacity);
+ var_array_.Bind(new_array);
+ a->Goto(&store);
+ }
- SlowStoreLastIndex(a, context, receiver, previous_last_index);
- a->Goto(&next);
- a->Bind(&next);
- }
+ a->Bind(&store);
+ {
+ Node* const array = var_array_.value();
+ a->StoreFixedArrayElement(array, length, value);
- // Return -1 if no match was found.
- {
- Label next(a);
- a->GotoUnless(a->WordEqual(match_indices, a->NullConstant()), &next);
- a->Return(a->SmiConstant(-1));
- a->Bind(&next);
+ Node* const new_length = a->IntPtrAdd(length, a->IntPtrConstant(1));
+ var_length_.Bind(new_length);
+ }
}
- // Return the index of the match.
- {
- Label fast_result(a), slow_result(a, Label::kDeferred);
+ Node* ToJSArray(Node* const context) {
+ CodeStubAssembler* a = assembler_;
- Node* const native_context = a->LoadNativeContext(context);
- Node* const initial_regexp_result_map =
- a->LoadContextElement(native_context, Context::REGEXP_RESULT_MAP_INDEX);
- Node* const match_indices_map = a->LoadMap(match_indices);
+ const ElementsKind kind = FAST_ELEMENTS;
- a->Branch(a->WordEqual(match_indices_map, initial_regexp_result_map),
- &fast_result, &slow_result);
+ Node* const native_context = a->LoadNativeContext(context);
+ Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
- a->Bind(&fast_result);
+ // Shrink to fit if necessary.
{
- Node* const index =
- a->LoadObjectField(match_indices, JSRegExpResult::kIndexOffset,
- MachineType::AnyTagged());
- a->Return(index);
- }
+ Label next(a);
- a->Bind(&slow_result);
- {
- Node* const name = a->HeapConstant(isolate->factory()->index_string());
- Callable getproperty_callable = CodeFactory::GetProperty(a->isolate());
- Node* const index =
- a->CallStub(getproperty_callable, context, match_indices, name);
- a->Return(index);
+ Node* const length = var_length_.value();
+ Node* const capacity = var_capacity_.value();
+
+ a->GotoIf(a->WordEqual(length, capacity), &next);
+
+ Node* const array = ResizeFixedArray(length, length);
+ var_array_.Bind(array);
+ var_capacity_.Bind(length);
+ a->Goto(&next);
+
+ a->Bind(&next);
}
- }
-}
-} // namespace
+ Node* const result_length = a->SmiTag(length());
+ Node* const result = a->AllocateUninitializedJSArrayWithoutElements(
+ kind, array_map, result_length, nullptr);
-// ES#sec-regexp.prototype-@@search
-// RegExp.prototype [ @@search ] ( string )
-void Builtins::Generate_RegExpPrototypeSearch(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ // The backing store was already shrunk to fit above, so it can be
+ // installed directly as the elements array.
- Isolate* const isolate = a->isolate();
+ a->StoreObjectField(result, JSObject::kElementsOffset, var_array_.value());
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const context = a->Parameter(4);
+ return result;
+ }
- // Ensure {maybe_receiver} is a JSReceiver.
- Node* const map =
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@search");
- Node* const receiver = maybe_receiver;
+ private:
+ void Initialize() {
+ CodeStubAssembler* a = assembler_;
- // Convert {maybe_string} to a String.
- Node* const string = a->ToString(context, maybe_string);
+ const ElementsKind kind = FAST_ELEMENTS;
- Label fast_path(a), slow_path(a);
- BranchIfFastPath(a, context, map, &fast_path, &slow_path);
+ static const int kInitialArraySize = 8;
+ Node* const capacity = a->IntPtrConstant(kInitialArraySize);
+ Node* const array = a->AllocateFixedArray(kind, capacity);
- a->Bind(&fast_path);
- Generate_RegExpPrototypeSearchBody(a, receiver, string, context, true);
+ a->FillFixedArrayWithValue(kind, array, a->IntPtrConstant(0), capacity,
+ Heap::kTheHoleValueRootIndex);
- a->Bind(&slow_path);
- Generate_RegExpPrototypeSearchBody(a, receiver, string, context, false);
-}
+ var_array_.Bind(array);
+ var_capacity_.Bind(capacity);
+ var_length_.Bind(a->IntPtrConstant(0));
+ }
-namespace {
+ Node* NewCapacity(CodeStubAssembler* a, Node* const current_capacity) {
+ CSA_ASSERT(a, a->IntPtrGreaterThan(current_capacity, a->IntPtrConstant(0)));
+
+ // Growth rate is analogous to JSObject::NewElementsCapacity:
+ // new_capacity = (current_capacity + (current_capacity >> 1)) + 16.
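+ // Starting from the initial capacity of 8 this yields the sequence
+ // 8 -> 28 -> 58 -> 103 -> ..., giving amortized O(1) pushes.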
-MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
- Handle<Object> object,
- uint32_t* out) {
- if (object->IsUndefined(isolate)) {
- *out = kMaxUInt32;
- return object;
+ Node* const new_capacity = a->IntPtrAdd(
+ a->IntPtrAdd(current_capacity, a->WordShr(current_capacity, 1)),
+ a->IntPtrConstant(16));
+
+ return new_capacity;
}
- Handle<Object> number;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
- *out = NumberToUint32(*number);
- return object;
-}
+ // Creates a new array with {new_capacity} and copies the first
+ // {element_count} elements from the current array.
+ Node* ResizeFixedArray(Node* const element_count, Node* const new_capacity) {
+ CodeStubAssembler* a = assembler_;
-bool AtSurrogatePair(Isolate* isolate, Handle<String> string, int index) {
- if (index + 1 >= string->length()) return false;
- const uint16_t first = string->Get(index);
- if (first < 0xD800 || first > 0xDBFF) return false;
- const uint16_t second = string->Get(index + 1);
- return (second >= 0xDC00 && second <= 0xDFFF);
-}
+ CSA_ASSERT(a, a->IntPtrGreaterThan(element_count, a->IntPtrConstant(0)));
+ CSA_ASSERT(a, a->IntPtrGreaterThan(new_capacity, a->IntPtrConstant(0)));
+ CSA_ASSERT(a, a->IntPtrGreaterThanOrEqual(new_capacity, element_count));
-Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
- Handle<FixedArray> elems,
- int num_elems) {
- elems->Shrink(num_elems);
- return isolate->factory()->NewJSArrayWithElements(elems);
-}
+ const ElementsKind kind = FAST_ELEMENTS;
+ const WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER;
+ const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
+ const CodeStubAssembler::AllocationFlags flags =
+ CodeStubAssembler::kAllowLargeObjectAllocation;
-MaybeHandle<JSArray> RegExpSplit(Isolate* isolate, Handle<JSRegExp> regexp,
- Handle<String> string,
- Handle<Object> limit_obj) {
- Factory* factory = isolate->factory();
+ Node* const from_array = var_array_.value();
+ Node* const to_array =
+ a->AllocateFixedArray(kind, new_capacity, mode, flags);
+ a->CopyFixedArrayElements(kind, from_array, kind, to_array, element_count,
+ new_capacity, barrier_mode, mode);
- uint32_t limit;
- RETURN_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit), JSArray);
+ return to_array;
+ }
- const int length = string->length();
+ private:
+ CodeStubAssembler* const assembler_;
+ Variable var_array_;
+ Variable var_length_;
+ Variable var_capacity_;
+};
- if (limit == 0) return factory->NewJSArray(0);
+} // namespace
+
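+// @@match body shared by the fast and generic paths: a non-global regexp
+// simply delegates to exec, while a global regexp resets lastIndex and then
+// loops over exec results, collecting matched strings (and advancing past
+// empty matches via AdvanceStringIndex) until exec returns null.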
+void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ const bool is_fastpath) {
+ Isolate* const isolate = this->isolate();
- Handle<RegExpMatchInfo> last_match_info = isolate->regexp_last_match_info();
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- if (length == 0) {
- Handle<Object> match_indices;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, match_indices,
- RegExpImpl::Exec(regexp, string, 0, last_match_info), JSArray);
+ Node* const is_global =
+ FlagGetter(context, regexp, JSRegExp::kGlobal, is_fastpath);
- if (!match_indices->IsNull(isolate)) return factory->NewJSArray(0);
+ Label if_isglobal(this), if_isnotglobal(this);
+ Branch(is_global, &if_isglobal, &if_isnotglobal);
- Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
- elems->set(0, *string);
- return factory->NewJSArrayWithElements(elems);
+ Bind(&if_isnotglobal);
+ {
+ Node* const result =
+ is_fastpath ? RegExpPrototypeExecBody(context, regexp, string, true)
+ : RegExpExec(context, regexp, string);
+ Return(result);
}
- int current_index = 0;
- int start_index = 0;
- int start_match = 0;
+ Bind(&if_isglobal);
+ {
+ Node* const is_unicode =
+ FlagGetter(context, regexp, JSRegExp::kUnicode, is_fastpath);
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
- int num_elems = 0;
+ StoreLastIndex(context, regexp, smi_zero, is_fastpath);
- while (true) {
- if (start_index == length) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ // Allocate an array to store the resulting match strings.
- Handle<Object> match_indices_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, match_indices_obj,
- RegExpImpl::Exec(regexp, string, start_index,
- isolate->regexp_last_match_info()),
- JSArray);
-
- if (match_indices_obj->IsNull(isolate)) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ GrowableFixedArray array(this);
- auto match_indices = Handle<RegExpMatchInfo>::cast(match_indices_obj);
+ // Loop preparations. Within the loop, collect results from RegExpExec
+ // and store match strings in the array.
- start_match = match_indices->Capture(0);
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity()};
+ Label loop(this, 3, vars), out(this);
+ Goto(&loop);
- if (start_match == length) {
- Handle<String> substr =
- factory->NewSubString(string, current_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- break;
- }
+ Bind(&loop);
+ {
+ Variable var_match(this, MachineRepresentation::kTagged);
- const int end_index = match_indices->Capture(1);
+ Label if_didmatch(this), if_didnotmatch(this);
+ if (is_fastpath) {
+ // On the fast path, grab the matching string from the raw match index
+ // array.
+ Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, true);
- if (start_index == end_index && end_index == current_index) {
- const bool unicode = (regexp->GetFlags() & JSRegExp::kUnicode) != 0;
- if (unicode && AtSurrogatePair(isolate, string, start_index)) {
- start_index += 2;
+ Node* const match_from = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const match_to = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+ Node* match = SubString(context, string, match_from, match_to);
+ var_match.Bind(match);
+
+ Goto(&if_didmatch);
} else {
- start_index += 1;
+ DCHECK(!is_fastpath);
+ Node* const result = RegExpExec(context, regexp, string);
+
+ Label load_match(this);
+ Branch(WordEqual(result, null), &if_didnotmatch, &load_match);
+
+ Bind(&load_match);
+ {
+ Label fast_result(this), slow_result(this);
+ BranchIfFastRegExpResult(context, LoadMap(result), &fast_result,
+ &slow_result);
+
+ Bind(&fast_result);
+ {
+ Node* const result_fixed_array = LoadElements(result);
+ Node* const match = LoadFixedArrayElement(result_fixed_array, 0);
+
+ // The match is guaranteed to be a string on the fast path.
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(match)));
+
+ var_match.Bind(match);
+ Goto(&if_didmatch);
+ }
+
+ Bind(&slow_result);
+ {
+ // TODO(ishell): Use GetElement stub once it's available.
+ Node* const name = smi_zero;
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const match =
+ CallStub(getproperty_callable, context, result, name);
+
+ var_match.Bind(ToString(context, match));
+ Goto(&if_didmatch);
+ }
+ }
+ }
+
+ Bind(&if_didnotmatch);
+ {
+      // Return null if there were no matches; otherwise just exit the loop.
+ GotoUnless(IntPtrEqual(array.length(), int_zero), &out);
+ Return(null);
+ }
+
+ Bind(&if_didmatch);
+ {
+ Node* match = var_match.value();
+
+ // Store the match, growing the fixed array if needed.
+
+ array.Push(match);
+
+ // Advance last index if the match is the empty string.
+
+ Node* const match_length = LoadStringLength(match);
+ GotoUnless(SmiEqual(match_length, smi_zero), &loop);
+
+ Node* last_index = LoadLastIndex(context, regexp, is_fastpath);
+
+ Callable tolength_callable = CodeFactory::ToLength(isolate);
+ last_index = CallStub(tolength_callable, context, last_index);
+
+ Node* const new_last_index =
+ AdvanceStringIndex(string, last_index, is_unicode);
+
+ StoreLastIndex(context, regexp, new_last_index, is_fastpath);
+
+ Goto(&loop);
}
- continue;
}
+ Bind(&out);
{
- Handle<String> substr =
- factory->NewSubString(string, current_index, start_match);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ // Wrap the match in a JSArray.
+
+ Node* const result = array.ToJSArray(context);
+ Return(result);
}
+ }
+}
+
+// ES#sec-regexp.prototype-@@match
+// RegExp.prototype [ @@match ] ( string )
+TF_BUILTIN(RegExpPrototypeMatch, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@match");
+ Node* const receiver = maybe_receiver;
- if (static_cast<uint32_t>(num_elems) == limit) break;
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- for (int i = 2; i < match_indices->NumberOfCaptureRegisters(); i += 2) {
- const int start = match_indices->Capture(i);
- const int end = match_indices->Capture(i + 1);
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- if (end != -1) {
- Handle<String> substr = factory->NewSubString(string, start, end);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- } else {
- elems = FixedArray::SetAndGrow(elems, num_elems++,
- factory->undefined_value());
- }
+ Bind(&fast_path);
+ RegExpPrototypeMatchBody(context, receiver, string, true);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return NewJSArrayWithElements(isolate, elems, num_elems);
- }
- }
+ Bind(&slow_path);
+ RegExpPrototypeMatchBody(context, receiver, string, false);
+}
- start_index = current_index = end_index;
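+// Fast path for @@search on an unmodified JSRegExp: save lastIndex, run a
+// single exec, restore lastIndex, and return the match index (or -1 when
+// nothing matched).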
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodyFast(
+ Node* const context, Node* const regexp, Node* const string) {
+ // Grab the initial value of last index.
+ Node* const previous_last_index = FastLoadLastIndex(regexp);
+
+ // Ensure last index is 0.
+ FastStoreLastIndex(regexp, SmiConstant(Smi::kZero));
+
+ // Call exec.
+ Label if_didnotmatch(this);
+ Node* const match_indices = RegExpPrototypeExecBodyWithoutResult(
+ context, regexp, string, &if_didnotmatch, true);
+
+ // Successful match.
+ {
+ // Reset last index.
+ FastStoreLastIndex(regexp, previous_last_index);
+
+ // Return the index of the match.
+ Node* const index = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Return(index);
}
- return NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&if_didnotmatch);
+ {
+ // Reset last index and return -1.
+ FastStoreLastIndex(regexp, previous_last_index);
+ Return(SmiConstant(-1));
+ }
}
-// ES##sec-speciesconstructor
-// SpeciesConstructor ( O, defaultConstructor )
-MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
- Isolate* isolate, Handle<JSReceiver> recv,
- Handle<JSFunction> default_ctor) {
- Handle<Object> ctor_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor_obj,
- JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
- Object);
+void RegExpBuiltinsAssembler::RegExpPrototypeSearchBodySlow(
+ Node* const context, Node* const regexp, Node* const string) {
+ Isolate* const isolate = this->isolate();
- if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- if (!ctor_obj->IsJSReceiver()) {
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kConstructorNotReceiver),
- Object);
+ // Grab the initial value of last index.
+ Node* const previous_last_index = SlowLoadLastIndex(context, regexp);
+
+ // Ensure last index is 0.
+ {
+ Label next(this);
+ GotoIf(SameValue(previous_last_index, smi_zero, context), &next);
+
+ SlowStoreLastIndex(context, regexp, smi_zero);
+ Goto(&next);
+ Bind(&next);
}
- Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+ // Call exec.
+ Node* const exec_result = RegExpExec(context, regexp, string);
+
+ // Reset last index if necessary.
+ {
+ Label next(this);
+ Node* const current_last_index = SlowLoadLastIndex(context, regexp);
+
+ GotoIf(SameValue(current_last_index, previous_last_index, context), &next);
- Handle<Object> species;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, species,
- JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
- Object);
+ SlowStoreLastIndex(context, regexp, previous_last_index);
+ Goto(&next);
- if (species->IsNull(isolate) || species->IsUndefined(isolate)) {
- return default_ctor;
+ Bind(&next);
}
- if (species->IsConstructor()) return species;
+ // Return -1 if no match was found.
+ {
+ Label next(this);
+ GotoUnless(WordEqual(exec_result, NullConstant()), &next);
+ Return(SmiConstant(-1));
+ Bind(&next);
+ }
+
+ // Return the index of the match.
+ {
+ Label fast_result(this), slow_result(this, Label::kDeferred);
+ BranchIfFastRegExpResult(context, LoadMap(exec_result), &fast_result,
+ &slow_result);
+
+ Bind(&fast_result);
+ {
+ Node* const index =
+ LoadObjectField(exec_result, JSRegExpResult::kIndexOffset);
+ Return(index);
+ }
- THROW_NEW_ERROR(
- isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+ Bind(&slow_result);
+ {
+ Node* const name = HeapConstant(isolate->factory()->index_string());
+ Callable getproperty_callable = CodeFactory::GetProperty(isolate);
+ Node* const index =
+ CallStub(getproperty_callable, context, exec_result, name);
+ Return(index);
+ }
+ }
}
-} // namespace
+// ES#sec-regexp.prototype-@@search
+// RegExp.prototype [ @@search ] ( string )
+TF_BUILTIN(RegExpPrototypeSearch, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const context = Parameter(4);
-// ES#sec-regexp.prototype-@@split
-// RegExp.prototype [ @@split ] ( string, limit )
-BUILTIN(RegExpPrototypeSplit) {
- HandleScope scope(isolate);
- CHECK_RECEIVER(JSReceiver, recv, "RegExp.prototype.@@split");
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@search");
+ Node* const receiver = maybe_receiver;
- Factory* factory = isolate->factory();
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
- Handle<Object> string_obj = args.atOrUndefined(isolate, 1);
- Handle<Object> limit_obj = args.atOrUndefined(isolate, 2);
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
- Handle<String> string;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string,
- Object::ToString(isolate, string_obj));
+ Bind(&fast_path);
+ RegExpPrototypeSearchBodyFast(context, receiver, string);
- if (RegExpUtils::IsUnmodifiedRegExp(isolate, recv)) {
- RETURN_RESULT_OR_FAILURE(
- isolate,
- RegExpSplit(isolate, Handle<JSRegExp>::cast(recv), string, limit_obj));
- }
+ Bind(&slow_path);
+ RegExpPrototypeSearchBodySlow(context, receiver, string);
+}
- Handle<JSFunction> regexp_fun = isolate->regexp_function();
- Handle<Object> ctor;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+// Generates the fast path for @@split. {regexp} is an unmodified JSRegExp,
+// {string} is a String, and {limit} is a Smi.
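+// Since {regexp} is known to be unmodified, the spec's SpeciesConstructor
+// lookup and splitter allocation are skipped and exec is invoked directly.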
+void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Node* const limit) {
+ Isolate* isolate = this->isolate();
- Handle<Object> flags_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+ Node* const null = NullConstant();
+ Node* const smi_zero = SmiConstant(0);
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_limit = SmiUntag(limit);
- Handle<String> flags;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
- Object::ToString(isolate, flags_obj));
+ const ElementsKind kind = FAST_ELEMENTS;
+ const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
- const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+ Node* const allocation_site = nullptr;
+ Node* const native_context = LoadNativeContext(context);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
- Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
- const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+ Label return_empty_array(this, Label::kDeferred);
- Handle<String> new_flags = flags;
- if (!sticky) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
- factory->NewConsString(flags, y_str));
+ // If limit is zero, return an empty array.
+ {
+    Label next(this);
+ Branch(SmiEqual(limit, smi_zero), &return_empty_array, &next);
+ Bind(&next);
}
- Handle<JSReceiver> splitter;
+ Node* const string_length = LoadStringLength(string);
+
+ // If passed the empty {string}, return either an empty array or a singleton
+ // array depending on whether the {regexp} matches.
{
- const int argc = 2;
+ Label next(this), if_stringisempty(this, Label::kDeferred);
+ Branch(SmiEqual(string_length, smi_zero), &if_stringisempty, &next);
- ScopedVector<Handle<Object>> argv(argc);
- argv[0] = recv;
- argv[1] = new_flags;
+ Bind(&if_stringisempty);
+ {
+ Node* const last_match_info = LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
- Handle<Object> splitter_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+ Callable exec_callable = CodeFactory::RegExpExec(isolate);
+ Node* const match_indices = CallStub(exec_callable, context, regexp,
+ string, smi_zero, last_match_info);
+
+ Label return_singleton_array(this);
+ Branch(WordEqual(match_indices, null), &return_singleton_array,
+ &return_empty_array);
+
+ Bind(&return_singleton_array);
+ {
+ Node* const length = SmiConstant(1);
+ Node* const capacity = IntPtrConstant(1);
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
+
+ Node* const fixed_array = LoadElements(result);
+ StoreFixedArrayElement(fixed_array, 0, string);
- splitter = Handle<JSReceiver>::cast(splitter_obj);
+ Return(result);
+ }
+ }
+
+ Bind(&next);
}
- uint32_t limit;
- RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+ // Loop preparations.
- const int length = string->length();
+ GrowableFixedArray array(this);
- if (limit == 0) return *factory->NewJSArray(0);
+ Variable var_last_matched_until(this, MachineRepresentation::kTagged);
+ Variable var_next_search_from(this, MachineRepresentation::kTagged);
- if (length == 0) {
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
- factory->undefined_value()));
+ var_last_matched_until.Bind(smi_zero);
+ var_next_search_from.Bind(smi_zero);
- if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity(), &var_last_matched_until,
+ &var_next_search_from};
+ const int vars_count = sizeof(vars) / sizeof(vars[0]);
+ Label loop(this, vars_count, vars), push_suffix_and_out(this), out(this);
+ Goto(&loop);
- Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
- elems->set(0, *string);
- return *factory->NewJSArrayWithElements(elems);
- }
+ Bind(&loop);
+ {
+ Node* const next_search_from = var_next_search_from.value();
+ Node* const last_matched_until = var_last_matched_until.value();
- // TODO(jgruber): Wrap this in a helper class.
- static const int kInitialArraySize = 8;
- Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
- int num_elems = 0;
+ // We're done if we've reached the end of the string.
+ {
+ Label next(this);
+ Branch(SmiEqual(next_search_from, string_length), &push_suffix_and_out,
+ &next);
+ Bind(&next);
+ }
- int string_index = 0;
- int prev_string_index = 0;
- while (string_index < length) {
- RETURN_FAILURE_ON_EXCEPTION(
- isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+ // Search for the given {regexp}.
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
- factory->undefined_value()));
+ Node* const last_match_info = LoadContextElement(
+ native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+
+ Callable exec_callable = CodeFactory::RegExpExec(isolate);
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ next_search_from, last_match_info);
- if (result->IsNull(isolate)) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
- continue;
+ // We're done if no match was found.
+ {
+ Label next(this);
+ Branch(WordEqual(match_indices, null), &push_suffix_and_out, &next);
+ Bind(&next);
}
- // TODO(jgruber): Extract toLength of some property into function.
- Handle<Object> last_index_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+ Node* const match_from = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
- const int last_index = Handle<Smi>::cast(last_index_obj)->value();
-
- const int end = std::min(last_index, length);
- if (end == prev_string_index) {
- string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
- string_index, unicode);
- continue;
+ // We're done if the match starts beyond the string.
+ {
+ Label next(this);
+ Branch(WordEqual(match_from, string_length), &push_suffix_and_out, &next);
+ Bind(&next);
}
+ Node* const match_to = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+
+ // Advance index and continue if the match is empty.
{
- Handle<String> substr =
- factory->NewSubString(string, prev_string_index, string_index);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return *NewJSArrayWithElements(isolate, elems, num_elems);
- }
+ Label next(this);
+
+ GotoUnless(SmiEqual(match_to, next_search_from), &next);
+ GotoUnless(SmiEqual(match_to, last_matched_until), &next);
+
+ Node* const is_unicode = FastFlagGetter(regexp, JSRegExp::kUnicode);
+ Node* const new_next_search_from =
+ AdvanceStringIndex(string, next_search_from, is_unicode);
+ var_next_search_from.Bind(new_next_search_from);
+ Goto(&loop);
+
+ Bind(&next);
+ }
+
+    // A valid match was found; add the new substring to the array.
+ {
+ Node* const from = last_matched_until;
+ Node* const to = match_from;
+
+ Node* const substr = SubString(context, string, from, to);
+ array.Push(substr);
+
+ GotoIf(WordEqual(array.length(), int_limit), &out);
}
- prev_string_index = end;
+ // Add all captures to the array.
+ {
+ Node* const num_registers = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kNumberOfCapturesIndex);
+ Node* const int_num_registers = SmiUntag(num_registers);
+
+ Variable var_reg(this, MachineType::PointerRepresentation());
+ var_reg.Bind(IntPtrConstant(2));
- Handle<Object> num_captures_obj;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, num_captures_obj,
- Object::GetProperty(result, isolate->factory()->length_string()));
+ Variable* vars[] = {array.var_array(), array.var_length(),
+ array.var_capacity(), &var_reg};
+ const int vars_count = sizeof(vars) / sizeof(vars[0]);
+ Label nested_loop(this, vars_count, vars), nested_loop_out(this);
+ Branch(IntPtrLessThan(var_reg.value(), int_num_registers), &nested_loop,
+ &nested_loop_out);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
- const int num_captures =
- std::max(Handle<Smi>::cast(num_captures_obj)->value(), 0);
-
- for (int i = 1; i < num_captures; i++) {
- Handle<Object> capture;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, capture, Object::GetElement(isolate, result, i));
- elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
- if (static_cast<uint32_t>(num_elems) == limit) {
- return *NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&nested_loop);
+ {
+ Node* const reg = var_reg.value();
+ Node* const from = LoadFixedArrayElement(
+ match_indices, reg,
+ RegExpMatchInfo::kFirstCaptureIndex * kPointerSize, mode);
+ Node* const to = LoadFixedArrayElement(
+ match_indices, reg,
+ (RegExpMatchInfo::kFirstCaptureIndex + 1) * kPointerSize, mode);
+
+ Label select_capture(this), select_undefined(this), store_value(this);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Branch(SmiEqual(to, SmiConstant(-1)), &select_undefined,
+ &select_capture);
+
+ Bind(&select_capture);
+ {
+ Node* const substr = SubString(context, string, from, to);
+ var_value.Bind(substr);
+ Goto(&store_value);
+ }
+
+ Bind(&select_undefined);
+ {
+ Node* const undefined = UndefinedConstant();
+ var_value.Bind(undefined);
+ Goto(&store_value);
+ }
+
+ Bind(&store_value);
+ {
+ array.Push(var_value.value());
+ GotoIf(WordEqual(array.length(), int_limit), &out);
+
+ Node* const new_reg = IntPtrAdd(reg, IntPtrConstant(2));
+ var_reg.Bind(new_reg);
+
+ Branch(IntPtrLessThan(new_reg, int_num_registers), &nested_loop,
+ &nested_loop_out);
+ }
}
+
+ Bind(&nested_loop_out);
}
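
  // For intuition: the nested loop above is what makes capture groups appear
  // between split chunks under the spec'd @@split semantics, e.g.
  //
  //   "a1b2c".split(/(\d)/)  // -> ["a", "1", "b", "2", "c"]
  //
  // with {limit} re-checked after every push.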
- string_index = prev_string_index;
+ var_last_matched_until.Bind(match_to);
+ var_next_search_from.Bind(match_to);
+ Goto(&loop);
}
+ Bind(&push_suffix_and_out);
{
- Handle<String> substr =
- factory->NewSubString(string, prev_string_index, length);
- elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ Node* const from = var_last_matched_until.value();
+ Node* const to = string_length;
+
+ Node* const substr = SubString(context, string, from, to);
+ array.Push(substr);
+
+ Goto(&out);
}
- return *NewJSArrayWithElements(isolate, elems, num_elems);
+ Bind(&out);
+ {
+ Node* const result = array.ToJSArray(context);
+ Return(result);
+ }
+
+ Bind(&return_empty_array);
+ {
+ Node* const length = smi_zero;
+ Node* const capacity = int_zero;
+ Node* const result = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, mode);
+ Return(result);
+ }
}
-namespace {
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const maybe_limit = Parameter(2);
+ Node* const context = Parameter(5);
+
+ // Ensure {maybe_receiver} is a JSReceiver.
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@split");
+ Node* const receiver = maybe_receiver;
+
+ // Convert {maybe_string} to a String.
+ Node* const string = ToString(context, maybe_string);
+
+ Label fast_path(this), slow_path(this);
+ BranchIfFastRegExp(context, map, &fast_path, &slow_path);
-compiler::Node* ReplaceGlobalCallableFastPath(
- CodeStubAssembler* a, compiler::Node* context, compiler::Node* regexp,
- compiler::Node* subject_string, compiler::Node* replace_callable) {
+ Bind(&fast_path);
+ {
+ // TODO(jgruber): Even if map checks send us to the fast path, we still need
+ // to verify the constructor property and jump to the slow path if it has
+ // been changed.
+
+ // Convert {maybe_limit} to a uint32, capping at the maximal smi value.
+ Variable var_limit(this, MachineRepresentation::kTagged);
+ Label if_limitissmimax(this), limit_done(this);
+
+ GotoIf(IsUndefined(maybe_limit), &if_limitissmimax);
+
+ {
+ Node* const limit = ToUint32(context, maybe_limit);
+ GotoUnless(TaggedIsSmi(limit), &if_limitissmimax);
+
+ var_limit.Bind(limit);
+ Goto(&limit_done);
+ }
+
+ Bind(&if_limitissmimax);
+ {
+      // TODO(jgruber): In this case, we can probably avoid generation of
+      // limit checks in Generate_RegExpPrototypeSplitBody.
+ Node* const smi_max = SmiConstant(Smi::kMaxValue);
+ var_limit.Bind(smi_max);
+ Goto(&limit_done);
+ }
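+
+    // E.g. an undefined {limit} takes the spec default of 2^32 - 1; clamping
+    // non-smi limits to Smi::kMaxValue is presumably observably equivalent,
+    // since no result array this builtin builds can reach that length.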
+
+ Bind(&limit_done);
+ {
+ Node* const limit = var_limit.value();
+ RegExpPrototypeSplitBody(context, receiver, string, limit);
+ }
+ }
+
+ Bind(&slow_path);
+ {
+ Node* const result = CallRuntime(Runtime::kRegExpSplit, context, receiver,
+ string, maybe_limit);
+ Return(result);
+ }
+}
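+
+// For reference, both paths implement the same observable semantics, e.g.
+//
+//   "a,b,c".split(/,/, 2)  // -> ["a", "b"]
+//
+// The fast path merely assumes an unmodified JSRegExp receiver.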
+
+Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
+ Node* context, Node* regexp, Node* string, Node* replace_callable) {
// The fast path is reached only if {receiver} is a global unmodified
// JSRegExp instance and {replace_callable} is callable.
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
+ Isolate* const isolate = this->isolate();
- Node* const null = a->NullConstant();
- Node* const undefined = a->UndefinedConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const int_one = a->IntPtrConstant(1);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* const null = NullConstant();
+ Node* const undefined = UndefinedConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const int_one = IntPtrConstant(1);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- Node* const native_context = a->LoadNativeContext(context);
+ Node* const native_context = LoadNativeContext(context);
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
// Set last index to 0.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
// Allocate {result_array}.
Node* result_array;
{
ElementsKind kind = FAST_ELEMENTS;
- Node* const array_map = a->LoadJSArrayElementsMap(kind, native_context);
- Node* const capacity = a->IntPtrConstant(16);
+ Node* const array_map = LoadJSArrayElementsMap(kind, native_context);
+ Node* const capacity = IntPtrConstant(16);
Node* const length = smi_zero;
Node* const allocation_site = nullptr;
- CodeStubAssembler::ParameterMode capacity_mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
+ ParameterMode capacity_mode = CodeStubAssembler::INTPTR_PARAMETERS;
- result_array = a->AllocateJSArray(kind, array_map, capacity, length,
- allocation_site, capacity_mode);
+ result_array = AllocateJSArray(kind, array_map, capacity, length,
+ allocation_site, capacity_mode);
}
// Call into runtime for RegExpExecMultiple.
- Node* last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const res =
- a->CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
- subject_string, last_match_info, result_array);
+ Node* last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ Node* const res = CallRuntime(Runtime::kRegExpExecMultiple, context, regexp,
+ string, last_match_info, result_array);
// Reset last index to 0.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
// If no matches, return the subject string.
- var_result.Bind(subject_string);
- a->GotoIf(a->WordEqual(res, null), &out);
+ var_result.Bind(string);
+ GotoIf(WordEqual(res, null), &out);
// Reload last match info since it might have changed.
- last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const res_length = a->LoadJSArrayLength(res);
- Node* const res_elems = a->LoadElements(res);
- CSA_ASSERT(a, a->HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
+ Node* const res_length = LoadJSArrayLength(res);
+ Node* const res_elems = LoadElements(res);
+ CSA_ASSERT(this, HasInstanceType(res_elems, FIXED_ARRAY_TYPE));
- CodeStubAssembler::ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const num_capture_registers = a->LoadFixedArrayElement(
- last_match_info,
- a->IntPtrConstant(RegExpMatchInfo::kNumberOfCapturesIndex), 0, mode);
+ Node* const num_capture_registers = LoadFixedArrayElement(
+ last_match_info, RegExpMatchInfo::kNumberOfCapturesIndex);
- Label if_hasexplicitcaptures(a), if_noexplicitcaptures(a), create_result(a);
- a->Branch(a->SmiEqual(num_capture_registers, a->SmiConstant(Smi::FromInt(2))),
- &if_noexplicitcaptures, &if_hasexplicitcaptures);
+ Label if_hasexplicitcaptures(this), if_noexplicitcaptures(this),
+ create_result(this);
+ Branch(SmiEqual(num_capture_registers, SmiConstant(Smi::FromInt(2))),
+ &if_noexplicitcaptures, &if_hasexplicitcaptures);
- a->Bind(&if_noexplicitcaptures);
+ Bind(&if_noexplicitcaptures);
{
// If the number of captures is two then there are no explicit captures in
// the regexp, just the implicit capture that captures the whole match. In
@@ -1710,394 +2172,358 @@ compiler::Node* ReplaceGlobalCallableFastPath(
// input string and some replacements that were returned from the replace
// function.
- Variable var_match_start(a, MachineRepresentation::kTagged);
+ Variable var_match_start(this, MachineRepresentation::kTagged);
var_match_start.Bind(smi_zero);
- Node* const end = a->SmiUntag(res_length);
- Variable var_i(a, MachineType::PointerRepresentation());
+ Node* const end = SmiUntag(res_length);
+ Variable var_i(this, MachineType::PointerRepresentation());
var_i.Bind(int_zero);
Variable* vars[] = {&var_i, &var_match_start};
- Label loop(a, 2, vars);
- a->Goto(&loop);
- a->Bind(&loop);
+ Label loop(this, 2, vars);
+ Goto(&loop);
+ Bind(&loop);
{
Node* const i = var_i.value();
- a->GotoUnless(a->IntPtrLessThan(i, end), &create_result);
+ GotoUnless(IntPtrLessThan(i, end), &create_result);
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
- Node* const elem = a->LoadFixedArrayElement(res_elems, i, 0, mode);
+ Node* const elem = LoadFixedArrayElement(res_elems, i);
- Label if_issmi(a), if_isstring(a), loop_epilogue(a);
- a->Branch(a->TaggedIsSmi(elem), &if_issmi, &if_isstring);
+ Label if_issmi(this), if_isstring(this), loop_epilogue(this);
+ Branch(TaggedIsSmi(elem), &if_issmi, &if_isstring);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
// Integers represent slices of the original string.
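        // Reading the shifts below: a positive smi appears to pack a slice
        // as (position << 11) | length, so (elem >> 11) + (elem & 0x7ff)
        // recovers position + length, i.e. the end of the slice; a
        // non-positive smi holds -position and is followed by a separate
        // length element, so the end comes out as next_elem - elem.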
- Label if_isnegativeorzero(a), if_ispositive(a);
- a->BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
- &if_ispositive);
+ Label if_isnegativeorzero(this), if_ispositive(this);
+ BranchIfSmiLessThanOrEqual(elem, smi_zero, &if_isnegativeorzero,
+ &if_ispositive);
- a->Bind(&if_ispositive);
+ Bind(&if_ispositive);
{
- Node* const int_elem = a->SmiUntag(elem);
+ Node* const int_elem = SmiUntag(elem);
Node* const new_match_start =
- a->IntPtrAdd(a->WordShr(int_elem, a->IntPtrConstant(11)),
- a->WordAnd(int_elem, a->IntPtrConstant(0x7ff)));
- var_match_start.Bind(a->SmiTag(new_match_start));
- a->Goto(&loop_epilogue);
+ IntPtrAdd(WordShr(int_elem, IntPtrConstant(11)),
+ WordAnd(int_elem, IntPtrConstant(0x7ff)));
+ var_match_start.Bind(SmiTag(new_match_start));
+ Goto(&loop_epilogue);
}
- a->Bind(&if_isnegativeorzero);
+ Bind(&if_isnegativeorzero);
{
- Node* const next_i = a->IntPtrAdd(i, int_one);
+ Node* const next_i = IntPtrAdd(i, int_one);
var_i.Bind(next_i);
- Node* const next_elem =
- a->LoadFixedArrayElement(res_elems, next_i, 0, mode);
+ Node* const next_elem = LoadFixedArrayElement(res_elems, next_i);
- Node* const new_match_start = a->SmiSub(next_elem, elem);
+ Node* const new_match_start = SmiSub(next_elem, elem);
var_match_start.Bind(new_match_start);
- a->Goto(&loop_epilogue);
+ Goto(&loop_epilogue);
}
}
- a->Bind(&if_isstring);
+ Bind(&if_isstring);
{
- CSA_ASSERT(a, a->IsStringInstanceType(a->LoadInstanceType(elem)));
+ CSA_ASSERT(this, IsStringInstanceType(LoadInstanceType(elem)));
Callable call_callable = CodeFactory::Call(isolate);
Node* const replacement_obj =
- a->CallJS(call_callable, context, replace_callable, undefined, elem,
- var_match_start.value(), subject_string);
+ CallJS(call_callable, context, replace_callable, undefined, elem,
+ var_match_start.value(), string);
- Node* const replacement_str = a->ToString(context, replacement_obj);
- a->StoreFixedArrayElement(res_elems, i, replacement_str);
+ Node* const replacement_str = ToString(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, i, replacement_str);
- Node* const elem_length = a->LoadStringLength(elem);
+ Node* const elem_length = LoadStringLength(elem);
Node* const new_match_start =
- a->SmiAdd(var_match_start.value(), elem_length);
+ SmiAdd(var_match_start.value(), elem_length);
var_match_start.Bind(new_match_start);
- a->Goto(&loop_epilogue);
+ Goto(&loop_epilogue);
}
- a->Bind(&loop_epilogue);
+ Bind(&loop_epilogue);
{
- var_i.Bind(a->IntPtrAdd(var_i.value(), int_one));
- a->Goto(&loop);
+ var_i.Bind(IntPtrAdd(var_i.value(), int_one));
+ Goto(&loop);
}
}
}
- a->Bind(&if_hasexplicitcaptures);
+ Bind(&if_hasexplicitcaptures);
{
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
-
Node* const from = int_zero;
- Node* const to = a->SmiUntag(res_length);
+ Node* const to = SmiUntag(res_length);
const int increment = 1;
- a->BuildFastLoop(
+ BuildFastLoop(
MachineType::PointerRepresentation(), from, to,
- [res_elems, isolate, native_context, context, undefined,
- replace_callable, mode](CodeStubAssembler* a, Node* index) {
- Node* const elem =
- a->LoadFixedArrayElement(res_elems, index, 0, mode);
+ [this, res_elems, isolate, native_context, context, undefined,
+ replace_callable](Node* index) {
+ Node* const elem = LoadFixedArrayElement(res_elems, index);
- Label do_continue(a);
- a->GotoIf(a->TaggedIsSmi(elem), &do_continue);
+ Label do_continue(this);
+ GotoIf(TaggedIsSmi(elem), &do_continue);
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- CSA_ASSERT(a, a->HasInstanceType(elem, JS_ARRAY_TYPE));
+ CSA_ASSERT(this, HasInstanceType(elem, JS_ARRAY_TYPE));
// TODO(jgruber): Remove indirection through Call->ReflectApply.
Callable call_callable = CodeFactory::Call(isolate);
- Node* const reflect_apply = a->LoadContextElement(
- native_context, Context::REFLECT_APPLY_INDEX);
+ Node* const reflect_apply =
+ LoadContextElement(native_context, Context::REFLECT_APPLY_INDEX);
Node* const replacement_obj =
- a->CallJS(call_callable, context, reflect_apply, undefined,
- replace_callable, undefined, elem);
+ CallJS(call_callable, context, reflect_apply, undefined,
+ replace_callable, undefined, elem);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
- Node* const replacement_str = a->ToString(context, replacement_obj);
- a->StoreFixedArrayElement(res_elems, index, replacement_str,
- UPDATE_WRITE_BARRIER, mode);
+ Node* const replacement_str = ToString(context, replacement_obj);
+ StoreFixedArrayElement(res_elems, index, replacement_str);
- a->Goto(&do_continue);
- a->Bind(&do_continue);
+ Goto(&do_continue);
+ Bind(&do_continue);
},
increment, CodeStubAssembler::IndexAdvanceMode::kPost);
- a->Goto(&create_result);
+ Goto(&create_result);
}
- a->Bind(&create_result);
+ Bind(&create_result);
{
- Node* const result = a->CallRuntime(Runtime::kStringBuilderConcat, context,
- res, res_length, subject_string);
+ Node* const result = CallRuntime(Runtime::kStringBuilderConcat, context,
+ res, res_length, string);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
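
// E.g. (global + callable case): "a1b2".replace(/\d/g, d => d * 2) yields
// "a2b4"; each string stored into {res_elems} above is one return value of
// the replace function, and StringBuilderConcat stitches the pieces together.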
-compiler::Node* ReplaceSimpleStringFastPath(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* regexp,
- compiler::Node* subject_string,
- compiler::Node* replace_string) {
+Node* RegExpBuiltinsAssembler::ReplaceSimpleStringFastPath(
+ Node* context, Node* regexp, Node* string, Node* replace_string) {
// The fast path is reached only if {receiver} is an unmodified
// JSRegExp instance, {replace_value} is non-callable, and
// ToString({replace_value}) does not contain '$', i.e. we're doing a simple
// string replacement.
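
// E.g. "aXbXc".replace(/X/, "-") -> "a-bXc" runs on this path, while the
// global "aXbXc".replace(/X/g, "-") -> "a-b-c" is handed off to the runtime
// below.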
- typedef CodeStubAssembler::Variable Variable;
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
+ Isolate* const isolate = this->isolate();
- Isolate* const isolate = a->isolate();
+ Node* const null = NullConstant();
+ Node* const int_zero = IntPtrConstant(0);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- Node* const null = a->NullConstant();
- Node* const int_zero = a->IntPtrConstant(0);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
-
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
// Load the last match info.
- Node* const native_context = a->LoadNativeContext(context);
- Node* const last_match_info = a->LoadContextElement(
- native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
+ Node* const native_context = LoadNativeContext(context);
+ Node* const last_match_info =
+ LoadContextElement(native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
// Is {regexp} global?
- Label if_isglobal(a), if_isnonglobal(a);
- Node* const flags = a->LoadObjectField(regexp, JSRegExp::kFlagsOffset);
+ Label if_isglobal(this), if_isnonglobal(this);
+ Node* const flags = LoadObjectField(regexp, JSRegExp::kFlagsOffset);
Node* const is_global =
- a->WordAnd(a->SmiUntag(flags), a->IntPtrConstant(JSRegExp::kGlobal));
- a->Branch(a->WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
+ WordAnd(SmiUntag(flags), IntPtrConstant(JSRegExp::kGlobal));
+ Branch(WordEqual(is_global, int_zero), &if_isnonglobal, &if_isglobal);
- a->Bind(&if_isglobal);
+ Bind(&if_isglobal);
{
// Hand off global regexps to runtime.
- FastStoreLastIndex(a, context, regexp, smi_zero);
+ FastStoreLastIndex(regexp, smi_zero);
Node* const result =
- a->CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
- subject_string, regexp, replace_string, last_match_info);
+ CallRuntime(Runtime::kStringReplaceGlobalRegExpWithString, context,
+ string, regexp, replace_string, last_match_info);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isnonglobal);
+ Bind(&if_isnonglobal);
{
// Run exec, then manually construct the resulting string.
Callable exec_callable = CodeFactory::RegExpExec(isolate);
- Node* const match_indices =
- a->CallStub(exec_callable, context, regexp, subject_string, smi_zero,
- last_match_info);
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ smi_zero, last_match_info);
- Label if_matched(a), if_didnotmatch(a);
- a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+ Label if_matched(this), if_didnotmatch(this);
+ Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
- a->Bind(&if_didnotmatch);
+ Bind(&if_didnotmatch);
{
- FastStoreLastIndex(a, context, regexp, smi_zero);
- var_result.Bind(subject_string);
- a->Goto(&out);
+ FastStoreLastIndex(regexp, smi_zero);
+ var_result.Bind(string);
+ Goto(&out);
}
- a->Bind(&if_matched);
+ Bind(&if_matched);
{
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS;
-
Node* const subject_start = smi_zero;
- Node* const match_start = a->LoadFixedArrayElement(
- match_indices, a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex),
- 0, mode);
- Node* const match_end = a->LoadFixedArrayElement(
- match_indices,
- a->IntPtrConstant(RegExpMatchInfo::kFirstCaptureIndex + 1), 0, mode);
- Node* const subject_end = a->LoadStringLength(subject_string);
-
- Label if_replaceisempty(a), if_replaceisnotempty(a);
- Node* const replace_length = a->LoadStringLength(replace_string);
- a->Branch(a->SmiEqual(replace_length, smi_zero), &if_replaceisempty,
- &if_replaceisnotempty);
-
- a->Bind(&if_replaceisempty);
+ Node* const match_start = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex);
+ Node* const match_end = LoadFixedArrayElement(
+ match_indices, RegExpMatchInfo::kFirstCaptureIndex + 1);
+ Node* const subject_end = LoadStringLength(string);
+
+ Label if_replaceisempty(this), if_replaceisnotempty(this);
+ Node* const replace_length = LoadStringLength(replace_string);
+ Branch(SmiEqual(replace_length, smi_zero), &if_replaceisempty,
+ &if_replaceisnotempty);
+
+ Bind(&if_replaceisempty);
{
// TODO(jgruber): We could skip many of the checks that using SubString
// here entails.
Node* const first_part =
- a->SubString(context, subject_string, subject_start, match_start);
+ SubString(context, string, subject_start, match_start);
Node* const second_part =
- a->SubString(context, subject_string, match_end, subject_end);
+ SubString(context, string, match_end, subject_end);
- Node* const result = a->StringAdd(context, first_part, second_part);
+ Node* const result = StringAdd(context, first_part, second_part);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_replaceisnotempty);
+ Bind(&if_replaceisnotempty);
{
Node* const first_part =
- a->SubString(context, subject_string, subject_start, match_start);
+ SubString(context, string, subject_start, match_start);
Node* const second_part = replace_string;
Node* const third_part =
- a->SubString(context, subject_string, match_end, subject_end);
+ SubString(context, string, match_end, subject_end);
- Node* result = a->StringAdd(context, first_part, second_part);
- result = a->StringAdd(context, result, third_part);
+ Node* result = StringAdd(context, first_part, second_part);
+ result = StringAdd(context, result, third_part);
var_result.Bind(result);
- a->Goto(&out);
+ Goto(&out);
}
}
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
-} // namespace
-
// ES#sec-regexp.prototype-@@replace
// RegExp.prototype [ @@replace ] ( string, replaceValue )
-void Builtins::Generate_RegExpPrototypeReplace(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const maybe_receiver = a->Parameter(0);
- Node* const maybe_string = a->Parameter(1);
- Node* const replace_value = a->Parameter(2);
- Node* const context = a->Parameter(5);
-
- Node* const int_zero = a->IntPtrConstant(0);
+TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
+ Node* const maybe_receiver = Parameter(0);
+ Node* const maybe_string = Parameter(1);
+ Node* const replace_value = Parameter(2);
+ Node* const context = Parameter(5);
// Ensure {maybe_receiver} is a JSReceiver.
- Node* const map =
- ThrowIfNotJSReceiver(a, isolate, context, maybe_receiver,
- MessageTemplate::kIncompatibleMethodReceiver,
- "RegExp.prototype.@@replace");
+ Node* const map = ThrowIfNotJSReceiver(
+ context, maybe_receiver, MessageTemplate::kIncompatibleMethodReceiver,
+ "RegExp.prototype.@@replace");
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Callable tostring_callable = CodeFactory::ToString(isolate);
- Node* const string = a->CallStub(tostring_callable, context, maybe_string);
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const string = CallStub(tostring_callable, context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
- Label checkreplacecallable(a), runtime(a, Label::kDeferred), fastpath(a);
- BranchIfFastPath(a, context, map, &checkreplacecallable, &runtime);
+ Label checkreplacecallable(this), runtime(this, Label::kDeferred),
+ fastpath(this);
+ BranchIfFastRegExp(context, map, &checkreplacecallable, &runtime);
- a->Bind(&checkreplacecallable);
+ Bind(&checkreplacecallable);
Node* const regexp = receiver;
// 2. Is {replace_value} callable?
- Label checkreplacestring(a), if_iscallable(a);
- a->GotoIf(a->TaggedIsSmi(replace_value), &checkreplacestring);
+ Label checkreplacestring(this), if_iscallable(this);
+ GotoIf(TaggedIsSmi(replace_value), &checkreplacestring);
- Node* const replace_value_map = a->LoadMap(replace_value);
- a->Branch(a->IsCallableMap(replace_value_map), &if_iscallable,
- &checkreplacestring);
+ Node* const replace_value_map = LoadMap(replace_value);
+ Branch(IsCallableMap(replace_value_map), &if_iscallable, &checkreplacestring);
// 3. Does ToString({replace_value}) contain '$'?
- a->Bind(&checkreplacestring);
+ Bind(&checkreplacestring);
{
Node* const replace_string =
- a->CallStub(tostring_callable, context, replace_value);
+ CallStub(tostring_callable, context, replace_value);
- Node* const dollar_char = a->IntPtrConstant('$');
- Node* const smi_minusone = a->SmiConstant(Smi::FromInt(-1));
- a->GotoUnless(a->SmiEqual(a->StringIndexOfChar(context, replace_string,
- dollar_char, int_zero),
- smi_minusone),
- &runtime);
+ Node* const dollar_char = Int32Constant('$');
+ Node* const smi_minusone = SmiConstant(Smi::FromInt(-1));
+ GotoUnless(SmiEqual(StringIndexOfChar(context, replace_string, dollar_char,
+ SmiConstant(0)),
+ smi_minusone),
+ &runtime);
- a->Return(ReplaceSimpleStringFastPath(a, context, regexp, string,
- replace_string));
+ Return(
+ ReplaceSimpleStringFastPath(context, regexp, string, replace_string));
}
// {regexp} is unmodified and {replace_value} is callable.
- a->Bind(&if_iscallable);
+ Bind(&if_iscallable);
{
Node* const replace_callable = replace_value;
// Check if the {regexp} is global.
- Label if_isglobal(a), if_isnotglobal(a);
- Node* const is_global = FastFlagGetter(a, regexp, JSRegExp::kGlobal);
- a->Branch(is_global, &if_isglobal, &if_isnotglobal);
+ Label if_isglobal(this), if_isnotglobal(this);
+ Node* const is_global = FastFlagGetter(regexp, JSRegExp::kGlobal);
+ Branch(is_global, &if_isglobal, &if_isnotglobal);
- a->Bind(&if_isglobal);
+ Bind(&if_isglobal);
{
Node* const result = ReplaceGlobalCallableFastPath(
- a, context, regexp, string, replace_callable);
- a->Return(result);
+ context, regexp, string, replace_callable);
+ Return(result);
}
- a->Bind(&if_isnotglobal);
+ Bind(&if_isnotglobal);
{
Node* const result =
- a->CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
- context, string, regexp, replace_callable);
- a->Return(result);
+ CallRuntime(Runtime::kStringReplaceNonGlobalRegExpWithFunction,
+ context, string, regexp, replace_callable);
+ Return(result);
}
}
- a->Bind(&runtime);
+ Bind(&runtime);
{
- Node* const result = a->CallRuntime(Runtime::kRegExpReplace, context,
- receiver, string, replace_value);
- a->Return(result);
+ Node* const result = CallRuntime(Runtime::kRegExpReplace, context, receiver,
+ string, replace_value);
+ Return(result);
}
}
// Simple string matching functionality for internal use which does not modify
// the last match info.
-void Builtins::Generate_RegExpInternalMatch(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Isolate* const isolate = a->isolate();
-
- Node* const regexp = a->Parameter(1);
- Node* const string = a->Parameter(2);
- Node* const context = a->Parameter(5);
+TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
+ Node* const regexp = Parameter(1);
+ Node* const string = Parameter(2);
+ Node* const context = Parameter(5);
- Node* const null = a->NullConstant();
- Node* const smi_zero = a->SmiConstant(Smi::FromInt(0));
+ Node* const null = NullConstant();
+ Node* const smi_zero = SmiConstant(Smi::FromInt(0));
- Node* const native_context = a->LoadNativeContext(context);
- Node* const internal_match_info = a->LoadContextElement(
+ Node* const native_context = LoadNativeContext(context);
+ Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
- Callable exec_callable = CodeFactory::RegExpExec(isolate);
- Node* const match_indices = a->CallStub(
- exec_callable, context, regexp, string, smi_zero, internal_match_info);
+ Callable exec_callable = CodeFactory::RegExpExec(isolate());
+ Node* const match_indices = CallStub(exec_callable, context, regexp, string,
+ smi_zero, internal_match_info);
- Label if_matched(a), if_didnotmatch(a);
- a->Branch(a->WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
+ Label if_matched(this), if_didnotmatch(this);
+ Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
- a->Bind(&if_didnotmatch);
- a->Return(null);
+ Bind(&if_didnotmatch);
+ Return(null);
- a->Bind(&if_matched);
+ Bind(&if_matched);
{
- Node* result = ConstructNewResultFromMatchInfo(isolate, a, context,
- match_indices, string);
- a->Return(result);
+ Node* result =
+ ConstructNewResultFromMatchInfo(context, match_indices, string);
+ Return(result);
}
}
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 2b5bf498a5..53caf1fe21 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -31,7 +31,7 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
compiler::Node* context,
compiler::Node** out_instance_type,
compiler::Node** out_backing_store) {
- using namespace compiler;
+ using compiler::Node;
CodeStubAssembler::Label is_smi(a), not_smi(a), is_typed_array(a),
not_typed_array(a), is_shared(a), not_shared(a), is_float_or_clamped(a),
not_float_or_clamped(a), invalid(a);
@@ -43,8 +43,8 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
// Fail if the array's instance type is not JSTypedArray.
a->Bind(&not_smi);
- a->Branch(a->WordEqual(a->LoadInstanceType(tagged),
- a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ a->Branch(a->Word32Equal(a->LoadInstanceType(tagged),
+ a->Int32Constant(JS_TYPED_ARRAY_TYPE)),
&is_typed_array, &not_typed_array);
a->Bind(&not_typed_array);
a->Goto(&invalid);
@@ -88,14 +88,15 @@ void ValidateSharedTypedArray(CodeStubAssembler* a, compiler::Node* tagged,
Node* byte_offset = a->ChangeUint32ToWord(a->TruncateTaggedToWord32(
context,
a->LoadObjectField(tagged, JSArrayBufferView::kByteOffsetOffset)));
- *out_backing_store = a->IntPtrAdd(backing_store, byte_offset);
+ *out_backing_store =
+ a->IntPtrAdd(a->BitcastTaggedToWord(backing_store), byte_offset);
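+  // (The bitcast above makes the pointer arithmetic explicit: the backing
+  // store is a raw external pointer, so it is reclassified as a word before
+  // IntPtrAdd forms the base + byte_offset address.)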
}
// https://tc39.github.io/ecmascript_sharedmem/shmem.html#Atomics.ValidateAtomicAccess
compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
compiler::Node* tagged,
compiler::Node* context) {
- using namespace compiler;
+ using compiler::Node;
CodeStubAssembler::Variable var_result(a, MachineRepresentation::kWord32);
Callable to_number = CodeFactory::ToNumber(a->isolate());
@@ -139,13 +140,13 @@ compiler::Node* ConvertTaggedAtomicIndexToWord32(CodeStubAssembler* a,
void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
compiler::Node* array_length_word,
compiler::Node* context) {
- using namespace compiler;
+ using compiler::Node;
// Check if the index is in bounds. If not, throw RangeError.
CodeStubAssembler::Label if_inbounds(a), if_notinbounds(a);
// TODO(jkummerow): Use unsigned comparison instead of "i<0 || i>length".
a->Branch(
- a->WordOr(a->Int32LessThan(index_word, a->Int32Constant(0)),
- a->Int32GreaterThanOrEqual(index_word, array_length_word)),
+ a->Word32Or(a->Int32LessThan(index_word, a->Int32Constant(0)),
+ a->Int32GreaterThanOrEqual(index_word, array_length_word)),
&if_notinbounds, &if_inbounds);
a->Bind(&if_notinbounds);
a->Return(
@@ -155,24 +156,25 @@ void ValidateAtomicIndex(CodeStubAssembler* a, compiler::Node* index_word,
} // anonymous namespace
-void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
- using namespace compiler;
- Node* array = a->Parameter(1);
- Node* index = a->Parameter(2);
- Node* context = a->Parameter(3 + 2);
+void Builtins::Generate_AtomicsLoad(compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ CodeStubAssembler a(state);
+ Node* array = a.Parameter(1);
+ Node* index = a.Parameter(2);
+ Node* context = a.Parameter(3 + 2);
Node* instance_type;
Node* backing_store;
- ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
- Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
- Node* array_length_word32 = a->TruncateTaggedToWord32(
- context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(a, index_word32, array_length_word32, context);
- Node* index_word = a->ChangeUint32ToWord(index_word32);
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+ Node* array_length_word32 = a.TruncateTaggedToWord32(
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+ Node* index_word = a.ChangeUint32ToWord(index_word32);
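+
+  // The loads below scale {index_word} by the element size: unshifted for
+  // 8-bit types, WordShl by 1 for 16-bit, and by 2 for 32-bit element types.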
- CodeStubAssembler::Label i8(a), u8(a), i16(a), u16(a), i32(a), u32(a),
- other(a);
+ CodeStubAssembler::Label i8(&a), u8(&a), i16(&a), u16(&a), i32(&a), u32(&a),
+ other(&a);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -180,59 +182,60 @@ void Builtins::Generate_AtomicsLoad(CodeStubAssembler* a) {
CodeStubAssembler::Label* case_labels[] = {
&i8, &u8, &i16, &u16, &i32, &u32,
};
- a->Switch(instance_type, &other, case_values, case_labels,
- arraysize(case_labels));
+ a.Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
- a->Bind(&i8);
- a->Return(
- a->SmiTag(a->AtomicLoad(MachineType::Int8(), backing_store, index_word)));
+ a.Bind(&i8);
+ a.Return(a.SmiFromWord32(
+ a.AtomicLoad(MachineType::Int8(), backing_store, index_word)));
- a->Bind(&u8);
- a->Return(a->SmiTag(
- a->AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
+ a.Bind(&u8);
+ a.Return(a.SmiFromWord32(
+ a.AtomicLoad(MachineType::Uint8(), backing_store, index_word)));
- a->Bind(&i16);
- a->Return(a->SmiTag(a->AtomicLoad(MachineType::Int16(), backing_store,
- a->WordShl(index_word, 1))));
+ a.Bind(&i16);
+ a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Int16(), backing_store,
+ a.WordShl(index_word, 1))));
- a->Bind(&u16);
- a->Return(a->SmiTag(a->AtomicLoad(MachineType::Uint16(), backing_store,
- a->WordShl(index_word, 1))));
+ a.Bind(&u16);
+ a.Return(a.SmiFromWord32(a.AtomicLoad(MachineType::Uint16(), backing_store,
+ a.WordShl(index_word, 1))));
- a->Bind(&i32);
- a->Return(a->ChangeInt32ToTagged(a->AtomicLoad(
- MachineType::Int32(), backing_store, a->WordShl(index_word, 2))));
+ a.Bind(&i32);
+ a.Return(a.ChangeInt32ToTagged(a.AtomicLoad(
+ MachineType::Int32(), backing_store, a.WordShl(index_word, 2))));
- a->Bind(&u32);
- a->Return(a->ChangeUint32ToTagged(a->AtomicLoad(
- MachineType::Uint32(), backing_store, a->WordShl(index_word, 2))));
+ a.Bind(&u32);
+ a.Return(a.ChangeUint32ToTagged(a.AtomicLoad(
+ MachineType::Uint32(), backing_store, a.WordShl(index_word, 2))));
// This shouldn't happen, we've already validated the type.
- a->Bind(&other);
- a->Return(a->Int32Constant(0));
+ a.Bind(&other);
+ a.Return(a.SmiConstant(0));
}
-void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
- using namespace compiler;
- Node* array = a->Parameter(1);
- Node* index = a->Parameter(2);
- Node* value = a->Parameter(3);
- Node* context = a->Parameter(4 + 2);
+void Builtins::Generate_AtomicsStore(compiler::CodeAssemblerState* state) {
+ using compiler::Node;
+ CodeStubAssembler a(state);
+ Node* array = a.Parameter(1);
+ Node* index = a.Parameter(2);
+ Node* value = a.Parameter(3);
+ Node* context = a.Parameter(4 + 2);
Node* instance_type;
Node* backing_store;
- ValidateSharedTypedArray(a, array, context, &instance_type, &backing_store);
+ ValidateSharedTypedArray(&a, array, context, &instance_type, &backing_store);
- Node* index_word32 = ConvertTaggedAtomicIndexToWord32(a, index, context);
- Node* array_length_word32 = a->TruncateTaggedToWord32(
- context, a->LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(a, index_word32, array_length_word32, context);
- Node* index_word = a->ChangeUint32ToWord(index_word32);
+ Node* index_word32 = ConvertTaggedAtomicIndexToWord32(&a, index, context);
+ Node* array_length_word32 = a.TruncateTaggedToWord32(
+ context, a.LoadObjectField(array, JSTypedArray::kLengthOffset));
+ ValidateAtomicIndex(&a, index_word32, array_length_word32, context);
+ Node* index_word = a.ChangeUint32ToWord(index_word32);
- Node* value_integer = a->ToInteger(context, value);
- Node* value_word32 = a->TruncateTaggedToWord32(context, value_integer);
+ Node* value_integer = a.ToInteger(context, value);
+ Node* value_word32 = a.TruncateTaggedToWord32(context, value_integer);
- CodeStubAssembler::Label u8(a), u16(a), u32(a), other(a);
+ CodeStubAssembler::Label u8(&a), u16(&a), u32(&a), other(&a);
int32_t case_values[] = {
FIXED_INT8_ARRAY_TYPE, FIXED_UINT8_ARRAY_TYPE, FIXED_INT16_ARRAY_TYPE,
FIXED_UINT16_ARRAY_TYPE, FIXED_INT32_ARRAY_TYPE, FIXED_UINT32_ARRAY_TYPE,
@@ -240,27 +243,27 @@ void Builtins::Generate_AtomicsStore(CodeStubAssembler* a) {
CodeStubAssembler::Label* case_labels[] = {
&u8, &u8, &u16, &u16, &u32, &u32,
};
- a->Switch(instance_type, &other, case_values, case_labels,
- arraysize(case_labels));
+ a.Switch(instance_type, &other, case_values, case_labels,
+ arraysize(case_labels));
- a->Bind(&u8);
- a->AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
- value_word32);
- a->Return(value_integer);
+ a.Bind(&u8);
+ a.AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
+ value_word32);
+ a.Return(value_integer);
- a->Bind(&u16);
- a->AtomicStore(MachineRepresentation::kWord16, backing_store,
- a->WordShl(index_word, 1), value_word32);
- a->Return(value_integer);
+ a.Bind(&u16);
+ a.AtomicStore(MachineRepresentation::kWord16, backing_store,
+ a.WordShl(index_word, 1), value_word32);
+ a.Return(value_integer);
- a->Bind(&u32);
- a->AtomicStore(MachineRepresentation::kWord32, backing_store,
- a->WordShl(index_word, 2), value_word32);
- a->Return(value_integer);
+ a.Bind(&u32);
+ a.AtomicStore(MachineRepresentation::kWord32, backing_store,
+ a.WordShl(index_word, 2), value_word32);
+ a.Return(value_integer);
// This shouldn't happen, we've already validated the type.
- a->Bind(&other);
- a->Return(a->Int32Constant(0));
+ a.Bind(&other);
+ a.Return(a.SmiConstant(0));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index 4ccccbc859..3259d0021a 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
-
+#include "src/builtins/builtins.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/regexp/regexp-utils.h"
namespace v8 {
@@ -14,9 +14,55 @@ namespace internal {
typedef CodeStubAssembler::ResultMode ResultMode;
typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
-namespace {
+class StringBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit StringBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ protected:
+ Node* LoadOneByteChar(Node* string, Node* index) {
+ return Load(MachineType::Uint8(), string, OneByteCharOffset(index));
+ }
+
+ Node* OneByteCharAddress(Node* string, Node* index) {
+ Node* offset = OneByteCharOffset(index);
+ return IntPtrAdd(BitcastTaggedToWord(string), offset);
+ }
+
+ Node* OneByteCharOffset(Node* index) {
+ return CharOffset(String::ONE_BYTE_ENCODING, index);
+ }
+
+ Node* CharOffset(String::Encoding encoding, Node* index) {
+ const int header = SeqOneByteString::kHeaderSize - kHeapObjectTag;
+ Node* offset = index;
+ if (encoding == String::TWO_BYTE_ENCODING) {
+ offset = IntPtrAdd(offset, offset);
+ }
+ offset = IntPtrAdd(offset, IntPtrConstant(header));
+ return offset;
+ }
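+
+  // E.g. for a sequential one-byte string, the byte at logical index i sits
+  // at string + (SeqOneByteString::kHeaderSize - kHeapObjectTag) + i; the
+  // two-byte case doubles the index before adding the same header constant.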
+
+ void BranchIfSimpleOneByteStringInstanceType(Node* instance_type,
+ Label* if_true,
+ Label* if_false) {
+ const int kMask = kStringRepresentationMask | kStringEncodingMask;
+ const int kType = kOneByteStringTag | kSeqStringTag;
+ Branch(Word32Equal(Word32And(instance_type, Int32Constant(kMask)),
+ Int32Constant(kType)),
+ if_true, if_false);
+ }
+
+ void GenerateStringEqual(ResultMode mode);
+ void GenerateStringRelationalComparison(RelationalComparisonMode mode);
+
+ Node* ToSmiBetweenZeroAnd(Node* context, Node* value, Node* limit);
-void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
+ Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
+ UnicodeEncoding encoding);
+};
+
+void StringBuiltinsAssembler::GenerateStringEqual(ResultMode mode) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -33,140 +79,89 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
// }
// return %StringEqual(lhs, rhs);
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
- Label if_equal(assembler), if_notequal(assembler);
+ Label if_equal(this), if_notequal(this);
// Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
-
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
-
- assembler->Bind(&if_notsame);
+ GotoIf(WordEqual(lhs, rhs), &if_equal);
+
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = LoadStringLength(lhs);
+ Node* rhs_length = LoadStringLength(rhs);
+
+ // Strings with different lengths cannot be equal.
+ GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = Word32Or(
+ lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
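+  // The packing used by the combined checks below: the rhs type sits in
+  // bits 8..15, so each mask/tag pair of the form m | (m << 8) lets a single
+  // Word32And + Word32Equal test both instance types at once.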
+
+ // Check if both {lhs} and {rhs} are internalized. Since we already know
+ // that they're not the same object, they're not equal in that case.
+ int const kBothInternalizedMask =
+ kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
+ int const kBothInternalizedTag = kInternalizedTag | (kInternalizedTag << 8);
+ GotoIf(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothInternalizedMask)),
+ Int32Constant(kBothInternalizedTag)),
+ &if_notequal);
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+ Branch(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothSeqOneByteStringMask)),
+ Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ Bind(&if_bothonebyteseqstrings);
{
- // The {lhs} and {rhs} don't refer to the exact same String object.
+ // Compute the effective offset of the first character.
+ Node* begin =
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
+
+ // Compute the first offset after the string from the length.
+ Node* end = IntPtrAdd(begin, SmiUntag(lhs_length));
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(this, MachineType::PointerRepresentation());
+ Label loop(this, &var_offset);
+ var_offset.Bind(begin);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ // If {offset} equals {end}, no difference was found, so the
+ // strings are equal.
+ Node* offset = var_offset.value();
+ GotoIf(WordEqual(offset, end), &if_equal);
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
- // Check if the lengths of {lhs} and {rhs} are equal.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->WordEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
+ // Check if the characters match.
+ GotoIf(Word32NotEqual(lhs_value, rhs_value), &if_notequal);
- assembler->Bind(&if_lengthisequal);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check if both {lhs} and {rhs} are internalized.
- int const kBothInternalizedMask =
- kIsNotInternalizedMask | (kIsNotInternalizedMask << 8);
- int const kBothInternalizedTag =
- kInternalizedTag | (kInternalizedTag << 8);
- Label if_bothinternalized(assembler), if_notbothinternalized(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothInternalizedMask)),
- assembler->Int32Constant(kBothInternalizedTag)),
- &if_bothinternalized, &if_notbothinternalized);
-
- assembler->Bind(&if_bothinternalized);
- {
- // Fast negative check for internalized-to-internalized equality.
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_notbothinternalized);
- {
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(
- assembler->Word32Equal(
- assembler->Word32And(
- both_instance_types,
- assembler->Int32Constant(kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
- assembler->Bind(&if_bothonebyteseqstrings);
- {
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(
- SeqOneByteString::kHeaderSize - kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- Node* end =
- assembler->IntPtrAdd(begin, assembler->SmiUntag(lhs_length));
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
- {
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
-
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value =
- assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value =
- assembler->Load(MachineType::Uint8(), rhs, offset);
-
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
-
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
-
- assembler->Bind(&if_valueisnotsame);
- assembler->Goto(&if_notequal);
- }
-
- assembler->Bind(&if_done);
- assembler->Goto(&if_equal);
- }
+ // Advance to next character.
+ var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+ Goto(&loop);
+ }
}
- assembler->Bind(&if_notbothonebyteseqstrings);
+ Bind(&if_notbothonebyteseqstrings);
{
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string equality checks.
@@ -174,363 +169,328 @@ void GenerateStringEqual(CodeStubAssembler* assembler, ResultMode mode) {
(mode == ResultMode::kDontNegateResult)
? Runtime::kStringEqual
: Runtime::kStringNotEqual;
- assembler->TailCallRuntime(function_id, context, lhs, rhs);
+ TailCallRuntime(function_id, context, lhs, rhs);
}
- }
- }
- assembler->Bind(&if_lengthisnotequal);
- {
- // Mismatch in length of {lhs} and {rhs}, cannot be equal.
- assembler->Goto(&if_notequal);
- }
- }
-
- assembler->Bind(&if_equal);
- assembler->Return(
- assembler->BooleanConstant(mode == ResultMode::kDontNegateResult));
+ Bind(&if_equal);
+ Return(BooleanConstant(mode == ResultMode::kDontNegateResult));
- assembler->Bind(&if_notequal);
- assembler->Return(
- assembler->BooleanConstant(mode == ResultMode::kNegateResult));
+ Bind(&if_notequal);
+ Return(BooleanConstant(mode == ResultMode::kNegateResult));
}
+void StringBuiltinsAssembler::GenerateStringRelationalComparison(
+ RelationalComparisonMode mode) {
+ Node* lhs = Parameter(0);
+ Node* rhs = Parameter(1);
+ Node* context = Parameter(2);
-void GenerateStringRelationalComparison(CodeStubAssembler* assembler,
- RelationalComparisonMode mode) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+ Label if_less(this), if_equal(this), if_greater(this);
- Node* lhs = assembler->Parameter(0);
- Node* rhs = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ GotoIf(WordEqual(lhs, rhs), &if_equal);
+
+ // Load instance types of {lhs} and {rhs}.
+ Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
+ // Combine the instance types into a single 16-bit value, so we can check
+ // both of them at once.
+ Node* both_instance_types = Word32Or(
+ lhs_instance_type, Word32Shl(rhs_instance_type, Int32Constant(8)));
+
+ // Check that both {lhs} and {rhs} are flat one-byte strings.
+ int const kBothSeqOneByteStringMask =
+ kStringEncodingMask | kStringRepresentationMask |
+ ((kStringEncodingMask | kStringRepresentationMask) << 8);
+ int const kBothSeqOneByteStringTag =
+ kOneByteStringTag | kSeqStringTag |
+ ((kOneByteStringTag | kSeqStringTag) << 8);
+ Label if_bothonebyteseqstrings(this), if_notbothonebyteseqstrings(this);
+ Branch(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothSeqOneByteStringMask)),
+ Int32Constant(kBothSeqOneByteStringTag)),
+ &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
+
+ Bind(&if_bothonebyteseqstrings);
+ {
+ // Load the length of {lhs} and {rhs}.
+ Node* lhs_length = LoadStringLength(lhs);
+ Node* rhs_length = LoadStringLength(rhs);
- Label if_less(assembler), if_equal(assembler), if_greater(assembler);
+ // Determine the minimum length.
+ Node* length = SmiMin(lhs_length, rhs_length);
- // Fast check to see if {lhs} and {rhs} refer to the same String object.
- Label if_same(assembler), if_notsame(assembler);
- assembler->Branch(assembler->WordEqual(lhs, rhs), &if_same, &if_notsame);
+ // Compute the effective offset of the first character.
+ Node* begin =
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag);
- assembler->Bind(&if_same);
- assembler->Goto(&if_equal);
+ // Compute the first offset after the string from the length.
+ Node* end = IntPtrAdd(begin, SmiUntag(length));
- assembler->Bind(&if_notsame);
- {
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = assembler->LoadInstanceType(lhs);
- Node* rhs_instance_type = assembler->LoadInstanceType(rhs);
-
- // Combine the instance types into a single 16-bit value, so we can check
- // both of them at once.
- Node* both_instance_types = assembler->Word32Or(
- lhs_instance_type,
- assembler->Word32Shl(rhs_instance_type, assembler->Int32Constant(8)));
-
- // Check that both {lhs} and {rhs} are flat one-byte strings.
- int const kBothSeqOneByteStringMask =
- kStringEncodingMask | kStringRepresentationMask |
- ((kStringEncodingMask | kStringRepresentationMask) << 8);
- int const kBothSeqOneByteStringTag =
- kOneByteStringTag | kSeqStringTag |
- ((kOneByteStringTag | kSeqStringTag) << 8);
- Label if_bothonebyteseqstrings(assembler),
- if_notbothonebyteseqstrings(assembler);
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(both_instance_types,
- assembler->Int32Constant(
- kBothSeqOneByteStringMask)),
- assembler->Int32Constant(kBothSeqOneByteStringTag)),
- &if_bothonebyteseqstrings, &if_notbothonebyteseqstrings);
-
- assembler->Bind(&if_bothonebyteseqstrings);
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ Variable var_offset(this, MachineType::PointerRepresentation());
+ Label loop(this, &var_offset);
+ var_offset.Bind(begin);
+ Goto(&loop);
+ Bind(&loop);
{
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = assembler->LoadStringLength(lhs);
- Node* rhs_length = assembler->LoadStringLength(rhs);
-
- // Determine the minimum length.
- Node* length = assembler->SmiMin(lhs_length, rhs_length);
-
- // Compute the effective offset of the first character.
- Node* begin = assembler->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag);
-
- // Compute the first offset after the string from the length.
- Node* end = assembler->IntPtrAdd(begin, assembler->SmiUntag(length));
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- Variable var_offset(assembler, MachineType::PointerRepresentation());
- Label loop(assembler, &var_offset);
- var_offset.Bind(begin);
- assembler->Goto(&loop);
- assembler->Bind(&loop);
+ // Check if {offset} equals {end}.
+ Node* offset = var_offset.value();
+ Label if_done(this), if_notdone(this);
+ Branch(WordEqual(offset, end), &if_done, &if_notdone);
+
+ Bind(&if_notdone);
{
- // Check if {offset} equals {end}.
- Node* offset = var_offset.value();
- Label if_done(assembler), if_notdone(assembler);
- assembler->Branch(assembler->WordEqual(offset, end), &if_done,
- &if_notdone);
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = Load(MachineType::Uint8(), lhs, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs, offset);
- assembler->Bind(&if_notdone);
- {
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = assembler->Load(MachineType::Uint8(), lhs, offset);
- Node* rhs_value = assembler->Load(MachineType::Uint8(), rhs, offset);
-
- // Check if the characters match.
- Label if_valueissame(assembler), if_valueisnotsame(assembler);
- assembler->Branch(assembler->Word32Equal(lhs_value, rhs_value),
- &if_valueissame, &if_valueisnotsame);
-
- assembler->Bind(&if_valueissame);
- {
- // Advance to next character.
- var_offset.Bind(
- assembler->IntPtrAdd(offset, assembler->IntPtrConstant(1)));
- }
- assembler->Goto(&loop);
-
- assembler->Bind(&if_valueisnotsame);
- assembler->Branch(assembler->Uint32LessThan(lhs_value, rhs_value),
- &if_less, &if_greater);
- }
+ // Check if the characters match.
+ Label if_valueissame(this), if_valueisnotsame(this);
+ Branch(Word32Equal(lhs_value, rhs_value), &if_valueissame,
+ &if_valueisnotsame);
- assembler->Bind(&if_done);
+ Bind(&if_valueissame);
{
- // All characters up to the min length are equal, decide based on
- // string length.
- Label if_lengthisequal(assembler), if_lengthisnotequal(assembler);
- assembler->Branch(assembler->SmiEqual(lhs_length, rhs_length),
- &if_lengthisequal, &if_lengthisnotequal);
-
- assembler->Bind(&if_lengthisequal);
- assembler->Goto(&if_equal);
-
- assembler->Bind(&if_lengthisnotequal);
- assembler->BranchIfSmiLessThan(lhs_length, rhs_length, &if_less,
- &if_greater);
+ // Advance to next character.
+ var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
}
+ Goto(&loop);
+
+ Bind(&if_valueisnotsame);
+ Branch(Uint32LessThan(lhs_value, rhs_value), &if_less, &if_greater);
+ }
+
+ Bind(&if_done);
+ {
+    // All characters up to the min length are equal; decide based on
+    // string length.
+ GotoIf(SmiEqual(lhs_length, rhs_length), &if_equal);
+ BranchIfSmiLessThan(lhs_length, rhs_length, &if_less, &if_greater);
}
}
+ }
- assembler->Bind(&if_notbothonebyteseqstrings);
+ Bind(&if_notbothonebyteseqstrings);
{
// TODO(bmeurer): Add fast case support for flattened cons strings;
// also add support for two byte string relational comparisons.
switch (mode) {
case RelationalComparisonMode::kLessThan:
- assembler->TailCallRuntime(Runtime::kStringLessThan, context, lhs,
- rhs);
+ TailCallRuntime(Runtime::kStringLessThan, context, lhs, rhs);
break;
case RelationalComparisonMode::kLessThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringLessThanOrEqual, context,
- lhs, rhs);
+ TailCallRuntime(Runtime::kStringLessThanOrEqual, context, lhs, rhs);
break;
case RelationalComparisonMode::kGreaterThan:
- assembler->TailCallRuntime(Runtime::kStringGreaterThan, context, lhs,
- rhs);
+ TailCallRuntime(Runtime::kStringGreaterThan, context, lhs, rhs);
break;
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->TailCallRuntime(Runtime::kStringGreaterThanOrEqual,
- context, lhs, rhs);
+ TailCallRuntime(Runtime::kStringGreaterThanOrEqual, context, lhs,
+ rhs);
break;
}
}
- }
- assembler->Bind(&if_less);
- switch (mode) {
- case RelationalComparisonMode::kLessThan:
- case RelationalComparisonMode::kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
- break;
+ Bind(&if_less);
+ switch (mode) {
+ case RelationalComparisonMode::kLessThan:
+ case RelationalComparisonMode::kLessThanOrEqual:
+ Return(BooleanConstant(true));
+ break;
- case RelationalComparisonMode::kGreaterThan:
- case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
- break;
+ case RelationalComparisonMode::kGreaterThan:
+ case RelationalComparisonMode::kGreaterThanOrEqual:
+ Return(BooleanConstant(false));
+ break;
}
- assembler->Bind(&if_equal);
+ Bind(&if_equal);
switch (mode) {
case RelationalComparisonMode::kLessThan:
case RelationalComparisonMode::kGreaterThan:
- assembler->Return(assembler->BooleanConstant(false));
+ Return(BooleanConstant(false));
break;
case RelationalComparisonMode::kLessThanOrEqual:
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
+ Return(BooleanConstant(true));
break;
}
- assembler->Bind(&if_greater);
+ Bind(&if_greater);
switch (mode) {
case RelationalComparisonMode::kLessThan:
case RelationalComparisonMode::kLessThanOrEqual:
- assembler->Return(assembler->BooleanConstant(false));
+ Return(BooleanConstant(false));
break;
case RelationalComparisonMode::kGreaterThan:
case RelationalComparisonMode::kGreaterThanOrEqual:
- assembler->Return(assembler->BooleanConstant(true));
+ Return(BooleanConstant(true));
break;
}
}
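
To make the CSA control flow above easier to follow, here is a minimal
scalar sketch of the same one-byte relational comparison in plain C++;
CompareOneByteStrings and its std::string parameters are illustrative
stand-ins for the flat one-byte SeqStrings the builtin actually walks.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <string>

// Byte-wise lexicographic compare: scan both sequences up to the shorter
// length, let the first differing byte decide, and fall back to comparing
// lengths when one string is a prefix of the other.
int CompareOneByteStrings(const std::string& lhs, const std::string& rhs) {
  std::size_t min_length = std::min(lhs.size(), rhs.size());
  for (std::size_t offset = 0; offset < min_length; ++offset) {
    uint8_t lhs_value = static_cast<uint8_t>(lhs[offset]);
    uint8_t rhs_value = static_cast<uint8_t>(rhs[offset]);
    if (lhs_value != rhs_value) return lhs_value < rhs_value ? -1 : 1;
  }
  if (lhs.size() == rhs.size()) return 0;   // if_equal
  return lhs.size() < rhs.size() ? -1 : 1;  // if_less / if_greater
}
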
-} // namespace
-
-// static
-void Builtins::Generate_StringEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, ResultMode::kDontNegateResult);
+TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
+ GenerateStringEqual(ResultMode::kDontNegateResult);
}
-// static
-void Builtins::Generate_StringNotEqual(CodeStubAssembler* assembler) {
- GenerateStringEqual(assembler, ResultMode::kNegateResult);
+TF_BUILTIN(StringNotEqual, StringBuiltinsAssembler) {
+ GenerateStringEqual(ResultMode::kNegateResult);
}
-// static
-void Builtins::Generate_StringLessThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler,
- RelationalComparisonMode::kLessThan);
+TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
+ GenerateStringRelationalComparison(RelationalComparisonMode::kLessThan);
}
-// static
-void Builtins::Generate_StringLessThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringLessThanOrEqual, StringBuiltinsAssembler) {
GenerateStringRelationalComparison(
- assembler, RelationalComparisonMode::kLessThanOrEqual);
+ RelationalComparisonMode::kLessThanOrEqual);
}
-// static
-void Builtins::Generate_StringGreaterThan(CodeStubAssembler* assembler) {
- GenerateStringRelationalComparison(assembler,
- RelationalComparisonMode::kGreaterThan);
+TF_BUILTIN(StringGreaterThan, StringBuiltinsAssembler) {
+ GenerateStringRelationalComparison(RelationalComparisonMode::kGreaterThan);
}
-// static
-void Builtins::Generate_StringGreaterThanOrEqual(CodeStubAssembler* assembler) {
+TF_BUILTIN(StringGreaterThanOrEqual, StringBuiltinsAssembler) {
GenerateStringRelationalComparison(
- assembler, RelationalComparisonMode::kGreaterThanOrEqual);
+ RelationalComparisonMode::kGreaterThanOrEqual);
+}
+
+TF_BUILTIN(StringCharAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+
+ // Load the character code at the {position} from the {receiver}.
+ Node* code = StringCharCodeAt(receiver, position,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+  // And return the single character string with only that {code}.
+ Node* result = StringFromCharCode(code);
+ Return(result);
+}
+
+TF_BUILTIN(StringCharCodeAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+
+ // Load the character code at the {position} from the {receiver}.
+ Node* code = StringCharCodeAt(receiver, position,
+ CodeStubAssembler::INTPTR_PARAMETERS);
+
+  // And return it as a TaggedSigned value.
+ // TODO(turbofan): Allow builtins to return values untagged.
+ Node* result = SmiFromWord32(code);
+ Return(result);
}
// -----------------------------------------------------------------------------
// ES6 section 21.1 String Objects
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
-void Builtins::Generate_StringFromCharCode(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringFromCharCode, CodeStubAssembler) {
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* argc = assembler->ChangeInt32ToIntPtr(
- assembler->Parameter(BuiltinDescriptor::kArgumentsCount));
- Node* context = assembler->Parameter(BuiltinDescriptor::kContext);
-
- CodeStubArguments arguments(assembler, argc);
+ CodeStubArguments arguments(this, argc);
+  // From now on, use the word-size argc value.
+ argc = arguments.GetLength();
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
- Label if_oneargument(assembler), if_notoneargument(assembler);
- assembler->Branch(assembler->WordEqual(argc, assembler->IntPtrConstant(1)),
- &if_oneargument, &if_notoneargument);
+ Label if_oneargument(this), if_notoneargument(this);
+ Branch(WordEqual(argc, IntPtrConstant(1)), &if_oneargument,
+ &if_notoneargument);
- assembler->Bind(&if_oneargument);
+ Bind(&if_oneargument);
{
// Single argument case, perform fast single character string cache lookup
// for one-byte code units, or fall back to creating a single character
// string on the fly otherwise.
Node* code = arguments.AtIndex(0);
- Node* code32 = assembler->TruncateTaggedToWord32(context, code);
- Node* code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
- Node* result = assembler->StringFromCharCode(code16);
+ Node* code32 = TruncateTaggedToWord32(context, code);
+ Node* code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+ Node* result = StringFromCharCode(code16);
arguments.PopAndReturn(result);
}
Node* code16 = nullptr;
- assembler->Bind(&if_notoneargument);
+ Bind(&if_notoneargument);
{
- Label two_byte(assembler);
+ Label two_byte(this);
// Assume that the resulting string contains only one-byte characters.
- Node* one_byte_result = assembler->AllocateSeqOneByteString(context, argc);
+ Node* one_byte_result = AllocateSeqOneByteString(context, argc);
- Variable max_index(assembler, MachineType::PointerRepresentation());
- max_index.Bind(assembler->IntPtrConstant(0));
+ Variable max_index(this, MachineType::PointerRepresentation());
+ max_index.Bind(IntPtrConstant(0));
// Iterate over the incoming arguments, converting them to 8-bit character
// codes. Stop if any of the conversions generates a code that doesn't fit
// in 8 bits.
- CodeStubAssembler::VariableList vars({&max_index}, assembler->zone());
- arguments.ForEach(vars, [context, &two_byte, &max_index, &code16,
- one_byte_result](CodeStubAssembler* assembler,
- Node* arg) {
- Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
- code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
-
- assembler->GotoIf(
- assembler->Int32GreaterThan(
- code16, assembler->Int32Constant(String::kMaxOneByteCharCode)),
+ CodeStubAssembler::VariableList vars({&max_index}, zone());
+ arguments.ForEach(vars, [this, context, &two_byte, &max_index, &code16,
+ one_byte_result](Node* arg) {
+ Node* code32 = TruncateTaggedToWord32(context, arg);
+ code16 = Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
+
+ GotoIf(
+ Int32GreaterThan(code16, Int32Constant(String::kMaxOneByteCharCode)),
&two_byte);
// The {code16} fits into the SeqOneByteString {one_byte_result}.
- Node* offset = assembler->ElementOffsetFromIndex(
+ Node* offset = ElementOffsetFromIndex(
max_index.value(), UINT8_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord8,
- one_byte_result, offset, code16);
- max_index.Bind(assembler->IntPtrAdd(max_index.value(),
- assembler->IntPtrConstant(1)));
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, one_byte_result,
+ offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
});
arguments.PopAndReturn(one_byte_result);
- assembler->Bind(&two_byte);
+ Bind(&two_byte);
// At least one of the characters in the string requires a 16-bit
// representation. Allocate a SeqTwoByteString to hold the resulting
// string.
- Node* two_byte_result = assembler->AllocateSeqTwoByteString(context, argc);
+ Node* two_byte_result = AllocateSeqTwoByteString(context, argc);
// Copy the characters that have already been put in the 8-bit string into
// their corresponding positions in the new 16-bit string.
- Node* zero = assembler->IntPtrConstant(0);
- assembler->CopyStringCharacters(
- one_byte_result, two_byte_result, zero, zero, max_index.value(),
- String::ONE_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* zero = IntPtrConstant(0);
+ CopyStringCharacters(one_byte_result, two_byte_result, zero, zero,
+ max_index.value(), String::ONE_BYTE_ENCODING,
+ String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::INTPTR_PARAMETERS);
// Write the character that caused the 8-bit to 16-bit fault.
- Node* max_index_offset = assembler->ElementOffsetFromIndex(
- max_index.value(), UINT16_ELEMENTS,
- CodeStubAssembler::INTPTR_PARAMETERS,
- SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
- two_byte_result, max_index_offset, code16);
- max_index.Bind(
- assembler->IntPtrAdd(max_index.value(), assembler->IntPtrConstant(1)));
+ Node* max_index_offset =
+ ElementOffsetFromIndex(max_index.value(), UINT16_ELEMENTS,
+ CodeStubAssembler::INTPTR_PARAMETERS,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+ max_index_offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
// Resume copying the passed-in arguments from the same place where the
// 8-bit copy stopped, but this time copying over all of the characters
// using a 16-bit representation.
arguments.ForEach(
vars,
- [context, two_byte_result, &max_index](CodeStubAssembler* assembler,
- Node* arg) {
- Node* code32 = assembler->TruncateTaggedToWord32(context, arg);
- Node* code16 = assembler->Word32And(
- code32, assembler->Int32Constant(String::kMaxUtf16CodeUnit));
+ [this, context, two_byte_result, &max_index](Node* arg) {
+ Node* code32 = TruncateTaggedToWord32(context, arg);
+ Node* code16 =
+ Word32And(code32, Int32Constant(String::kMaxUtf16CodeUnit));
- Node* offset = assembler->ElementOffsetFromIndex(
+ Node* offset = ElementOffsetFromIndex(
max_index.value(), UINT16_ELEMENTS,
CodeStubAssembler::INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord16,
- two_byte_result, offset, code16);
- max_index.Bind(assembler->IntPtrAdd(max_index.value(),
- assembler->IntPtrConstant(1)));
+ StoreNoWriteBarrier(MachineRepresentation::kWord16, two_byte_result,
+ offset, code16);
+ max_index.Bind(IntPtrAdd(max_index.value(), IntPtrConstant(1)));
},
max_index.value());
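
The strategy above is: optimistically assume the result is one-byte, and
restart in two-byte mode at the first wide code unit. A hedged scalar
sketch follows; FromCharCodes is a hypothetical helper, and std::u16string
stands in for the Seq*String allocations (unlike the builtin, the sketch
always returns a 16-bit buffer, purely to keep one return type).

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Fill an 8-bit buffer until a code unit above 0xFF appears, then widen
// the prefix to 16 bits and keep appending there.
std::u16string FromCharCodes(const std::vector<uint32_t>& codes) {
  std::vector<uint8_t> one_byte;
  one_byte.reserve(codes.size());
  std::size_t index = 0;
  for (; index < codes.size(); ++index) {
    // Mask with 0xFFFF, i.e. String::kMaxUtf16CodeUnit.
    uint16_t code16 = static_cast<uint16_t>(codes[index] & 0xFFFF);
    if (code16 > 0xFF) break;  // String::kMaxOneByteCharCode exceeded
    one_byte.push_back(static_cast<uint8_t>(code16));
  }
  // Widen the 8-bit prefix (a no-op when every argument fit), then resume
  // the walk with 16-bit code units.
  std::u16string result(one_byte.begin(), one_byte.end());
  for (; index < codes.size(); ++index) {
    result.push_back(static_cast<char16_t>(codes[index] & 0xFFFF));
  }
  return result;
}
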
@@ -558,7 +518,7 @@ bool IsValidCodePoint(Isolate* isolate, Handle<Object> value) {
}
uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) {
- Handle<Object> value = args.at<Object>(1 + index);
+ Handle<Object> value = args.at(1 + index);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::ToNumber(value), -1);
if (!IsValidCodePoint(isolate, value)) {
isolate->Throw(*isolate->factory()->NewRangeError(
@@ -632,91 +592,79 @@ BUILTIN(StringFromCodePoint) {
}
// ES6 section 21.1.3.1 String.prototype.charAt ( pos )
-void Builtins::Generate_StringPrototypeCharAt(CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* position = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+ Node* context = Parameter(4);
// Check that {receiver} is coercible to Object and convert it to a String.
- receiver =
- assembler->ToThisString(context, receiver, "String.prototype.charAt");
+ receiver = ToThisString(context, receiver, "String.prototype.charAt");
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
{
- Label return_emptystring(assembler, Label::kDeferred);
- position = assembler->ToInteger(context, position,
- CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->TaggedIsSmi(position),
- &return_emptystring);
+ Label return_emptystring(this, Label::kDeferred);
+ position =
+ ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+ GotoUnless(TaggedIsSmi(position), &return_emptystring);
// Determine the actual length of the {receiver} String.
- Node* receiver_length =
- assembler->LoadObjectField(receiver, String::kLengthOffset);
+ Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
// Return "" if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler);
- assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &return_emptystring, &if_positioninbounds);
+ Label if_positioninbounds(this);
+ Branch(SmiAboveOrEqual(position, receiver_length), &return_emptystring,
+ &if_positioninbounds);
- assembler->Bind(&return_emptystring);
- assembler->Return(assembler->EmptyStringConstant());
+ Bind(&return_emptystring);
+ Return(EmptyStringConstant());
- assembler->Bind(&if_positioninbounds);
+ Bind(&if_positioninbounds);
}
// Load the character code at the {position} from the {receiver}.
- Node* code = assembler->StringCharCodeAt(receiver, position);
+ Node* code = StringCharCodeAt(receiver, position);
// And return the single character string with only that {code}.
- Node* result = assembler->StringFromCharCode(code);
- assembler->Return(result);
+ Node* result = StringFromCharCode(code);
+ Return(result);
}
// ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos )
-void Builtins::Generate_StringPrototypeCharCodeAt(
- CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* position = assembler->Parameter(1);
- Node* context = assembler->Parameter(4);
+TF_BUILTIN(StringPrototypeCharCodeAt, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* position = Parameter(1);
+ Node* context = Parameter(4);
// Check that {receiver} is coercible to Object and convert it to a String.
- receiver =
- assembler->ToThisString(context, receiver, "String.prototype.charCodeAt");
+ receiver = ToThisString(context, receiver, "String.prototype.charCodeAt");
// Convert the {position} to a Smi and check that it's in bounds of the
// {receiver}.
{
- Label return_nan(assembler, Label::kDeferred);
- position = assembler->ToInteger(context, position,
- CodeStubAssembler::kTruncateMinusZero);
- assembler->GotoUnless(assembler->TaggedIsSmi(position), &return_nan);
+ Label return_nan(this, Label::kDeferred);
+ position =
+ ToInteger(context, position, CodeStubAssembler::kTruncateMinusZero);
+ GotoUnless(TaggedIsSmi(position), &return_nan);
// Determine the actual length of the {receiver} String.
- Node* receiver_length =
- assembler->LoadObjectField(receiver, String::kLengthOffset);
+ Node* receiver_length = LoadObjectField(receiver, String::kLengthOffset);
// Return NaN if the Smi {position} is outside the bounds of the {receiver}.
- Label if_positioninbounds(assembler);
- assembler->Branch(assembler->SmiAboveOrEqual(position, receiver_length),
- &return_nan, &if_positioninbounds);
+ Label if_positioninbounds(this);
+ Branch(SmiAboveOrEqual(position, receiver_length), &return_nan,
+ &if_positioninbounds);
- assembler->Bind(&return_nan);
- assembler->Return(assembler->NaNConstant());
+ Bind(&return_nan);
+ Return(NaNConstant());
- assembler->Bind(&if_positioninbounds);
+ Bind(&if_positioninbounds);
}
// Load the character at the {position} from the {receiver}.
- Node* value = assembler->StringCharCodeAt(receiver, position);
- Node* result = assembler->SmiFromWord32(value);
- assembler->Return(result);
+ Node* value = StringCharCodeAt(receiver, position);
+ Node* result = SmiFromWord32(value);
+ Return(result);
}
// ES6 section 21.1.3.6
@@ -750,16 +698,30 @@ BUILTIN(StringPrototypeEndsWith) {
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
- end = static_cast<uint32_t>(index);
+ end = str->ToValidIndex(*position);
}
int start = end - search_string->length();
if (start < 0) return isolate->heap()->false_value();
- FlatStringReader str_reader(isolate, String::Flatten(str));
- FlatStringReader search_reader(isolate, String::Flatten(search_string));
+ str = String::Flatten(str);
+ search_string = String::Flatten(search_string);
+
+ DisallowHeapAllocation no_gc; // ensure vectors stay valid
+ String::FlatContent str_content = str->GetFlatContent();
+ String::FlatContent search_content = search_string->GetFlatContent();
+
+ if (str_content.IsOneByte() && search_content.IsOneByte()) {
+ Vector<const uint8_t> str_vector = str_content.ToOneByteVector();
+ Vector<const uint8_t> search_vector = search_content.ToOneByteVector();
+
+ return isolate->heap()->ToBoolean(memcmp(str_vector.start() + start,
+ search_vector.start(),
+ search_string->length()) == 0);
+ }
+
+ FlatStringReader str_reader(isolate, str);
+ FlatStringReader search_reader(isolate, search_string);
for (int i = 0; i < search_string->length(); i++) {
if (str_reader.Get(start + i) != search_reader.Get(i)) {
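
Once both strings are flat with one-byte content, the new fast path above
reduces endsWith to a single memcmp against the tail of the subject. An
illustrative sketch with hypothetical raw-pointer parameters:

#include <cstdint>
#include <cstring>

// One-byte fast path: compare the needle against the search_length bytes
// that end at position `end` in the subject string.
bool EndsWithOneByte(const uint8_t* str, const uint8_t* search,
                     int search_length, int end) {
  int start = end - search_length;
  if (start < 0) return false;  // needle longer than the searched prefix
  return std::memcmp(str + start, search, search_length) == 0;
}
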
@@ -796,21 +758,137 @@ BUILTIN(StringPrototypeIncludes) {
isolate, position,
Object::ToInteger(isolate, args.atOrUndefined(isolate, 2)));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
-
- int index_in_str = String::IndexOf(isolate, str, search_string,
- static_cast<uint32_t>(index));
+ uint32_t index = str->ToValidIndex(*position);
+ int index_in_str = String::IndexOf(isolate, str, search_string, index);
return *isolate->factory()->ToBoolean(index_in_str != -1);
}
-// ES6 section 21.1.3.8 String.prototype.indexOf ( searchString [ , position ] )
-BUILTIN(StringPrototypeIndexOf) {
- HandleScope handle_scope(isolate);
+// ES6 #sec-string.prototype.indexof
+TF_BUILTIN(StringPrototypeIndexOf, StringBuiltinsAssembler) {
+ Variable search_string(this, MachineRepresentation::kTagged),
+ position(this, MachineRepresentation::kTagged);
+ Label call_runtime(this), call_runtime_unchecked(this), argc_0(this),
+ no_argc_0(this), argc_1(this), no_argc_1(this), argc_2(this),
+ fast_path(this), return_minus_1(this);
+
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ CodeStubArguments arguments(this, argc);
+ Node* receiver = arguments.GetReceiver();
+  // From now on, use the word-size argc value.
+ argc = arguments.GetLength();
+
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &argc_0);
+ GotoIf(IntPtrEqual(argc, IntPtrConstant(1)), &argc_1);
+ Goto(&argc_2);
+ Bind(&argc_0);
+ {
+ Comment("0 Argument case");
+ Node* undefined = UndefinedConstant();
+ search_string.Bind(undefined);
+ position.Bind(undefined);
+ Goto(&call_runtime);
+ }
+ Bind(&argc_1);
+ {
+ Comment("1 Argument case");
+ search_string.Bind(arguments.AtIndex(0));
+ position.Bind(SmiConstant(0));
+ Goto(&fast_path);
+ }
+ Bind(&argc_2);
+ {
+ Comment("2 Argument case");
+ search_string.Bind(arguments.AtIndex(0));
+ position.Bind(arguments.AtIndex(1));
+ GotoUnless(TaggedIsSmi(position.value()), &call_runtime);
+ position.Bind(SmiMax(position.value(), SmiConstant(0)));
+ Goto(&fast_path);
+ }
- return String::IndexOf(isolate, args.receiver(),
- args.atOrUndefined(isolate, 1),
- args.atOrUndefined(isolate, 2));
+ Bind(&fast_path);
+ {
+ Comment("Fast Path");
+ Label zero_length_needle(this);
+ GotoIf(TaggedIsSmi(receiver), &call_runtime);
+ Node* needle = search_string.value();
+ GotoIf(TaggedIsSmi(needle), &call_runtime);
+ Node* instance_type = LoadInstanceType(receiver);
+ GotoUnless(IsStringInstanceType(instance_type), &call_runtime);
+
+ Node* needle_instance_type = LoadInstanceType(needle);
+ GotoUnless(IsStringInstanceType(needle_instance_type), &call_runtime);
+
+ // At this point we know that the receiver and the needle are Strings and
+ // that position is a Smi.
+
+ Node* needle_length = SmiUntag(LoadStringLength(needle));
+  // Use the possibly faster runtime fallback for long search strings.
+ GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
+ &call_runtime_unchecked);
+ Node* string_length = SmiUntag(LoadStringLength(receiver));
+ Node* start_position = SmiUntag(position.value());
+
+ GotoIf(IntPtrEqual(IntPtrConstant(0), needle_length), &zero_length_needle);
+  // Check that the needle fits in the remaining characters after
+  // {start_position}.
+ GotoUnless(IntPtrLessThanOrEqual(needle_length,
+ IntPtrSub(string_length, start_position)),
+ &return_minus_1);
+ // Only support one-byte strings on the fast path.
+ Label check_needle(this), continue_fast_path(this);
+ BranchIfSimpleOneByteStringInstanceType(instance_type, &check_needle,
+ &call_runtime_unchecked);
+ Bind(&check_needle);
+ BranchIfSimpleOneByteStringInstanceType(
+ needle_instance_type, &continue_fast_path, &call_runtime_unchecked);
+ Bind(&continue_fast_path);
+ {
+ Node* needle_byte =
+ ChangeInt32ToIntPtr(LoadOneByteChar(needle, IntPtrConstant(0)));
+ Node* start_address = OneByteCharAddress(receiver, start_position);
+ Node* search_length = IntPtrSub(string_length, start_position);
+ // Call out to the highly optimized memchr to perform the actual byte
+ // search.
+ Node* memchr =
+ ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
+ Node* result_address =
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memchr,
+ start_address, needle_byte, search_length);
+ GotoIf(WordEqual(result_address, IntPtrConstant(0)), &return_minus_1);
+ Node* result_index =
+ IntPtrAdd(IntPtrSub(result_address, start_address), start_position);
+ arguments.PopAndReturn(SmiTag(result_index));
+ }
+ Bind(&zero_length_needle);
+ {
+ Comment("0-length needle");
+ arguments.PopAndReturn(SmiTag(IntPtrMin(string_length, start_position)));
+ }
+ }
+
+ Bind(&return_minus_1);
+ { arguments.PopAndReturn(SmiConstant(-1)); }
+
+ Bind(&call_runtime);
+ {
+ Comment("Call Runtime");
+ Node* result = CallRuntime(Runtime::kStringIndexOf, context, receiver,
+ search_string.value(), position.value());
+ arguments.PopAndReturn(result);
+ }
+
+ Bind(&call_runtime_unchecked);
+ {
+ // Simplified version of the runtime call where the types of the arguments
+ // are already known due to type checks in this stub.
+ Comment("Call Runtime Unchecked");
+ Node* result =
+ CallRuntime(Runtime::kStringIndexOfUnchecked, context, receiver,
+ search_string.value(), position.value());
+ arguments.PopAndReturn(result);
+ }
}
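
Stripped of the CSA plumbing, the fast path above amounts to a bounds
check plus one libc call. A sketch with hypothetical names, assuming a
flat one-byte subject and a single one-byte needle character:

#include <cstdint>
#include <cstring>

// Single-byte needle search: memchr over the bytes at and after
// start_position; translate the hit back into a string index.
int IndexOfSingleByteChar(const uint8_t* chars, int string_length,
                          uint8_t needle_byte, int start_position) {
  if (start_position >= string_length) return -1;
  const void* result_address = std::memchr(
      chars + start_position, needle_byte, string_length - start_position);
  if (result_address == nullptr) return -1;
  return static_cast<int>(static_cast<const uint8_t*>(result_address) -
                          chars);
}
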
// ES6 section 21.1.3.9
@@ -834,8 +912,8 @@ BUILTIN(StringPrototypeLocaleCompare) {
TO_THIS_STRING(str1, "String.prototype.localeCompare");
Handle<String> str2;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, str2, Object::ToString(isolate, args.at<Object>(1)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str2,
+ Object::ToString(isolate, args.at(1)));
if (str1.is_identical_to(str2)) return Smi::kZero; // Equal.
int str1_length = str1->length();
@@ -908,236 +986,220 @@ BUILTIN(StringPrototypeNormalize) {
}
// ES6 section B.2.3.1 String.prototype.substr ( start, length )
-void Builtins::Generate_StringPrototypeSubstr(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
+ Label out(this), handle_length(this);
- Label out(a), handle_length(a);
+ Variable var_start(this, MachineRepresentation::kTagged);
+ Variable var_length(this, MachineRepresentation::kTagged);
- Variable var_start(a, MachineRepresentation::kTagged);
- Variable var_length(a, MachineRepresentation::kTagged);
+ Node* const receiver = Parameter(0);
+ Node* const start = Parameter(1);
+ Node* const length = Parameter(2);
+ Node* const context = Parameter(5);
- Node* const receiver = a->Parameter(0);
- Node* const start = a->Parameter(1);
- Node* const length = a->Parameter(2);
- Node* const context = a->Parameter(5);
-
- Node* const zero = a->SmiConstant(Smi::kZero);
+ Node* const zero = SmiConstant(Smi::kZero);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
- a->ToThisString(context, receiver, "String.prototype.substr");
+ ToThisString(context, receiver, "String.prototype.substr");
- Node* const string_length = a->LoadStringLength(string);
+ Node* const string_length = LoadStringLength(string);
// Conversions and bounds-checks for {start}.
{
Node* const start_int =
- a->ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+ ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
- Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
- a->Branch(a->TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Node* const length_plus_start = a->SmiAdd(string_length, start_int);
- var_start.Bind(a->Select(a->SmiLessThan(start_int, zero),
- a->SmiMax(length_plus_start, zero), start_int));
- a->Goto(&handle_length);
+ Node* const length_plus_start = SmiAdd(string_length, start_int);
+ var_start.Bind(Select(SmiLessThan(start_int, zero),
+ [&] { return SmiMax(length_plus_start, zero); },
+ [&] { return start_int; },
+ MachineRepresentation::kTagged));
+ Goto(&handle_length);
}
- a->Bind(&if_isheapnumber);
+ Bind(&if_isheapnumber);
{
// If {start} is a heap number, it is definitely out of bounds. If it is
    // negative, {start} = max(({string_length} + {start}), 0) = 0. If it is
    // positive, set {start} to {string_length}, which ultimately results in
// returning an empty string.
- Node* const float_zero = a->Float64Constant(0.);
- Node* const start_float = a->LoadHeapNumberValue(start_int);
- var_start.Bind(a->Select(a->Float64LessThan(start_float, float_zero),
- zero, string_length));
- a->Goto(&handle_length);
+ Node* const float_zero = Float64Constant(0.);
+ Node* const start_float = LoadHeapNumberValue(start_int);
+ var_start.Bind(SelectTaggedConstant(
+ Float64LessThan(start_float, float_zero), zero, string_length));
+ Goto(&handle_length);
}
}
// Conversions and bounds-checks for {length}.
- a->Bind(&handle_length);
+ Bind(&handle_length);
{
- Label if_issmi(a), if_isheapnumber(a, Label::kDeferred);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
// Default to {string_length} if {length} is undefined.
{
- Label if_isundefined(a, Label::kDeferred), if_isnotundefined(a);
- a->Branch(a->WordEqual(length, a->UndefinedConstant()), &if_isundefined,
- &if_isnotundefined);
+ Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
+ Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
+ &if_isnotundefined);
- a->Bind(&if_isundefined);
+ Bind(&if_isundefined);
var_length.Bind(string_length);
- a->Goto(&if_issmi);
+ Goto(&if_issmi);
- a->Bind(&if_isnotundefined);
+ Bind(&if_isnotundefined);
var_length.Bind(
- a->ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+ ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
}
- a->Branch(a->TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
  // Set {length} to min(max({length}, 0), {string_length} - {start}).
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Node* const positive_length = a->SmiMax(var_length.value(), zero);
+ Node* const positive_length = SmiMax(var_length.value(), zero);
- Node* const minimal_length = a->SmiSub(string_length, var_start.value());
- var_length.Bind(a->SmiMin(positive_length, minimal_length));
+ Node* const minimal_length = SmiSub(string_length, var_start.value());
+ var_length.Bind(SmiMin(positive_length, minimal_length));
- a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
- a->Return(a->EmptyStringConstant());
+ GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ Return(EmptyStringConstant());
}
- a->Bind(&if_isheapnumber);
+ Bind(&if_isheapnumber);
{
// If {length} is a heap number, it is definitely out of bounds. There are
// two cases according to the spec: if it is negative, "" is returned; if
// it is positive, then length is set to {string_length} - {start}.
- CSA_ASSERT(a, a->WordEqual(a->LoadMap(var_length.value()),
- a->HeapNumberMapConstant()));
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
- Label if_isnegative(a), if_ispositive(a);
- Node* const float_zero = a->Float64Constant(0.);
- Node* const length_float = a->LoadHeapNumberValue(var_length.value());
- a->Branch(a->Float64LessThan(length_float, float_zero), &if_isnegative,
- &if_ispositive);
+ Label if_isnegative(this), if_ispositive(this);
+ Node* const float_zero = Float64Constant(0.);
+ Node* const length_float = LoadHeapNumberValue(var_length.value());
+ Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
+ &if_ispositive);
- a->Bind(&if_isnegative);
- a->Return(a->EmptyStringConstant());
+ Bind(&if_isnegative);
+ Return(EmptyStringConstant());
- a->Bind(&if_ispositive);
+ Bind(&if_ispositive);
{
- var_length.Bind(a->SmiSub(string_length, var_start.value()));
- a->GotoUnless(a->SmiLessThanOrEqual(var_length.value(), zero), &out);
- a->Return(a->EmptyStringConstant());
+ var_length.Bind(SmiSub(string_length, var_start.value()));
+ GotoUnless(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ Return(EmptyStringConstant());
}
}
}
- a->Bind(&out);
+ Bind(&out);
{
- Node* const end = a->SmiAdd(var_start.value(), var_length.value());
- Node* const result = a->SubString(context, string, var_start.value(), end);
- a->Return(result);
+ Node* const end = SmiAdd(var_start.value(), var_length.value());
+ Node* const result = SubString(context, string, var_start.value(), end);
+ Return(result);
}
}
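
The Smi branches above implement the usual substr clamping; restated with
ordinary integers below (a sketch; the heap-number branches in the builtin
exist only because such values lie outside Smi range and therefore clamp
straight to an endpoint).

#include <algorithm>
#include <cstddef>
#include <string>

// Negative start counts back from the end; length is clamped into
// [0, string_length - start]; a non-positive clamped length yields "".
std::string Substr(const std::string& s, long start, long length) {
  long string_length = static_cast<long>(s.size());
  if (start < 0) start = std::max(string_length + start, 0L);
  start = std::min(start, string_length);
  length = std::min(std::max(length, 0L), string_length - start);
  if (length <= 0) return std::string();
  return s.substr(static_cast<std::size_t>(start),
                  static_cast<std::size_t>(length));
}
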
-namespace {
-
-compiler::Node* ToSmiBetweenZeroAnd(CodeStubAssembler* a,
- compiler::Node* context,
- compiler::Node* value,
- compiler::Node* limit) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- Label out(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+compiler::Node* StringBuiltinsAssembler::ToSmiBetweenZeroAnd(Node* context,
+ Node* value,
+ Node* limit) {
+ Label out(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
Node* const value_int =
- a->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
+ this->ToInteger(context, value, CodeStubAssembler::kTruncateMinusZero);
- Label if_issmi(a), if_isnotsmi(a, Label::kDeferred);
- a->Branch(a->TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
+ Label if_issmi(this), if_isnotsmi(this, Label::kDeferred);
+ Branch(TaggedIsSmi(value_int), &if_issmi, &if_isnotsmi);
- a->Bind(&if_issmi);
+ Bind(&if_issmi);
{
- Label if_isinbounds(a), if_isoutofbounds(a, Label::kDeferred);
- a->Branch(a->SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
+ Label if_isinbounds(this), if_isoutofbounds(this, Label::kDeferred);
+ Branch(SmiAbove(value_int, limit), &if_isoutofbounds, &if_isinbounds);
- a->Bind(&if_isinbounds);
+ Bind(&if_isinbounds);
{
var_result.Bind(value_int);
- a->Goto(&out);
+ Goto(&out);
}
- a->Bind(&if_isoutofbounds);
+ Bind(&if_isoutofbounds);
{
- Node* const zero = a->SmiConstant(Smi::kZero);
- var_result.Bind(a->Select(a->SmiLessThan(value_int, zero), zero, limit));
- a->Goto(&out);
+ Node* const zero = SmiConstant(Smi::kZero);
+ var_result.Bind(
+ SelectTaggedConstant(SmiLessThan(value_int, zero), zero, limit));
+ Goto(&out);
}
}
- a->Bind(&if_isnotsmi);
+ Bind(&if_isnotsmi);
{
    // {value} is a heap number; in this case, it is definitely out of bounds.
- CSA_ASSERT(a,
- a->WordEqual(a->LoadMap(value_int), a->HeapNumberMapConstant()));
-
- Node* const float_zero = a->Float64Constant(0.);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
- Node* const value_float = a->LoadHeapNumberValue(value_int);
- var_result.Bind(a->Select(a->Float64LessThan(value_float, float_zero),
- smi_zero, limit));
- a->Goto(&out);
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(value_int)));
+
+ Node* const float_zero = Float64Constant(0.);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
+ Node* const value_float = LoadHeapNumberValue(value_int);
+ var_result.Bind(SelectTaggedConstant(
+ Float64LessThan(value_float, float_zero), smi_zero, limit));
+ Goto(&out);
}
- a->Bind(&out);
+ Bind(&out);
return var_result.value();
}
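
In scalar terms the helper clamps a converted index into [0, limit].
ClampToRange below is an illustrative restatement, not the builtin's
actual signature; it also shows why the heap-number branch can only ever
pick an endpoint.

// A double that survived ToInteger but did not fit in a Smi is necessarily
// outside [0, limit], so it clamps to 0 or to limit.
int ClampToRange(double value, int limit) {
  if (value < 0) return 0;
  if (value > limit) return limit;
  return static_cast<int>(value);
}
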
-} // namespace
-
// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
-void Builtins::Generate_StringPrototypeSubstring(CodeStubAssembler* a) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringPrototypeSubstring, StringBuiltinsAssembler) {
+ Label out(this);
- Label out(a);
+ Variable var_start(this, MachineRepresentation::kTagged);
+ Variable var_end(this, MachineRepresentation::kTagged);
- Variable var_start(a, MachineRepresentation::kTagged);
- Variable var_end(a, MachineRepresentation::kTagged);
-
- Node* const receiver = a->Parameter(0);
- Node* const start = a->Parameter(1);
- Node* const end = a->Parameter(2);
- Node* const context = a->Parameter(5);
+ Node* const receiver = Parameter(0);
+ Node* const start = Parameter(1);
+ Node* const end = Parameter(2);
+ Node* const context = Parameter(5);
// Check that {receiver} is coercible to Object and convert it to a String.
Node* const string =
- a->ToThisString(context, receiver, "String.prototype.substring");
+ ToThisString(context, receiver, "String.prototype.substring");
- Node* const length = a->LoadStringLength(string);
+ Node* const length = LoadStringLength(string);
// Conversion and bounds-checks for {start}.
- var_start.Bind(ToSmiBetweenZeroAnd(a, context, start, length));
+ var_start.Bind(ToSmiBetweenZeroAnd(context, start, length));
// Conversion and bounds-checks for {end}.
{
var_end.Bind(length);
- a->GotoIf(a->WordEqual(end, a->UndefinedConstant()), &out);
+ GotoIf(WordEqual(end, UndefinedConstant()), &out);
- var_end.Bind(ToSmiBetweenZeroAnd(a, context, end, length));
+ var_end.Bind(ToSmiBetweenZeroAnd(context, end, length));
- Label if_endislessthanstart(a);
- a->Branch(a->SmiLessThan(var_end.value(), var_start.value()),
- &if_endislessthanstart, &out);
+ Label if_endislessthanstart(this);
+ Branch(SmiLessThan(var_end.value(), var_start.value()),
+ &if_endislessthanstart, &out);
- a->Bind(&if_endislessthanstart);
+ Bind(&if_endislessthanstart);
{
Node* const tmp = var_end.value();
var_end.Bind(var_start.value());
var_start.Bind(tmp);
- a->Goto(&out);
+ Goto(&out);
}
}
- a->Bind(&out);
+ Bind(&out);
{
Node* result =
- a->SubString(context, string, var_start.value(), var_end.value());
- a->Return(result);
+ SubString(context, string, var_start.value(), var_end.value());
+ Return(result);
}
}
@@ -1170,9 +1232,7 @@ BUILTIN(StringPrototypeStartsWith) {
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(str->length()));
- start = static_cast<uint32_t>(index);
+ start = str->ToValidIndex(*position);
}
if (start + search_string->length() > str->length()) {
@@ -1191,15 +1251,13 @@ BUILTIN(StringPrototypeStartsWith) {
}
// ES6 section 21.1.3.25 String.prototype.toString ()
-void Builtins::Generate_StringPrototypeToString(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+TF_BUILTIN(StringPrototypeToString, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kString, "String.prototype.toString");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+ "String.prototype.toString");
+ Return(result);
}
// ES6 section 21.1.3.27 String.prototype.trim ()
@@ -1224,103 +1282,82 @@ BUILTIN(StringPrototypeTrimRight) {
}
// ES6 section 21.1.3.28 String.prototype.valueOf ( )
-void Builtins::Generate_StringPrototypeValueOf(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
+TF_BUILTIN(StringPrototypeValueOf, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* result = assembler->ToThisValue(
- context, receiver, PrimitiveType::kString, "String.prototype.valueOf");
- assembler->Return(result);
+ Node* result = ToThisValue(context, receiver, PrimitiveType::kString,
+ "String.prototype.valueOf");
+ Return(result);
}
-void Builtins::Generate_StringPrototypeIterator(CodeStubAssembler* assembler) {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
-
- Node* string = assembler->ToThisString(context, receiver,
- "String.prototype[Symbol.iterator]");
-
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map = assembler->LoadFixedArrayElement(
- native_context,
- assembler->IntPtrConstant(Context::STRING_ITERATOR_MAP_INDEX), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* iterator = assembler->Allocate(JSStringIterator::kSize);
- assembler->StoreMapNoWriteBarrier(iterator, map);
- assembler->StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kStringOffset, string);
- Node* index = assembler->SmiConstant(Smi::kZero);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kNextIndexOffset, index);
- assembler->Return(iterator);
-}
+TF_BUILTIN(StringPrototypeIterator, CodeStubAssembler) {
+ Node* receiver = Parameter(0);
+ Node* context = Parameter(3);
-namespace {
+ Node* string =
+ ToThisString(context, receiver, "String.prototype[Symbol.iterator]");
+
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
+ LoadContextElement(native_context, Context::STRING_ITERATOR_MAP_INDEX);
+ Node* iterator = Allocate(JSStringIterator::kSize);
+ StoreMapNoWriteBarrier(iterator, map);
+ StoreObjectFieldRoot(iterator, JSValue::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(iterator, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kStringOffset,
+ string);
+ Node* index = SmiConstant(Smi::kZero);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+ index);
+ Return(iterator);
+}
// Return the |word32| codepoint at {index}. Supports SeqStrings and
// ExternalStrings.
-compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
- compiler::Node* string,
- compiler::Node* length,
- compiler::Node* index,
- UnicodeEncoding encoding) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
- Label handle_surrogate_pair(assembler), return_result(assembler);
- Variable var_result(assembler, MachineRepresentation::kWord32);
- Variable var_trail(assembler, MachineRepresentation::kWord16);
- var_result.Bind(assembler->StringCharCodeAt(string, index));
- var_trail.Bind(assembler->Int32Constant(0));
-
- assembler->GotoIf(assembler->Word32NotEqual(
- assembler->Word32And(var_result.value(),
- assembler->Int32Constant(0xFC00)),
- assembler->Int32Constant(0xD800)),
- &return_result);
- Node* next_index =
- assembler->SmiAdd(index, assembler->SmiConstant(Smi::FromInt(1)));
-
- assembler->GotoUnless(assembler->SmiLessThan(next_index, length),
- &return_result);
- var_trail.Bind(assembler->StringCharCodeAt(string, next_index));
- assembler->Branch(assembler->Word32Equal(
- assembler->Word32And(var_trail.value(),
- assembler->Int32Constant(0xFC00)),
- assembler->Int32Constant(0xDC00)),
- &handle_surrogate_pair, &return_result);
-
- assembler->Bind(&handle_surrogate_pair);
+compiler::Node* StringBuiltinsAssembler::LoadSurrogatePairAt(
+ compiler::Node* string, compiler::Node* length, compiler::Node* index,
+ UnicodeEncoding encoding) {
+ Label handle_surrogate_pair(this), return_result(this);
+ Variable var_result(this, MachineRepresentation::kWord32);
+ Variable var_trail(this, MachineRepresentation::kWord32);
+ var_result.Bind(StringCharCodeAt(string, index));
+ var_trail.Bind(Int32Constant(0));
+
+ GotoIf(Word32NotEqual(Word32And(var_result.value(), Int32Constant(0xFC00)),
+ Int32Constant(0xD800)),
+ &return_result);
+ Node* next_index = SmiAdd(index, SmiConstant(Smi::FromInt(1)));
+
+ GotoUnless(SmiLessThan(next_index, length), &return_result);
+ var_trail.Bind(StringCharCodeAt(string, next_index));
+ Branch(Word32Equal(Word32And(var_trail.value(), Int32Constant(0xFC00)),
+ Int32Constant(0xDC00)),
+ &handle_surrogate_pair, &return_result);
+
+ Bind(&handle_surrogate_pair);
{
Node* lead = var_result.value();
Node* trail = var_trail.value();
    // Check that this path is only taken if a surrogate pair is found.
- CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
- lead, assembler->Int32Constant(0xD800)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
- lead, assembler->Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32GreaterThanOrEqual(
- trail, assembler->Int32Constant(0xDC00)));
- CSA_SLOW_ASSERT(assembler, assembler->Uint32LessThan(
- trail, assembler->Int32Constant(0xE000)));
+ CSA_SLOW_ASSERT(this,
+ Uint32GreaterThanOrEqual(lead, Int32Constant(0xD800)));
+ CSA_SLOW_ASSERT(this, Uint32LessThan(lead, Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(this,
+ Uint32GreaterThanOrEqual(trail, Int32Constant(0xDC00)));
+ CSA_SLOW_ASSERT(this, Uint32LessThan(trail, Int32Constant(0xE000)));
switch (encoding) {
case UnicodeEncoding::UTF16:
- var_result.Bind(assembler->WordOr(
+ var_result.Bind(Word32Or(
// Need to swap the order for big-endian platforms
#if V8_TARGET_BIG_ENDIAN
- assembler->WordShl(lead, assembler->Int32Constant(16)), trail));
+ Word32Shl(lead, Int32Constant(16)), trail));
#else
- assembler->WordShl(trail, assembler->Int32Constant(16)), lead));
+ Word32Shl(trail, Int32Constant(16)), lead));
#endif
break;
@@ -1328,107 +1365,85 @@ compiler::Node* LoadSurrogatePairInternal(CodeStubAssembler* assembler,
// Convert UTF16 surrogate pair into |word32| code point, encoded as
// UTF32.
Node* surrogate_offset =
- assembler->Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
+ Int32Constant(0x10000 - (0xD800 << 10) - 0xDC00);
// (lead << 10) + trail + SURROGATE_OFFSET
- var_result.Bind(assembler->Int32Add(
- assembler->WordShl(lead, assembler->Int32Constant(10)),
- assembler->Int32Add(trail, surrogate_offset)));
+ var_result.Bind(Int32Add(WordShl(lead, Int32Constant(10)),
+ Int32Add(trail, surrogate_offset)));
break;
}
}
- assembler->Goto(&return_result);
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
+ Bind(&return_result);
return var_result.value();
}
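
The surrogate-pair arithmetic is easier to verify outside the assembler.
CodePointAt below is a hedged scalar sketch of the UTF32 case; the
wrap-around offset constant mirrors the Int32Constant trick above and is
equivalent to ((lead - 0xD800) << 10) + (trail - 0xDC00) + 0x10000.

#include <cstdint>

// Decode one code point: detect a lead surrogate, peek at the trail, and
// combine them; unpaired surrogates are returned as-is.
uint32_t CodePointAt(const uint16_t* units, int length, int index) {
  uint32_t lead = units[index];
  if ((lead & 0xFC00) != 0xD800) return lead;   // not a lead surrogate
  if (index + 1 >= length) return lead;         // lone lead at the end
  uint32_t trail = units[index + 1];
  if ((trail & 0xFC00) != 0xDC00) return lead;  // no trail surrogate follows
  const uint32_t kSurrogateOffset =
      0x10000u - (0xD800u << 10) - 0xDC00u;     // wraps mod 2^32 on purpose
  return (lead << 10) + trail + kSurrogateOffset;
}
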
-compiler::Node* LoadSurrogatePairAt(CodeStubAssembler* assembler,
- compiler::Node* string,
- compiler::Node* length,
- compiler::Node* index) {
- return LoadSurrogatePairInternal(assembler, string, length, index,
- UnicodeEncoding::UTF16);
-}
-
-} // namespace
-
-void Builtins::Generate_StringIteratorPrototypeNext(
- CodeStubAssembler* assembler) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
+TF_BUILTIN(StringIteratorPrototypeNext, StringBuiltinsAssembler) {
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Variable var_done(this, MachineRepresentation::kTagged);
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Variable var_done(assembler, MachineRepresentation::kTagged);
+ var_value.Bind(UndefinedConstant());
+ var_done.Bind(BooleanConstant(true));
- var_value.Bind(assembler->UndefinedConstant());
- var_done.Bind(assembler->BooleanConstant(true));
+ Label throw_bad_receiver(this), next_codepoint(this), return_result(this);
- Label throw_bad_receiver(assembler), next_codepoint(assembler),
- return_result(assembler);
+ Node* iterator = Parameter(0);
+ Node* context = Parameter(3);
- Node* iterator = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ GotoIf(TaggedIsSmi(iterator), &throw_bad_receiver);
+ GotoUnless(Word32Equal(LoadInstanceType(iterator),
+ Int32Constant(JS_STRING_ITERATOR_TYPE)),
+ &throw_bad_receiver);
- assembler->GotoIf(assembler->TaggedIsSmi(iterator), &throw_bad_receiver);
- assembler->GotoUnless(
- assembler->WordEqual(assembler->LoadInstanceType(iterator),
- assembler->Int32Constant(JS_STRING_ITERATOR_TYPE)),
- &throw_bad_receiver);
-
- Node* string =
- assembler->LoadObjectField(iterator, JSStringIterator::kStringOffset);
+ Node* string = LoadObjectField(iterator, JSStringIterator::kStringOffset);
Node* position =
- assembler->LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
- Node* length = assembler->LoadObjectField(string, String::kLengthOffset);
+ LoadObjectField(iterator, JSStringIterator::kNextIndexOffset);
+ Node* length = LoadObjectField(string, String::kLengthOffset);
- assembler->Branch(assembler->SmiLessThan(position, length), &next_codepoint,
- &return_result);
+ Branch(SmiLessThan(position, length), &next_codepoint, &return_result);
- assembler->Bind(&next_codepoint);
+ Bind(&next_codepoint);
{
- Node* ch = LoadSurrogatePairAt(assembler, string, length, position);
- Node* value = assembler->StringFromCodePoint(ch, UnicodeEncoding::UTF16);
+ UnicodeEncoding encoding = UnicodeEncoding::UTF16;
+ Node* ch = LoadSurrogatePairAt(string, length, position, encoding);
+ Node* value = StringFromCodePoint(ch, encoding);
var_value.Bind(value);
- Node* length = assembler->LoadObjectField(value, String::kLengthOffset);
- assembler->StoreObjectFieldNoWriteBarrier(
- iterator, JSStringIterator::kNextIndexOffset,
- assembler->SmiAdd(position, length));
- var_done.Bind(assembler->BooleanConstant(false));
- assembler->Goto(&return_result);
+ Node* length = LoadObjectField(value, String::kLengthOffset);
+ StoreObjectFieldNoWriteBarrier(iterator, JSStringIterator::kNextIndexOffset,
+ SmiAdd(position, length));
+ var_done.Bind(BooleanConstant(false));
+ Goto(&return_result);
}
- assembler->Bind(&return_result);
+ Bind(&return_result);
{
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map = assembler->LoadFixedArrayElement(
- native_context,
- assembler->IntPtrConstant(Context::ITERATOR_RESULT_MAP_INDEX), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- Node* result = assembler->Allocate(JSIteratorResult::kSize);
- assembler->StoreMapNoWriteBarrier(result, map);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
- Heap::kEmptyFixedArrayRootIndex);
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSIteratorResult::kValueOffset, var_value.value());
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSIteratorResult::kDoneOffset, var_done.value());
- assembler->Return(result);
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
+ LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
+ Node* result = Allocate(JSIteratorResult::kSize);
+ StoreMapNoWriteBarrier(result, map);
+ StoreObjectFieldRoot(result, JSIteratorResult::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(result, JSIteratorResult::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kValueOffset,
+ var_value.value());
+ StoreObjectFieldNoWriteBarrier(result, JSIteratorResult::kDoneOffset,
+ var_done.value());
+ Return(result);
}
- assembler->Bind(&throw_bad_receiver);
+ Bind(&throw_bad_receiver);
{
    // The {receiver} is not a valid JSStringIterator.
- Node* result = assembler->CallRuntime(
- Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
- "String Iterator.prototype.next", TENURED)),
- iterator);
- assembler->Return(result); // Never reached.
+ Node* result =
+ CallRuntime(Runtime::kThrowIncompatibleMethodReceiver, context,
+ HeapConstant(factory()->NewStringFromAsciiChecked(
+ "String Iterator.prototype.next", TENURED)),
+ iterator);
+ Return(result); // Never reached.
}
}
diff --git a/deps/v8/src/builtins/builtins-symbol.cc b/deps/v8/src/builtins/builtins-symbol.cc
index 8dd8a1fa27..6067edba6d 100644
--- a/deps/v8/src/builtins/builtins-symbol.cc
+++ b/deps/v8/src/builtins/builtins-symbol.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -32,44 +33,81 @@ BUILTIN(SymbolConstructor_ConstructStub) {
isolate->factory()->Symbol_string()));
}
+// ES6 section 19.4.2.1 Symbol.for.
+BUILTIN(SymbolFor) {
+ HandleScope scope(isolate);
+ Handle<Object> key_obj = args.atOrUndefined(isolate, 1);
+ Handle<String> key;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
+ Object::ToString(isolate, key_obj));
+ return *isolate->SymbolFor(Heap::kPublicSymbolTableRootIndex, key, false);
+}
+
+// ES6 section 19.4.2.5 Symbol.keyFor.
+BUILTIN(SymbolKeyFor) {
+ HandleScope scope(isolate);
+ Handle<Object> obj = args.atOrUndefined(isolate, 1);
+ if (!obj->IsSymbol()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolKeyFor, obj));
+ }
+ Handle<Symbol> symbol = Handle<Symbol>::cast(obj);
+ DisallowHeapAllocation no_gc;
+ Object* result;
+ if (symbol->is_public()) {
+ result = symbol->name();
+ DCHECK(result->IsString());
+ } else {
+ result = isolate->heap()->undefined_value();
+ }
+ DCHECK_EQ(isolate->heap()->public_symbol_table()->SlowReverseLookup(*symbol),
+ result);
+ return result;
+}
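
Symbol.for and Symbol.keyFor are the two directions of one global
registry lookup. A toy sketch of those semantics (illustrative types; the
real table is the heap's public symbol table, and real symbols are GC'd
heap objects rather than leaked pointers):

#include <map>
#include <optional>
#include <string>

// Toy registry: Symbol.for interns by key; Symbol.keyFor answers only for
// symbols that were registered, i.e. are marked public.
struct Symbol {
  std::string name;
  bool is_public;
};

std::map<std::string, Symbol*>& Registry() {
  static std::map<std::string, Symbol*> table;
  return table;
}

Symbol* SymbolFor(const std::string& key) {
  auto& table = Registry();
  auto it = table.find(key);
  if (it != table.end()) return it->second;
  Symbol* sym = new Symbol{key, /*is_public=*/true};  // interned forever
  table.emplace(key, sym);
  return sym;
}

std::optional<std::string> SymbolKeyFor(const Symbol* sym) {
  if (!sym->is_public) return std::nullopt;  // undefined in JS terms
  return sym->name;
}
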
+
// ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
void Builtins::Generate_SymbolPrototypeToPrimitive(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(4);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(4);
Node* result =
- assembler->ToThisValue(context, receiver, PrimitiveType::kSymbol,
- "Symbol.prototype [ @@toPrimitive ]");
- assembler->Return(result);
+ assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype [ @@toPrimitive ]");
+ assembler.Return(result);
}
// ES6 section 19.4.3.2 Symbol.prototype.toString ( )
-void Builtins::Generate_SymbolPrototypeToString(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeToString(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* value = assembler->ToThisValue(
- context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.toString");
+ Node* value = assembler.ToThisValue(context, receiver, PrimitiveType::kSymbol,
+ "Symbol.prototype.toString");
Node* result =
- assembler->CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
- assembler->Return(result);
+ assembler.CallRuntime(Runtime::kSymbolDescriptiveString, context, value);
+ assembler.Return(result);
}
// ES6 section 19.4.3.3 Symbol.prototype.valueOf ( )
-void Builtins::Generate_SymbolPrototypeValueOf(CodeStubAssembler* assembler) {
+void Builtins::Generate_SymbolPrototypeValueOf(
+ compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Node* result = assembler->ToThisValue(
+ Node* result = assembler.ToThisValue(
context, receiver, PrimitiveType::kSymbol, "Symbol.prototype.valueOf");
- assembler->Return(result);
+ assembler.Return(result);
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 94173fa613..ab1ebbc69e 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
@@ -20,48 +21,48 @@ BUILTIN(TypedArrayPrototypeBuffer) {
namespace {
-void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
- const char* method_name,
- int object_offset) {
+void Generate_TypedArrayPrototypeGetter(compiler::CodeAssemblerState* state,
+ const char* method_name,
+ int object_offset) {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
// Check if the {receiver} is actually a JSTypedArray.
- Label if_receiverisincompatible(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver),
- &if_receiverisincompatible);
- Node* receiver_instance_type = assembler->LoadInstanceType(receiver);
- assembler->GotoUnless(
- assembler->Word32Equal(receiver_instance_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Label if_receiverisincompatible(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_receiverisincompatible);
+ Node* receiver_instance_type = assembler.LoadInstanceType(receiver);
+ assembler.GotoUnless(
+ assembler.Word32Equal(receiver_instance_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&if_receiverisincompatible);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
- assembler->Return(assembler->LoadObjectField(receiver, object_offset));
+ assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
+ assembler.Return(assembler.LoadObjectField(receiver, object_offset));
- assembler->Bind(&if_receiverisneutered);
+ assembler.Bind(&if_receiverisneutered);
{
      // The {receiver}'s buffer was neutered, default to zero.
- assembler->Return(assembler->SmiConstant(0));
+ assembler.Return(assembler.SmiConstant(0));
}
- assembler->Bind(&if_receiverisincompatible);
+ assembler.Bind(&if_receiverisincompatible);
{
    // The {receiver} is not a valid JSTypedArray.
- Node* result = assembler->CallRuntime(
+ Node* result = assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
- assembler->HeapConstant(assembler->factory()->NewStringFromAsciiChecked(
+ assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
method_name, TENURED)),
receiver);
- assembler->Return(result); // Never reached.
+ assembler.Return(result); // Never reached.
}
}
@@ -69,100 +70,101 @@ void Generate_TypedArrayProtoypeGetter(CodeStubAssembler* assembler,
// ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength
void Builtins::Generate_TypedArrayPrototypeByteLength(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.byteLength",
- JSTypedArray::kByteLengthOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state,
+ "get TypedArray.prototype.byteLength",
+ JSTypedArray::kByteLengthOffset);
}
// ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset
void Builtins::Generate_TypedArrayPrototypeByteOffset(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.byteOffset",
- JSTypedArray::kByteOffsetOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state,
+ "get TypedArray.prototype.byteOffset",
+ JSTypedArray::kByteOffsetOffset);
}
// ES6 section 22.2.3.18 get %TypedArray%.prototype.length
void Builtins::Generate_TypedArrayPrototypeLength(
- CodeStubAssembler* assembler) {
- Generate_TypedArrayProtoypeGetter(assembler,
- "get TypedArray.prototype.length",
- JSTypedArray::kLengthOffset);
+ compiler::CodeAssemblerState* state) {
+ Generate_TypedArrayPrototypeGetter(state, "get TypedArray.prototype.length",
+ JSTypedArray::kLengthOffset);
}
namespace {
template <IterationKind kIterationKind>
-void Generate_TypedArrayPrototypeIterationMethod(CodeStubAssembler* assembler,
- const char* method_name) {
+void Generate_TypedArrayPrototypeIterationMethod(
+ compiler::CodeAssemblerState* state, const char* method_name) {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(0);
- Node* context = assembler->Parameter(3);
+ Node* receiver = assembler.Parameter(0);
+ Node* context = assembler.Parameter(3);
- Label throw_bad_receiver(assembler, Label::kDeferred);
- Label throw_typeerror(assembler, Label::kDeferred);
+ Label throw_bad_receiver(&assembler, Label::kDeferred);
+ Label throw_typeerror(&assembler, Label::kDeferred);
- assembler->GotoIf(assembler->TaggedIsSmi(receiver), &throw_bad_receiver);
+ assembler.GotoIf(assembler.TaggedIsSmi(receiver), &throw_bad_receiver);
- Node* map = assembler->LoadMap(receiver);
- Node* instance_type = assembler->LoadMapInstanceType(map);
- assembler->GotoIf(
- assembler->Word32NotEqual(instance_type,
- assembler->Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ Node* map = assembler.LoadMap(receiver);
+ Node* instance_type = assembler.LoadMapInstanceType(map);
+ assembler.GotoIf(
+ assembler.Word32NotEqual(instance_type,
+ assembler.Int32Constant(JS_TYPED_ARRAY_TYPE)),
&throw_bad_receiver);
// Check if the {receiver}'s JSArrayBuffer was neutered.
Node* receiver_buffer =
- assembler->LoadObjectField(receiver, JSTypedArray::kBufferOffset);
- Label if_receiverisneutered(assembler, Label::kDeferred);
- assembler->GotoIf(assembler->IsDetachedBuffer(receiver_buffer),
- &if_receiverisneutered);
+ assembler.LoadObjectField(receiver, JSTypedArray::kBufferOffset);
+ Label if_receiverisneutered(&assembler, Label::kDeferred);
+ assembler.GotoIf(assembler.IsDetachedBuffer(receiver_buffer),
+ &if_receiverisneutered);
- assembler->Return(assembler->CreateArrayIterator(receiver, map, instance_type,
- context, kIterationKind));
+ assembler.Return(assembler.CreateArrayIterator(receiver, map, instance_type,
+ context, kIterationKind));
- Variable var_message(assembler, MachineRepresentation::kTagged);
- assembler->Bind(&throw_bad_receiver);
+ Variable var_message(&assembler, MachineRepresentation::kTagged);
+ assembler.Bind(&throw_bad_receiver);
var_message.Bind(
- assembler->SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
- assembler->Goto(&throw_typeerror);
+ assembler.SmiConstant(Smi::FromInt(MessageTemplate::kNotTypedArray)));
+ assembler.Goto(&throw_typeerror);
- assembler->Bind(&if_receiverisneutered);
- var_message.Bind(assembler->SmiConstant(
- Smi::FromInt(MessageTemplate::kDetachedOperation)));
- assembler->Goto(&throw_typeerror);
+ assembler.Bind(&if_receiverisneutered);
+ var_message.Bind(
+ assembler.SmiConstant(Smi::FromInt(MessageTemplate::kDetachedOperation)));
+ assembler.Goto(&throw_typeerror);
- assembler->Bind(&throw_typeerror);
+ assembler.Bind(&throw_typeerror);
{
- Node* arg1 = assembler->HeapConstant(
- assembler->isolate()->factory()->NewStringFromAsciiChecked(method_name,
- TENURED));
- Node* result = assembler->CallRuntime(Runtime::kThrowTypeError, context,
- var_message.value(), arg1);
- assembler->Return(result);
+ Node* arg1 = assembler.HeapConstant(
+ assembler.isolate()->factory()->NewStringFromAsciiChecked(method_name,
+ TENURED));
+ Node* result = assembler.CallRuntime(Runtime::kThrowTypeError, context,
+ var_message.value(), arg1);
+ assembler.Return(result);
}
}
} // namespace
void Builtins::Generate_TypedArrayPrototypeValues(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kValues>(
- assembler, "%TypedArray%.prototype.values()");
+ state, "%TypedArray%.prototype.values()");
}
void Builtins::Generate_TypedArrayPrototypeEntries(
- CodeStubAssembler* assembler) {
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kEntries>(
- assembler, "%TypedArray%.prototype.entries()");
+ state, "%TypedArray%.prototype.entries()");
}
-void Builtins::Generate_TypedArrayPrototypeKeys(CodeStubAssembler* assembler) {
+void Builtins::Generate_TypedArrayPrototypeKeys(
+ compiler::CodeAssemblerState* state) {
Generate_TypedArrayPrototypeIterationMethod<IterationKind::kKeys>(
- assembler, "%TypedArray%.prototype.keys()");
+ state, "%TypedArray%.prototype.keys()");
}
} // namespace internal
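Editorial note: for readers new to the CodeStubAssembler idiom in this file, the deferred-label/variable control flow above distills to roughly the following sketch (only calls that appear in the hunks are used; the enclosing builtin is hypothetical):

  typedef compiler::Node Node;
  typedef CodeStubAssembler::Label Label;
  typedef CodeStubAssembler::Variable Variable;

  CodeStubAssembler assembler(state);
  Node* receiver = assembler.Parameter(0);

  // Deferred labels mark slow paths so they are laid out out-of-line.
  Label if_smi(&assembler, Label::kDeferred);
  Variable var_result(&assembler, MachineRepresentation::kTagged);

  assembler.GotoIf(assembler.TaggedIsSmi(receiver), &if_smi);
  var_result.Bind(
      assembler.LoadObjectField(receiver, JSTypedArray::kLengthOffset));
  assembler.Return(var_result.value());

  assembler.Bind(&if_smi);
  assembler.Return(assembler.SmiConstant(0));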
diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h
index 6378fdfad5..be689ac038 100644
--- a/deps/v8/src/builtins/builtins-utils.h
+++ b/deps/v8/src/builtins/builtins-utils.h
@@ -8,11 +8,14 @@
#include "src/arguments.h"
#include "src/base/logging.h"
#include "src/builtins/builtins.h"
-#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
+namespace compiler {
+class CodeAssemblerState;
+}
+
// Arguments object passed to C++ builtins.
class BuiltinArguments : public Arguments {
public:
@@ -27,7 +30,7 @@ class BuiltinArguments : public Arguments {
return Arguments::operator[](index);
}
- template <class S>
+ template <class S = Object>
Handle<S> at(int index) {
DCHECK_LT(index, length());
return Arguments::at<S>(index);
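Editorial note: with the new Object default for the template argument, callers that only need a generic handle can drop the explicit type; a sketch (the argument indices are hypothetical):

  Handle<Object> value = args.at(1);         // S defaults to Object now
  Handle<String> name = args.at<String>(2);  // explicit types still work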
@@ -102,6 +105,31 @@ class BuiltinArguments : public Arguments {
Isolate* isolate)
// ----------------------------------------------------------------------------
+// Support macro for defining builtins with Turbofan.
+// ----------------------------------------------------------------------------
+//
+// A builtin function is defined by writing:
+//
+// TF_BUILTIN(name, code_assembler_base_class) {
+// ...
+// }
+//
+// In the body of the builtin function, the arguments can be accessed
+// as "Parameter(n)".
+#define TF_BUILTIN(Name, AssemblerBase) \
+ class Name##Assembler : public AssemblerBase { \
+ public: \
+ explicit Name##Assembler(compiler::CodeAssemblerState* state) \
+ : AssemblerBase(state) {} \
+ void Generate##Name##Impl(); \
+ }; \
+ void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \
+ Name##Assembler assembler(state); \
+ assembler.Generate##Name##Impl(); \
+ } \
+ void Name##Assembler::Generate##Name##Impl()
+
+// ----------------------------------------------------------------------------
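Editorial note: concretely, a hedged sketch of what the macro buys (the builtin name Example is hypothetical):

  TF_BUILTIN(Example, CodeStubAssembler) {
    Return(Parameter(0));
  }

This expands to an ExampleAssembler subclass of CodeStubAssembler, the Builtins::Generate_Example(compiler::CodeAssemblerState*) wrapper that instantiates it, and the definition of the body as a member function — which is why assembler methods such as Parameter and Return can be called unqualified inside the body.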
#define CHECK_RECEIVER(Type, name, method) \
if (!args.receiver()->Is##Type()) { \
@@ -117,8 +145,7 @@ class BuiltinArguments : public Arguments {
// or converts the receiver to a String otherwise and assigns it to a new var
// with the given {name}.
#define TO_THIS_STRING(name, method) \
- if (args.receiver()->IsNull(isolate) || \
- args.receiver()->IsUndefined(isolate)) { \
+ if (args.receiver()->IsNullOrUndefined(isolate)) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, \
NewTypeError(MessageTemplate::kCalledOnNullOrUndefined, \
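Editorial note: the TO_THIS_STRING change is behavior-preserving — the new predicate simply folds the two checks it replaces. Roughly, as a sketch of the equivalence the hunk relies on (the real helper is a method on Object):

  bool Object::IsNullOrUndefined(Isolate* isolate) {
    return IsNull(isolate) || IsUndefined(isolate);
  }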
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index ec981fe01e..5997eb3550 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -4,7 +4,7 @@
#include "src/builtins/builtins.h"
#include "src/code-events.h"
-#include "src/code-stub-assembler.h"
+#include "src/compiler/code-assembler.h"
#include "src/ic/ic-state.h"
#include "src/interface-descriptors.h"
#include "src/isolate.h"
@@ -42,7 +42,7 @@ void PostBuildProfileAndTracing(Isolate* isolate, Code* code,
}
typedef void (*MacroAssemblerGenerator)(MacroAssembler*);
-typedef void (*CodeAssemblerGenerator)(CodeStubAssembler*);
+typedef void (*CodeAssemblerGenerator)(compiler::CodeAssemblerState*);
Code* BuildWithMacroAssembler(Isolate* isolate,
MacroAssemblerGenerator generator,
@@ -86,9 +86,10 @@ Code* BuildWithCodeStubAssemblerJS(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- CodeStubAssembler assembler(isolate, &zone, argc_with_recv, flags, name);
- generator(&assembler);
- Handle<Code> code = assembler.GenerateCode();
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv, flags,
+ name);
+ generator(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
@@ -105,9 +106,9 @@ Code* BuildWithCodeStubAssemblerCS(Isolate* isolate,
CallInterfaceDescriptor descriptor(isolate, interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- CodeStubAssembler assembler(isolate, &zone, descriptor, flags, name);
- generator(&assembler);
- Handle<Code> code = assembler.GenerateCode();
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor, flags, name);
+ generator(&state);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
PostBuildProfileAndTracing(isolate, *code, name);
return *code;
}
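Editorial note: separating graph construction (compiler::CodeAssemblerState) from the assembler object is what enables the TF_BUILTIN macro in builtins-utils.h — any assembler subclass can be layered over the same state before CodeAssembler::GenerateCode turns it into a Code object. A minimal sketch (class and method names hypothetical):

  class ExampleAssembler : public CodeStubAssembler {
   public:
    explicit ExampleAssembler(compiler::CodeAssemblerState* state)
        : CodeStubAssembler(state) {}
    void GenerateImpl() { Return(SmiConstant(0)); }
  };

  // A generator passed to BuildWithCodeStubAssemblerJS above can now
  // instantiate ExampleAssembler over the state it receives.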
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index a6b126d106..a21b272f20 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -29,9 +29,7 @@ namespace internal {
V(NoAge) \
CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
- V(Make##C##CodeYoungAgainOddMarking) \
- V(Make##C##CodeYoungAgainEvenMarking)
+#define DECLARE_CODE_AGE_BUILTIN(C, V) V(Make##C##CodeYoungAgain)
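Editorial note: for a given code age C (Quadragenarian is one of the ages supplied by CODE_AGE_LIST), this halves the number of code-age builtins; sketching the expansion:

  // Old: DECLARE_CODE_AGE_BUILTIN(Quadragenarian, V)
  V(MakeQuadragenarianCodeYoungAgainOddMarking)
  V(MakeQuadragenarianCodeYoungAgainEvenMarking)

  // New: DECLARE_CODE_AGE_BUILTIN(Quadragenarian, V)
  V(MakeQuadragenarianCodeYoungAgain)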
// CPP: Builtin in C++. Entered via BUILTIN_EXIT frame.
// Args: name
@@ -47,658 +45,734 @@ namespace internal {
// Args: name, code kind, extra IC state
// DBG: Builtin in platform-dependent assembly, used by the debugger.
// Args: name
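Editorial note: BUILTIN_LIST is the usual X-macro pattern — a consumer supplies one macro per category (CPP, API, TFJ, TFS, ASM, ASH, DBG) and the list stamps out one expansion per builtin. A hedged sketch of a consumer (the enum below is hypothetical; the real consumers live elsewhere in builtins.h):

  #define DEF_ENUM(Name, ...) k##Name,
  enum BuiltinId {
    BUILTIN_LIST(DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM, DEF_ENUM,
                 DEF_ENUM)
  };
  #undef DEF_ENUM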
-#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
- ASM(Abort) \
- /* Code aging */ \
- CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
- \
- TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
- \
- /* Calls */ \
- ASM(ArgumentsAdaptorTrampoline) \
- /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallFunction_ReceiverIsNullOrUndefined) \
- ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(CallFunction_ReceiverIsAny) \
- ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
- ASM(TailCallFunction_ReceiverIsAny) \
- /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
- ASM(CallBoundFunction) \
- ASM(TailCallBoundFunction) \
- /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
- ASM(Call_ReceiverIsNullOrUndefined) \
- ASM(Call_ReceiverIsNotNullOrUndefined) \
- ASM(Call_ReceiverIsAny) \
- ASM(TailCall_ReceiverIsNullOrUndefined) \
- ASM(TailCall_ReceiverIsNotNullOrUndefined) \
- ASM(TailCall_ReceiverIsAny) \
- \
- /* Construct */ \
- /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
- ASM(ConstructFunction) \
- /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
- ASM(ConstructBoundFunction) \
- ASM(ConstructedNonConstructable) \
- /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
- ASM(ConstructProxy) \
- /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
- ASM(Construct) \
- ASM(JSConstructStubApi) \
- ASM(JSConstructStubGeneric) \
- ASM(JSBuiltinsConstructStub) \
- ASM(JSBuiltinsConstructStubForDerived) \
- \
- /* Apply and entries */ \
- ASM(Apply) \
- ASM(JSEntryTrampoline) \
- ASM(JSConstructEntryTrampoline) \
- ASM(ResumeGeneratorTrampoline) \
- \
- /* Stack and interrupt check */ \
- ASM(InterruptCheck) \
- ASM(StackCheck) \
- \
- /* String helpers */ \
- TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- \
- /* Interpreter */ \
- ASM(InterpreterEntryTrampoline) \
- ASM(InterpreterPushArgsAndCall) \
- ASM(InterpreterPushArgsAndCallFunction) \
- ASM(InterpreterPushArgsAndTailCall) \
- ASM(InterpreterPushArgsAndTailCallFunction) \
- ASM(InterpreterPushArgsAndConstruct) \
- ASM(InterpreterPushArgsAndConstructFunction) \
- ASM(InterpreterPushArgsAndConstructArray) \
- ASM(InterpreterEnterBytecodeAdvance) \
- ASM(InterpreterEnterBytecodeDispatch) \
- ASM(InterpreterOnStackReplacement) \
- \
- /* Code life-cycle */ \
- ASM(CompileLazy) \
- ASM(CompileBaseline) \
- ASM(CompileOptimized) \
- ASM(CompileOptimizedConcurrent) \
- ASM(InOptimizationQueue) \
- ASM(InstantiateAsmJs) \
- ASM(MarkCodeAsToBeExecutedOnce) \
- ASM(MarkCodeAsExecutedOnce) \
- ASM(MarkCodeAsExecutedTwice) \
- ASM(NotifyDeoptimized) \
- ASM(NotifySoftDeoptimized) \
- ASM(NotifyLazyDeoptimized) \
- ASM(NotifyStubFailure) \
- ASM(NotifyStubFailureSaveDoubles) \
- ASM(OnStackReplacement) \
- \
- /* API callback handling */ \
- API(HandleApiCall) \
- API(HandleApiCallAsFunction) \
- API(HandleApiCallAsConstructor) \
- ASM(HandleFastApiCall) \
- \
- /* Adapters for Turbofan into runtime */ \
- ASM(AllocateInNewSpace) \
- ASM(AllocateInOldSpace) \
- \
- /* TurboFan support builtins */ \
- TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- CopyFastSmiOrObjectElements) \
- TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
- TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
- GrowArrayElements) \
- \
- /* Debugger */ \
- DBG(FrameDropper_LiveEdit) \
- DBG(Return_DebugBreak) \
- DBG(Slot_DebugBreak) \
- \
- /* Type conversions */ \
- TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
- TypeConversion) \
- TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
- TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
- \
- /* Handlers */ \
- TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
- LoadWithVector) \
- ASM(KeyedLoadIC_Miss) \
- ASH(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC) \
- ASH(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, kNoExtraICState) \
- ASH(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, \
- StoreICState::kStrictModeState) \
- TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
- StoreWithVector) \
- TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
- StoreICState::kStrictModeState, StoreWithVector) \
- ASM(KeyedStoreIC_Miss) \
- ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
- TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
- TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
- ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
- TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
- ASH(LoadIC_Normal, HANDLER, Code::LOAD_IC) \
- TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
- TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
- ASH(StoreIC_Normal, HANDLER, Code::STORE_IC) \
- ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
- TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
- TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
- \
- /* Built-in functions for Javascript */ \
- /* Special internal builtins */ \
- CPP(EmptyFunction) \
- CPP(Illegal) \
- CPP(RestrictedFunctionPropertiesThrower) \
- CPP(RestrictedStrictArgumentsPropertiesThrower) \
- CPP(UnsupportedThrower) \
- \
- /* Array */ \
- ASM(ArrayCode) \
- ASM(InternalArrayCode) \
- CPP(ArrayConcat) \
- /* ES6 section 22.1.2.2 Array.isArray */ \
- TFJ(ArrayIsArray, 1) \
- /* ES7 #sec-array.prototype.includes */ \
- TFJ(ArrayIncludes, 2) \
- TFJ(ArrayIndexOf, 2) \
- CPP(ArrayPop) \
- CPP(ArrayPush) \
- CPP(ArrayShift) \
- CPP(ArraySlice) \
- CPP(ArraySplice) \
- CPP(ArrayUnshift) \
- /* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0) \
- /* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0) \
- /* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0) \
- /* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0) \
- \
- /* ArrayBuffer */ \
- CPP(ArrayBufferConstructor) \
- CPP(ArrayBufferConstructor_ConstructStub) \
- CPP(ArrayBufferPrototypeGetByteLength) \
- CPP(ArrayBufferIsView) \
- \
- /* Boolean */ \
- CPP(BooleanConstructor) \
- CPP(BooleanConstructor_ConstructStub) \
- /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
- TFJ(BooleanPrototypeToString, 0) \
- /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
- TFJ(BooleanPrototypeValueOf, 0) \
- \
- /* CallSite */ \
- CPP(CallSitePrototypeGetColumnNumber) \
- CPP(CallSitePrototypeGetEvalOrigin) \
- CPP(CallSitePrototypeGetFileName) \
- CPP(CallSitePrototypeGetFunction) \
- CPP(CallSitePrototypeGetFunctionName) \
- CPP(CallSitePrototypeGetLineNumber) \
- CPP(CallSitePrototypeGetMethodName) \
- CPP(CallSitePrototypeGetPosition) \
- CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
- CPP(CallSitePrototypeGetThis) \
- CPP(CallSitePrototypeGetTypeName) \
- CPP(CallSitePrototypeIsConstructor) \
- CPP(CallSitePrototypeIsEval) \
- CPP(CallSitePrototypeIsNative) \
- CPP(CallSitePrototypeIsToplevel) \
- CPP(CallSitePrototypeToString) \
- \
- /* DataView */ \
- CPP(DataViewConstructor) \
- CPP(DataViewConstructor_ConstructStub) \
- CPP(DataViewPrototypeGetBuffer) \
- CPP(DataViewPrototypeGetByteLength) \
- CPP(DataViewPrototypeGetByteOffset) \
- CPP(DataViewPrototypeGetInt8) \
- CPP(DataViewPrototypeSetInt8) \
- CPP(DataViewPrototypeGetUint8) \
- CPP(DataViewPrototypeSetUint8) \
- CPP(DataViewPrototypeGetInt16) \
- CPP(DataViewPrototypeSetInt16) \
- CPP(DataViewPrototypeGetUint16) \
- CPP(DataViewPrototypeSetUint16) \
- CPP(DataViewPrototypeGetInt32) \
- CPP(DataViewPrototypeSetInt32) \
- CPP(DataViewPrototypeGetUint32) \
- CPP(DataViewPrototypeSetUint32) \
- CPP(DataViewPrototypeGetFloat32) \
- CPP(DataViewPrototypeSetFloat32) \
- CPP(DataViewPrototypeGetFloat64) \
- CPP(DataViewPrototypeSetFloat64) \
- \
- /* Date */ \
- CPP(DateConstructor) \
- CPP(DateConstructor_ConstructStub) \
- /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
- TFJ(DatePrototypeGetDate, 0) \
- /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
- TFJ(DatePrototypeGetDay, 0) \
- /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
- TFJ(DatePrototypeGetFullYear, 0) \
- /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
- TFJ(DatePrototypeGetHours, 0) \
- /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
- TFJ(DatePrototypeGetMilliseconds, 0) \
- /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
- TFJ(DatePrototypeGetMinutes, 0) \
- /* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
- TFJ(DatePrototypeGetMonth, 0) \
- /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
- TFJ(DatePrototypeGetSeconds, 0) \
- /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
- TFJ(DatePrototypeGetTime, 0) \
- /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0) \
- /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
- TFJ(DatePrototypeGetUTCDate, 0) \
- /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
- TFJ(DatePrototypeGetUTCDay, 0) \
- /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
- TFJ(DatePrototypeGetUTCFullYear, 0) \
- /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
- TFJ(DatePrototypeGetUTCHours, 0) \
- /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0) \
- /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
- TFJ(DatePrototypeGetUTCMinutes, 0) \
- /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
- TFJ(DatePrototypeGetUTCMonth, 0) \
- /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
- TFJ(DatePrototypeGetUTCSeconds, 0) \
- CPP(DatePrototypeGetYear) \
- CPP(DatePrototypeSetYear) \
- CPP(DateNow) \
- CPP(DateParse) \
- CPP(DatePrototypeSetDate) \
- CPP(DatePrototypeSetFullYear) \
- CPP(DatePrototypeSetHours) \
- CPP(DatePrototypeSetMilliseconds) \
- CPP(DatePrototypeSetMinutes) \
- CPP(DatePrototypeSetMonth) \
- CPP(DatePrototypeSetSeconds) \
- CPP(DatePrototypeSetTime) \
- CPP(DatePrototypeSetUTCDate) \
- CPP(DatePrototypeSetUTCFullYear) \
- CPP(DatePrototypeSetUTCHours) \
- CPP(DatePrototypeSetUTCMilliseconds) \
- CPP(DatePrototypeSetUTCMinutes) \
- CPP(DatePrototypeSetUTCMonth) \
- CPP(DatePrototypeSetUTCSeconds) \
- CPP(DatePrototypeToDateString) \
- CPP(DatePrototypeToISOString) \
- CPP(DatePrototypeToPrimitive) \
- CPP(DatePrototypeToUTCString) \
- CPP(DatePrototypeToString) \
- CPP(DatePrototypeToTimeString) \
- CPP(DatePrototypeValueOf) \
- CPP(DatePrototypeToJson) \
- CPP(DateUTC) \
- \
- /* Error */ \
- CPP(ErrorConstructor) \
- CPP(ErrorCaptureStackTrace) \
- CPP(ErrorPrototypeToString) \
- CPP(MakeError) \
- CPP(MakeRangeError) \
- CPP(MakeSyntaxError) \
- CPP(MakeTypeError) \
- CPP(MakeURIError) \
- \
- /* Function */ \
- CPP(FunctionConstructor) \
- ASM(FunctionPrototypeApply) \
- CPP(FunctionPrototypeBind) \
- ASM(FunctionPrototypeCall) \
- /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
- TFJ(FunctionPrototypeHasInstance, 1) \
- CPP(FunctionPrototypeToString) \
- \
- /* Generator and Async */ \
- CPP(GeneratorFunctionConstructor) \
- /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
- TFJ(GeneratorPrototypeNext, 1) \
- /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
- TFJ(GeneratorPrototypeReturn, 1) \
- /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
- TFJ(GeneratorPrototypeThrow, 1) \
- CPP(AsyncFunctionConstructor) \
- \
- /* Global object */ \
- CPP(GlobalDecodeURI) \
- CPP(GlobalDecodeURIComponent) \
- CPP(GlobalEncodeURI) \
- CPP(GlobalEncodeURIComponent) \
- CPP(GlobalEscape) \
- CPP(GlobalUnescape) \
- CPP(GlobalEval) \
- /* ES6 section 18.2.2 isFinite ( number ) */ \
- TFJ(GlobalIsFinite, 1) \
- /* ES6 section 18.2.3 isNaN ( number ) */ \
- TFJ(GlobalIsNaN, 1) \
- \
- /* ES6 #sec-%iteratorprototype%-@@iterator */ \
- TFJ(IteratorPrototypeIterator, 0) \
- \
- /* JSON */ \
- CPP(JsonParse) \
- CPP(JsonStringify) \
- \
- /* Math */ \
- /* ES6 section 20.2.2.1 Math.abs ( x ) */ \
- TFJ(MathAbs, 1) \
- /* ES6 section 20.2.2.2 Math.acos ( x ) */ \
- TFJ(MathAcos, 1) \
- /* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
- TFJ(MathAcosh, 1) \
- /* ES6 section 20.2.2.4 Math.asin ( x ) */ \
- TFJ(MathAsin, 1) \
- /* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
- TFJ(MathAsinh, 1) \
- /* ES6 section 20.2.2.6 Math.atan ( x ) */ \
- TFJ(MathAtan, 1) \
- /* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
- TFJ(MathAtanh, 1) \
- /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
- TFJ(MathAtan2, 2) \
- /* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
- TFJ(MathCbrt, 1) \
- /* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
- TFJ(MathCeil, 1) \
- /* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
- TFJ(MathClz32, 1) \
- /* ES6 section 20.2.2.12 Math.cos ( x ) */ \
- TFJ(MathCos, 1) \
- /* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
- TFJ(MathCosh, 1) \
- /* ES6 section 20.2.2.14 Math.exp ( x ) */ \
- TFJ(MathExp, 1) \
- /* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
- TFJ(MathExpm1, 1) \
- /* ES6 section 20.2.2.16 Math.floor ( x ) */ \
- TFJ(MathFloor, 1) \
- /* ES6 section 20.2.2.17 Math.fround ( x ) */ \
- TFJ(MathFround, 1) \
- /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
- CPP(MathHypot) \
- /* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
- TFJ(MathImul, 2) \
- /* ES6 section 20.2.2.20 Math.log ( x ) */ \
- TFJ(MathLog, 1) \
- /* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
- TFJ(MathLog1p, 1) \
- /* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
- TFJ(MathLog10, 1) \
- /* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
- TFJ(MathLog2, 1) \
- /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
- ASM(MathMax) \
- /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
- ASM(MathMin) \
- /* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
- TFJ(MathPow, 2) \
- /* ES6 section 20.2.2.27 Math.random */ \
- TFJ(MathRandom, 0) \
- /* ES6 section 20.2.2.28 Math.round ( x ) */ \
- TFJ(MathRound, 1) \
- /* ES6 section 20.2.2.29 Math.sign ( x ) */ \
- TFJ(MathSign, 1) \
- /* ES6 section 20.2.2.30 Math.sin ( x ) */ \
- TFJ(MathSin, 1) \
- /* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
- TFJ(MathSinh, 1) \
- /* ES6 section 20.2.2.32 Math.sqrt ( x ) */ \
- TFJ(MathTan, 1) \
- /* ES6 section 20.2.2.33 Math.tan ( x ) */ \
- TFJ(MathTanh, 1) \
- /* ES6 section 20.2.2.34 Math.tanh ( x ) */ \
- TFJ(MathSqrt, 1) \
- /* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
- TFJ(MathTrunc, 1) \
- \
- /* Number */ \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
- ASM(NumberConstructor) \
- /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
- ASM(NumberConstructor_ConstructStub) \
- /* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
- TFJ(NumberIsFinite, 1) \
- /* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
- TFJ(NumberIsInteger, 1) \
- /* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
- TFJ(NumberIsNaN, 1) \
- /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
- TFJ(NumberIsSafeInteger, 1) \
- /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */ \
- TFJ(NumberParseFloat, 1) \
- /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */ \
- TFJ(NumberParseInt, 2) \
- CPP(NumberPrototypeToExponential) \
- CPP(NumberPrototypeToFixed) \
- CPP(NumberPrototypeToLocaleString) \
- CPP(NumberPrototypeToPrecision) \
- CPP(NumberPrototypeToString) \
- /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
- TFJ(NumberPrototypeValueOf, 0) \
- TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
- TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
- TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
- TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
- TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
- \
- /* Object */ \
- CPP(ObjectAssign) \
- TFJ(ObjectCreate, 2) \
- CPP(ObjectDefineGetter) \
- CPP(ObjectDefineProperties) \
- CPP(ObjectDefineProperty) \
- CPP(ObjectDefineSetter) \
- CPP(ObjectEntries) \
- CPP(ObjectFreeze) \
- CPP(ObjectGetOwnPropertyDescriptor) \
- CPP(ObjectGetOwnPropertyDescriptors) \
- CPP(ObjectGetOwnPropertyNames) \
- CPP(ObjectGetOwnPropertySymbols) \
- CPP(ObjectGetPrototypeOf) \
- CPP(ObjectSetPrototypeOf) \
- /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
- TFJ(ObjectHasOwnProperty, 1) \
- CPP(ObjectIs) \
- CPP(ObjectIsExtensible) \
- CPP(ObjectIsFrozen) \
- CPP(ObjectIsSealed) \
- CPP(ObjectKeys) \
- CPP(ObjectLookupGetter) \
- CPP(ObjectLookupSetter) \
- CPP(ObjectPreventExtensions) \
- /* ES6 section 19.1.3.6 Object.prototype.toString () */ \
- TFJ(ObjectProtoToString, 0) \
- CPP(ObjectPrototypePropertyIsEnumerable) \
- CPP(ObjectPrototypeGetProto) \
- CPP(ObjectPrototypeSetProto) \
- CPP(ObjectSeal) \
- CPP(ObjectValues) \
- \
- TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
- TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
- TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
- TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
- \
- /* Promise */ \
- CPP(CreateResolvingFunctions) \
- CPP(PromiseResolveClosure) \
- CPP(PromiseRejectClosure) \
- \
- /* Proxy */ \
- CPP(ProxyConstructor) \
- CPP(ProxyConstructor_ConstructStub) \
- \
- /* Reflect */ \
- ASM(ReflectApply) \
- ASM(ReflectConstruct) \
- CPP(ReflectDefineProperty) \
- CPP(ReflectDeleteProperty) \
- CPP(ReflectGet) \
- CPP(ReflectGetOwnPropertyDescriptor) \
- CPP(ReflectGetPrototypeOf) \
- CPP(ReflectHas) \
- CPP(ReflectIsExtensible) \
- CPP(ReflectOwnKeys) \
- CPP(ReflectPreventExtensions) \
- CPP(ReflectSet) \
- CPP(ReflectSetPrototypeOf) \
- \
- /* RegExp */ \
- CPP(RegExpCapture1Getter) \
- CPP(RegExpCapture2Getter) \
- CPP(RegExpCapture3Getter) \
- CPP(RegExpCapture4Getter) \
- CPP(RegExpCapture5Getter) \
- CPP(RegExpCapture6Getter) \
- CPP(RegExpCapture7Getter) \
- CPP(RegExpCapture8Getter) \
- CPP(RegExpCapture9Getter) \
- CPP(RegExpConstructor) \
- TFJ(RegExpInternalMatch, 2) \
- CPP(RegExpInputGetter) \
- CPP(RegExpInputSetter) \
- CPP(RegExpLastMatchGetter) \
- CPP(RegExpLastParenGetter) \
- CPP(RegExpLeftContextGetter) \
- CPP(RegExpPrototypeCompile) \
- TFJ(RegExpPrototypeExec, 1) \
- TFJ(RegExpPrototypeFlagsGetter, 0) \
- TFJ(RegExpPrototypeGlobalGetter, 0) \
- TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
- CPP(RegExpPrototypeMatch) \
- TFJ(RegExpPrototypeMultilineGetter, 0) \
- TFJ(RegExpPrototypeReplace, 2) \
- TFJ(RegExpPrototypeSearch, 1) \
- CPP(RegExpPrototypeSourceGetter) \
- CPP(RegExpPrototypeSpeciesGetter) \
- CPP(RegExpPrototypeSplit) \
- TFJ(RegExpPrototypeStickyGetter, 0) \
- TFJ(RegExpPrototypeTest, 1) \
- CPP(RegExpPrototypeToString) \
- TFJ(RegExpPrototypeUnicodeGetter, 0) \
- CPP(RegExpRightContextGetter) \
- \
- /* SharedArrayBuffer */ \
- CPP(SharedArrayBufferPrototypeGetByteLength) \
- TFJ(AtomicsLoad, 2) \
- TFJ(AtomicsStore, 3) \
- \
- /* String */ \
- ASM(StringConstructor) \
- ASM(StringConstructor_ConstructStub) \
- CPP(StringFromCodePoint) \
- /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
- TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
- /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
- TFJ(StringPrototypeCharAt, 1) \
- /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
- TFJ(StringPrototypeCharCodeAt, 1) \
- /* ES6 section 21.1.3.6 */ \
- /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \
- CPP(StringPrototypeEndsWith) \
- /* ES6 section 21.1.3.7 */ \
- /* String.prototype.includes ( searchString [ , position ] ) */ \
- CPP(StringPrototypeIncludes) \
- /* ES6 section 21.1.3.8 */ \
- /* String.prototype.indexOf ( searchString [ , position ] ) */ \
- CPP(StringPrototypeIndexOf) \
- /* ES6 section 21.1.3.9 */ \
- /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
- CPP(StringPrototypeLastIndexOf) \
- /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */ \
- CPP(StringPrototypeLocaleCompare) \
- /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
- CPP(StringPrototypeNormalize) \
- /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
- TFJ(StringPrototypeSubstr, 2) \
- /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
- TFJ(StringPrototypeSubstring, 2) \
- /* ES6 section 21.1.3.20 */ \
- /* String.prototype.startsWith ( searchString [ , position ] ) */ \
- CPP(StringPrototypeStartsWith) \
- /* ES6 section 21.1.3.25 String.prototype.toString () */ \
- TFJ(StringPrototypeToString, 0) \
- CPP(StringPrototypeTrim) \
- CPP(StringPrototypeTrimLeft) \
- CPP(StringPrototypeTrimRight) \
- /* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
- TFJ(StringPrototypeValueOf, 0) \
- /* ES6 #sec-string.prototype-@@iterator */ \
- TFJ(StringPrototypeIterator, 0) \
- \
- /* StringIterator */ \
- TFJ(StringIteratorPrototypeNext, 0) \
- \
- /* Symbol */ \
- CPP(SymbolConstructor) \
- CPP(SymbolConstructor_ConstructStub) \
- /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
- TFJ(SymbolPrototypeToPrimitive, 1) \
- /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
- TFJ(SymbolPrototypeToString, 0) \
- /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
- TFJ(SymbolPrototypeValueOf, 0) \
- \
- /* TypedArray */ \
- CPP(TypedArrayPrototypeBuffer) \
- /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
- TFJ(TypedArrayPrototypeByteLength, 0) \
- /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0) \
- /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0) \
- /* ES6 #sec-%typedarray%.prototype.entries */ \
- TFJ(TypedArrayPrototypeEntries, 0) \
- /* ES6 #sec-%typedarray%.prototype.keys */ \
- TFJ(TypedArrayPrototypeKeys, 0) \
- /* ES6 #sec-%typedarray%.prototype.values */ \
- TFJ(TypedArrayPrototypeValues, 0) \
- \
- CPP(ModuleNamespaceIterator) \
- CPP(FixedArrayIteratorNext)
+#define BUILTIN_LIST(CPP, API, TFJ, TFS, ASM, ASH, DBG) \
+ ASM(Abort) \
+ /* Code aging */ \
+ CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, ASM) \
+ \
+ /* Declared first for dependency reasons */ \
+ ASM(CompileLazy) \
+ TFS(ToObject, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(FastNewObject, BUILTIN, kNoExtraICState, FastNewObject) \
+ \
+ /* Calls */ \
+ ASM(ArgumentsAdaptorTrampoline) \
+ /* ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallFunction_ReceiverIsNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(CallFunction_ReceiverIsAny) \
+ ASM(TailCallFunction_ReceiverIsNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCallFunction_ReceiverIsAny) \
+ /* ES6 section 9.4.1.1 [[Call]] ( thisArgument, argumentsList) */ \
+ ASM(CallBoundFunction) \
+ ASM(TailCallBoundFunction) \
+ /* ES6 section 7.3.12 Call(F, V, [argumentsList]) */ \
+ ASM(Call_ReceiverIsNullOrUndefined) \
+ ASM(Call_ReceiverIsNotNullOrUndefined) \
+ ASM(Call_ReceiverIsAny) \
+ ASM(TailCall_ReceiverIsNullOrUndefined) \
+ ASM(TailCall_ReceiverIsNotNullOrUndefined) \
+ ASM(TailCall_ReceiverIsAny) \
+ \
+ /* Construct */ \
+ /* ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructFunction) \
+ /* ES6 section 9.4.1.2 [[Construct]] (argumentsList, newTarget) */ \
+ ASM(ConstructBoundFunction) \
+ ASM(ConstructedNonConstructable) \
+ /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \
+ ASM(ConstructProxy) \
+ /* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
+ ASM(Construct) \
+ ASM(JSConstructStubApi) \
+ ASM(JSConstructStubGeneric) \
+ ASM(JSBuiltinsConstructStub) \
+ ASM(JSBuiltinsConstructStubForDerived) \
+ TFS(FastNewClosure, BUILTIN, kNoExtraICState, FastNewClosure) \
+ TFS(FastNewFunctionContextEval, BUILTIN, kNoExtraICState, \
+ FastNewFunctionContext) \
+ TFS(FastNewFunctionContextFunction, BUILTIN, kNoExtraICState, \
+ FastNewFunctionContext) \
+ TFS(FastCloneRegExp, BUILTIN, kNoExtraICState, FastCloneRegExp) \
+ TFS(FastCloneShallowArrayTrack, BUILTIN, kNoExtraICState, \
+ FastCloneShallowArray) \
+ TFS(FastCloneShallowArrayDontTrack, BUILTIN, kNoExtraICState, \
+ FastCloneShallowArray) \
+ TFS(FastCloneShallowObject0, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject1, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject2, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject3, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject4, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject5, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ TFS(FastCloneShallowObject6, BUILTIN, kNoExtraICState, \
+ FastCloneShallowObject) \
+ \
+ /* Apply and entries */ \
+ ASM(Apply) \
+ ASM(JSEntryTrampoline) \
+ ASM(JSConstructEntryTrampoline) \
+ ASM(ResumeGeneratorTrampoline) \
+ \
+ /* Stack and interrupt check */ \
+ ASM(InterruptCheck) \
+ ASM(StackCheck) \
+ \
+ /* String helpers */ \
+ TFS(StringEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringLessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringGreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StringCharAt, BUILTIN, kNoExtraICState, StringCharAt) \
+ TFS(StringCharCodeAt, BUILTIN, kNoExtraICState, StringCharCodeAt) \
+ \
+ /* Interpreter */ \
+ ASM(InterpreterEntryTrampoline) \
+ ASM(InterpreterPushArgsAndCall) \
+ ASM(InterpreterPushArgsAndCallFunction) \
+ ASM(InterpreterPushArgsAndTailCall) \
+ ASM(InterpreterPushArgsAndTailCallFunction) \
+ ASM(InterpreterPushArgsAndConstruct) \
+ ASM(InterpreterPushArgsAndConstructFunction) \
+ ASM(InterpreterPushArgsAndConstructArray) \
+ ASM(InterpreterEnterBytecodeAdvance) \
+ ASM(InterpreterEnterBytecodeDispatch) \
+ ASM(InterpreterOnStackReplacement) \
+ \
+ /* Code life-cycle */ \
+ ASM(CompileBaseline) \
+ ASM(CompileOptimized) \
+ ASM(CompileOptimizedConcurrent) \
+ ASM(InOptimizationQueue) \
+ ASM(InstantiateAsmJs) \
+ ASM(MarkCodeAsToBeExecutedOnce) \
+ ASM(MarkCodeAsExecutedOnce) \
+ ASM(MarkCodeAsExecutedTwice) \
+ ASM(NotifyDeoptimized) \
+ ASM(NotifySoftDeoptimized) \
+ ASM(NotifyLazyDeoptimized) \
+ ASM(NotifyStubFailure) \
+ ASM(NotifyStubFailureSaveDoubles) \
+ ASM(OnStackReplacement) \
+ \
+ /* API callback handling */ \
+ API(HandleApiCall) \
+ API(HandleApiCallAsFunction) \
+ API(HandleApiCallAsConstructor) \
+ ASM(HandleFastApiCall) \
+ \
+ /* Adapters for Turbofan into runtime */ \
+ ASM(AllocateInNewSpace) \
+ ASM(AllocateInOldSpace) \
+ \
+ /* TurboFan support builtins */ \
+ TFS(CopyFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ CopyFastSmiOrObjectElements) \
+ TFS(GrowFastDoubleElements, BUILTIN, kNoExtraICState, GrowArrayElements) \
+ TFS(GrowFastSmiOrObjectElements, BUILTIN, kNoExtraICState, \
+ GrowArrayElements) \
+ TFS(NewUnmappedArgumentsElements, BUILTIN, kNoExtraICState, \
+ NewArgumentsElements) \
+ TFS(NewRestParameterElements, BUILTIN, kNoExtraICState, \
+ NewArgumentsElements) \
+ \
+ /* Debugger */ \
+ DBG(FrameDropper_LiveEdit) \
+ DBG(Return_DebugBreak) \
+ DBG(Slot_DebugBreak) \
+ \
+ /* Type conversions */ \
+ TFS(ToBoolean, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_Number, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(OrdinaryToPrimitive_String, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Default, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_Number, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(NonPrimitiveToPrimitive_String, BUILTIN, kNoExtraICState, \
+ TypeConversion) \
+ TFS(StringToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToName, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(NonNumberToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToNumber, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToString, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToInteger, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(ToLength, BUILTIN, kNoExtraICState, TypeConversion) \
+ TFS(Typeof, BUILTIN, kNoExtraICState, Typeof) \
+ TFS(GetSuperConstructor, BUILTIN, kNoExtraICState, TypeConversion) \
+ \
+ /* Handlers */ \
+ TFS(KeyedLoadIC_Megamorphic_TF, KEYED_LOAD_IC, kNoExtraICState, \
+ LoadWithVector) \
+ TFS(KeyedLoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ TFS(KeyedLoadIC_Slow, HANDLER, Code::KEYED_LOAD_IC, LoadWithVector) \
+ TFS(KeyedStoreIC_Megamorphic_TF, KEYED_STORE_IC, kNoExtraICState, \
+ StoreWithVector) \
+ TFS(KeyedStoreIC_Megamorphic_Strict_TF, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState, StoreWithVector) \
+ ASM(KeyedStoreIC_Miss) \
+ ASH(KeyedStoreIC_Slow, HANDLER, Code::KEYED_STORE_IC) \
+ TFS(LoadGlobalIC_Miss, BUILTIN, kNoExtraICState, LoadGlobalWithVector) \
+ TFS(LoadGlobalIC_Slow, HANDLER, Code::LOAD_GLOBAL_IC, LoadGlobalWithVector) \
+ ASH(LoadIC_Getter_ForDeopt, LOAD_IC, kNoExtraICState) \
+ TFS(LoadIC_Miss, BUILTIN, kNoExtraICState, LoadWithVector) \
+ TFS(LoadIC_Normal, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(LoadIC_Slow, HANDLER, Code::LOAD_IC, LoadWithVector) \
+ TFS(StoreIC_Miss, BUILTIN, kNoExtraICState, StoreWithVector) \
+ TFS(StoreIC_Normal, HANDLER, Code::STORE_IC, StoreWithVector) \
+ ASH(StoreIC_Setter_ForDeopt, STORE_IC, StoreICState::kStrictModeState) \
+ TFS(StoreIC_SlowSloppy, HANDLER, Code::STORE_IC, StoreWithVector) \
+ TFS(StoreIC_SlowStrict, HANDLER, Code::STORE_IC, StoreWithVector) \
+ \
+ /* Built-in functions for JavaScript */ \
+ /* Special internal builtins */ \
+ CPP(EmptyFunction) \
+ CPP(Illegal) \
+ CPP(RestrictedFunctionPropertiesThrower) \
+ CPP(RestrictedStrictArgumentsPropertiesThrower) \
+ CPP(UnsupportedThrower) \
+ TFJ(ReturnReceiver, 0) \
+ \
+ /* Array */ \
+ ASM(ArrayCode) \
+ ASM(InternalArrayCode) \
+ CPP(ArrayConcat) \
+ /* ES6 section 22.1.2.2 Array.isArray */ \
+ TFJ(ArrayIsArray, 1) \
+ /* ES7 #sec-array.prototype.includes */ \
+ TFJ(ArrayIncludes, 2) \
+ TFJ(ArrayIndexOf, 2) \
+ CPP(ArrayPop) \
+ CPP(ArrayPush) \
+ TFJ(FastArrayPush, -1) \
+ CPP(ArrayShift) \
+ CPP(ArraySlice) \
+ CPP(ArraySplice) \
+ CPP(ArrayUnshift) \
+ /* ES6 #sec-array.prototype.entries */ \
+ TFJ(ArrayPrototypeEntries, 0) \
+ /* ES6 #sec-array.prototype.keys */ \
+ TFJ(ArrayPrototypeKeys, 0) \
+ /* ES6 #sec-array.prototype.values */ \
+ TFJ(ArrayPrototypeValues, 0) \
+ /* ES6 #sec-%arrayiteratorprototype%.next */ \
+ TFJ(ArrayIteratorPrototypeNext, 0) \
+ \
+ /* ArrayBuffer */ \
+ CPP(ArrayBufferConstructor) \
+ CPP(ArrayBufferConstructor_ConstructStub) \
+ CPP(ArrayBufferPrototypeGetByteLength) \
+ CPP(ArrayBufferIsView) \
+ \
+ /* Boolean */ \
+ CPP(BooleanConstructor) \
+ CPP(BooleanConstructor_ConstructStub) \
+ /* ES6 section 19.3.3.2 Boolean.prototype.toString ( ) */ \
+ TFJ(BooleanPrototypeToString, 0) \
+ /* ES6 section 19.3.3.3 Boolean.prototype.valueOf ( ) */ \
+ TFJ(BooleanPrototypeValueOf, 0) \
+ \
+ /* CallSite */ \
+ CPP(CallSitePrototypeGetColumnNumber) \
+ CPP(CallSitePrototypeGetEvalOrigin) \
+ CPP(CallSitePrototypeGetFileName) \
+ CPP(CallSitePrototypeGetFunction) \
+ CPP(CallSitePrototypeGetFunctionName) \
+ CPP(CallSitePrototypeGetLineNumber) \
+ CPP(CallSitePrototypeGetMethodName) \
+ CPP(CallSitePrototypeGetPosition) \
+ CPP(CallSitePrototypeGetScriptNameOrSourceURL) \
+ CPP(CallSitePrototypeGetThis) \
+ CPP(CallSitePrototypeGetTypeName) \
+ CPP(CallSitePrototypeIsConstructor) \
+ CPP(CallSitePrototypeIsEval) \
+ CPP(CallSitePrototypeIsNative) \
+ CPP(CallSitePrototypeIsToplevel) \
+ CPP(CallSitePrototypeToString) \
+ \
+ /* DataView */ \
+ CPP(DataViewConstructor) \
+ CPP(DataViewConstructor_ConstructStub) \
+ CPP(DataViewPrototypeGetBuffer) \
+ CPP(DataViewPrototypeGetByteLength) \
+ CPP(DataViewPrototypeGetByteOffset) \
+ CPP(DataViewPrototypeGetInt8) \
+ CPP(DataViewPrototypeSetInt8) \
+ CPP(DataViewPrototypeGetUint8) \
+ CPP(DataViewPrototypeSetUint8) \
+ CPP(DataViewPrototypeGetInt16) \
+ CPP(DataViewPrototypeSetInt16) \
+ CPP(DataViewPrototypeGetUint16) \
+ CPP(DataViewPrototypeSetUint16) \
+ CPP(DataViewPrototypeGetInt32) \
+ CPP(DataViewPrototypeSetInt32) \
+ CPP(DataViewPrototypeGetUint32) \
+ CPP(DataViewPrototypeSetUint32) \
+ CPP(DataViewPrototypeGetFloat32) \
+ CPP(DataViewPrototypeSetFloat32) \
+ CPP(DataViewPrototypeGetFloat64) \
+ CPP(DataViewPrototypeSetFloat64) \
+ \
+ /* Date */ \
+ CPP(DateConstructor) \
+ CPP(DateConstructor_ConstructStub) \
+ /* ES6 section 20.3.4.2 Date.prototype.getDate ( ) */ \
+ TFJ(DatePrototypeGetDate, 0) \
+ /* ES6 section 20.3.4.3 Date.prototype.getDay ( ) */ \
+ TFJ(DatePrototypeGetDay, 0) \
+ /* ES6 section 20.3.4.4 Date.prototype.getFullYear ( ) */ \
+ TFJ(DatePrototypeGetFullYear, 0) \
+ /* ES6 section 20.3.4.5 Date.prototype.getHours ( ) */ \
+ TFJ(DatePrototypeGetHours, 0) \
+ /* ES6 section 20.3.4.6 Date.prototype.getMilliseconds ( ) */ \
+ TFJ(DatePrototypeGetMilliseconds, 0) \
+ /* ES6 section 20.3.4.7 Date.prototype.getMinutes ( ) */ \
+ TFJ(DatePrototypeGetMinutes, 0) \
+ /* ES6 section 20.3.4.8 Date.prototype.getMonth */ \
+ TFJ(DatePrototypeGetMonth, 0) \
+ /* ES6 section 20.3.4.9 Date.prototype.getSeconds ( ) */ \
+ TFJ(DatePrototypeGetSeconds, 0) \
+ /* ES6 section 20.3.4.10 Date.prototype.getTime ( ) */ \
+ TFJ(DatePrototypeGetTime, 0) \
+ /* ES6 section 20.3.4.11 Date.prototype.getTimezoneOffset ( ) */ \
+ TFJ(DatePrototypeGetTimezoneOffset, 0) \
+ /* ES6 section 20.3.4.12 Date.prototype.getUTCDate ( ) */ \
+ TFJ(DatePrototypeGetUTCDate, 0) \
+ /* ES6 section 20.3.4.13 Date.prototype.getUTCDay ( ) */ \
+ TFJ(DatePrototypeGetUTCDay, 0) \
+ /* ES6 section 20.3.4.14 Date.prototype.getUTCFullYear ( ) */ \
+ TFJ(DatePrototypeGetUTCFullYear, 0) \
+ /* ES6 section 20.3.4.15 Date.prototype.getUTCHours ( ) */ \
+ TFJ(DatePrototypeGetUTCHours, 0) \
+ /* ES6 section 20.3.4.16 Date.prototype.getUTCMilliseconds ( ) */ \
+ TFJ(DatePrototypeGetUTCMilliseconds, 0) \
+ /* ES6 section 20.3.4.17 Date.prototype.getUTCMinutes ( ) */ \
+ TFJ(DatePrototypeGetUTCMinutes, 0) \
+ /* ES6 section 20.3.4.18 Date.prototype.getUTCMonth ( ) */ \
+ TFJ(DatePrototypeGetUTCMonth, 0) \
+ /* ES6 section 20.3.4.19 Date.prototype.getUTCSeconds ( ) */ \
+ TFJ(DatePrototypeGetUTCSeconds, 0) \
+ /* ES6 section 20.3.4.44 Date.prototype.valueOf ( ) */ \
+ TFJ(DatePrototypeValueOf, 0) \
+ /* ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint ) */ \
+ TFJ(DatePrototypeToPrimitive, 1) \
+ CPP(DatePrototypeGetYear) \
+ CPP(DatePrototypeSetYear) \
+ CPP(DateNow) \
+ CPP(DateParse) \
+ CPP(DatePrototypeSetDate) \
+ CPP(DatePrototypeSetFullYear) \
+ CPP(DatePrototypeSetHours) \
+ CPP(DatePrototypeSetMilliseconds) \
+ CPP(DatePrototypeSetMinutes) \
+ CPP(DatePrototypeSetMonth) \
+ CPP(DatePrototypeSetSeconds) \
+ CPP(DatePrototypeSetTime) \
+ CPP(DatePrototypeSetUTCDate) \
+ CPP(DatePrototypeSetUTCFullYear) \
+ CPP(DatePrototypeSetUTCHours) \
+ CPP(DatePrototypeSetUTCMilliseconds) \
+ CPP(DatePrototypeSetUTCMinutes) \
+ CPP(DatePrototypeSetUTCMonth) \
+ CPP(DatePrototypeSetUTCSeconds) \
+ CPP(DatePrototypeToDateString) \
+ CPP(DatePrototypeToISOString) \
+ CPP(DatePrototypeToUTCString) \
+ CPP(DatePrototypeToString) \
+ CPP(DatePrototypeToTimeString) \
+ CPP(DatePrototypeToJson) \
+ CPP(DateUTC) \
+ \
+ /* Error */ \
+ CPP(ErrorConstructor) \
+ CPP(ErrorCaptureStackTrace) \
+ CPP(ErrorPrototypeToString) \
+ CPP(MakeError) \
+ CPP(MakeRangeError) \
+ CPP(MakeSyntaxError) \
+ CPP(MakeTypeError) \
+ CPP(MakeURIError) \
+ \
+ /* Function */ \
+ CPP(FunctionConstructor) \
+ ASM(FunctionPrototypeApply) \
+ CPP(FunctionPrototypeBind) \
+ TFJ(FastFunctionPrototypeBind, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ ASM(FunctionPrototypeCall) \
+ /* ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V ) */ \
+ TFJ(FunctionPrototypeHasInstance, 1) \
+ CPP(FunctionPrototypeToString) \
+ \
+ /* Belongs to Objects but is a dependency of GeneratorPrototypeResume */ \
+ TFS(CreateIterResultObject, BUILTIN, kNoExtraICState, \
+ CreateIterResultObject) \
+ \
+ /* Generator and Async */ \
+ CPP(GeneratorFunctionConstructor) \
+ /* ES6 section 25.3.1.2 Generator.prototype.next ( value ) */ \
+ TFJ(GeneratorPrototypeNext, 1) \
+ /* ES6 section 25.3.1.3 Generator.prototype.return ( value ) */ \
+ TFJ(GeneratorPrototypeReturn, 1) \
+ /* ES6 section 25.3.1.4 Generator.prototype.throw ( exception ) */ \
+ TFJ(GeneratorPrototypeThrow, 1) \
+ CPP(AsyncFunctionConstructor) \
+ \
+ /* Global object */ \
+ CPP(GlobalDecodeURI) \
+ CPP(GlobalDecodeURIComponent) \
+ CPP(GlobalEncodeURI) \
+ CPP(GlobalEncodeURIComponent) \
+ CPP(GlobalEscape) \
+ CPP(GlobalUnescape) \
+ CPP(GlobalEval) \
+ /* ES6 section 18.2.2 isFinite ( number ) */ \
+ TFJ(GlobalIsFinite, 1) \
+ /* ES6 section 18.2.3 isNaN ( number ) */ \
+ TFJ(GlobalIsNaN, 1) \
+ \
+ /* JSON */ \
+ CPP(JsonParse) \
+ CPP(JsonStringify) \
+ \
+ /* ICs */ \
+ TFS(LoadIC, LOAD_IC, kNoExtraICState, LoadWithVector) \
+ TFS(LoadICTrampoline, LOAD_IC, kNoExtraICState, Load) \
+ TFS(KeyedLoadIC, KEYED_LOAD_IC, kNoExtraICState, LoadWithVector) \
+ TFS(KeyedLoadICTrampoline, KEYED_LOAD_IC, kNoExtraICState, Load) \
+ TFS(StoreIC, STORE_IC, kNoExtraICState, StoreWithVector) \
+ TFS(StoreICTrampoline, STORE_IC, kNoExtraICState, Store) \
+ TFS(StoreICStrict, STORE_IC, StoreICState::kStrictModeState, \
+ StoreWithVector) \
+ TFS(StoreICStrictTrampoline, STORE_IC, StoreICState::kStrictModeState, \
+ Store) \
+ TFS(KeyedStoreIC, KEYED_STORE_IC, kNoExtraICState, StoreWithVector) \
+ TFS(KeyedStoreICTrampoline, KEYED_STORE_IC, kNoExtraICState, Store) \
+ TFS(KeyedStoreICStrict, KEYED_STORE_IC, StoreICState::kStrictModeState, \
+ StoreWithVector) \
+ TFS(KeyedStoreICStrictTrampoline, KEYED_STORE_IC, \
+ StoreICState::kStrictModeState, Store) \
+ TFS(LoadGlobalIC, LOAD_GLOBAL_IC, LoadGlobalICState::kNotInsideTypeOfState, \
+ LoadGlobalWithVector) \
+ TFS(LoadGlobalICInsideTypeof, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kInsideTypeOfState, LoadGlobalWithVector) \
+ TFS(LoadGlobalICTrampoline, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kNotInsideTypeOfState, LoadGlobal) \
+ TFS(LoadGlobalICInsideTypeofTrampoline, LOAD_GLOBAL_IC, \
+ LoadGlobalICState::kInsideTypeOfState, LoadGlobal) \
+ \
+ /* Math */ \
+ /* ES6 section 20.2.2.1 Math.abs ( x ) */ \
+ TFJ(MathAbs, 1) \
+ /* ES6 section 20.2.2.2 Math.acos ( x ) */ \
+ TFJ(MathAcos, 1) \
+ /* ES6 section 20.2.2.3 Math.acosh ( x ) */ \
+ TFJ(MathAcosh, 1) \
+ /* ES6 section 20.2.2.4 Math.asin ( x ) */ \
+ TFJ(MathAsin, 1) \
+ /* ES6 section 20.2.2.5 Math.asinh ( x ) */ \
+ TFJ(MathAsinh, 1) \
+ /* ES6 section 20.2.2.6 Math.atan ( x ) */ \
+ TFJ(MathAtan, 1) \
+ /* ES6 section 20.2.2.7 Math.atanh ( x ) */ \
+ TFJ(MathAtanh, 1) \
+ /* ES6 section 20.2.2.8 Math.atan2 ( y, x ) */ \
+ TFJ(MathAtan2, 2) \
+ /* ES6 section 20.2.2.9 Math.cbrt ( x ) */ \
+ TFJ(MathCbrt, 1) \
+ /* ES6 section 20.2.2.10 Math.ceil ( x ) */ \
+ TFJ(MathCeil, 1) \
+ /* ES6 section 20.2.2.11 Math.clz32 ( x ) */ \
+ TFJ(MathClz32, 1) \
+ /* ES6 section 20.2.2.12 Math.cos ( x ) */ \
+ TFJ(MathCos, 1) \
+ /* ES6 section 20.2.2.13 Math.cosh ( x ) */ \
+ TFJ(MathCosh, 1) \
+ /* ES6 section 20.2.2.14 Math.exp ( x ) */ \
+ TFJ(MathExp, 1) \
+ /* ES6 section 20.2.2.15 Math.expm1 ( x ) */ \
+ TFJ(MathExpm1, 1) \
+ /* ES6 section 20.2.2.16 Math.floor ( x ) */ \
+ TFJ(MathFloor, 1) \
+ /* ES6 section 20.2.2.17 Math.fround ( x ) */ \
+ TFJ(MathFround, 1) \
+ /* ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values ) */ \
+ CPP(MathHypot) \
+ /* ES6 section 20.2.2.19 Math.imul ( x, y ) */ \
+ TFJ(MathImul, 2) \
+ /* ES6 section 20.2.2.20 Math.log ( x ) */ \
+ TFJ(MathLog, 1) \
+ /* ES6 section 20.2.2.21 Math.log1p ( x ) */ \
+ TFJ(MathLog1p, 1) \
+ /* ES6 section 20.2.2.22 Math.log10 ( x ) */ \
+ TFJ(MathLog10, 1) \
+ /* ES6 section 20.2.2.23 Math.log2 ( x ) */ \
+ TFJ(MathLog2, 1) \
+ /* ES6 section 20.2.2.24 Math.max ( value1, value2 , ...values ) */ \
+ ASM(MathMax) \
+ /* ES6 section 20.2.2.25 Math.min ( value1, value2 , ...values ) */ \
+ ASM(MathMin) \
+ /* ES6 section 20.2.2.26 Math.pow ( x, y ) */ \
+ TFJ(MathPow, 2) \
+ /* ES6 section 20.2.2.27 Math.random */ \
+ TFJ(MathRandom, 0) \
+ /* ES6 section 20.2.2.28 Math.round ( x ) */ \
+ TFJ(MathRound, 1) \
+ /* ES6 section 20.2.2.29 Math.sign ( x ) */ \
+ TFJ(MathSign, 1) \
+ /* ES6 section 20.2.2.30 Math.sin ( x ) */ \
+ TFJ(MathSin, 1) \
+ /* ES6 section 20.2.2.31 Math.sinh ( x ) */ \
+ TFJ(MathSinh, 1) \
+ /* ES6 section 20.2.2.32 Math.sqrt ( x ) */ \
+ TFJ(MathSqrt, 1) \
+ /* ES6 section 20.2.2.33 Math.tan ( x ) */ \
+ TFJ(MathTan, 1) \
+ /* ES6 section 20.2.2.34 Math.tanh ( x ) */ \
+ TFJ(MathTanh, 1) \
+ /* ES6 section 20.2.2.35 Math.trunc ( x ) */ \
+ TFJ(MathTrunc, 1) \
+ \
+ /* Number */ \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Call]] case */ \
+ ASM(NumberConstructor) \
+ /* ES6 section 20.1.1.1 Number ( [ value ] ) for the [[Construct]] case */ \
+ ASM(NumberConstructor_ConstructStub) \
+ /* ES6 section 20.1.2.2 Number.isFinite ( number ) */ \
+ TFJ(NumberIsFinite, 1) \
+ /* ES6 section 20.1.2.3 Number.isInteger ( number ) */ \
+ TFJ(NumberIsInteger, 1) \
+ /* ES6 section 20.1.2.4 Number.isNaN ( number ) */ \
+ TFJ(NumberIsNaN, 1) \
+ /* ES6 section 20.1.2.5 Number.isSafeInteger ( number ) */ \
+ TFJ(NumberIsSafeInteger, 1) \
+ /* ES6 section 20.1.2.12 Number.parseFloat ( string ) */ \
+ TFJ(NumberParseFloat, 1) \
+ /* ES6 section 20.1.2.13 Number.parseInt ( string, radix ) */ \
+ TFJ(NumberParseInt, 2) \
+ CPP(NumberPrototypeToExponential) \
+ CPP(NumberPrototypeToFixed) \
+ CPP(NumberPrototypeToLocaleString) \
+ CPP(NumberPrototypeToPrecision) \
+ CPP(NumberPrototypeToString) \
+ /* ES6 section 20.1.3.7 Number.prototype.valueOf ( ) */ \
+ TFJ(NumberPrototypeValueOf, 0) \
+ TFS(Add, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Subtract, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Multiply, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Divide, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(Modulus, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseAnd, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseOr, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(BitwiseXor, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftLeft, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRight, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(ShiftRightLogical, BUILTIN, kNoExtraICState, BinaryOp) \
+ TFS(LessThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(LessThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThan, BUILTIN, kNoExtraICState, Compare) \
+ TFS(GreaterThanOrEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(Equal, BUILTIN, kNoExtraICState, Compare) \
+ TFS(NotEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictEqual, BUILTIN, kNoExtraICState, Compare) \
+ TFS(StrictNotEqual, BUILTIN, kNoExtraICState, Compare) \
+ \
+ /* Object */ \
+ CPP(ObjectAssign) \
+ TFJ(ObjectCreate, 2) \
+ CPP(ObjectDefineGetter) \
+ CPP(ObjectDefineProperties) \
+ CPP(ObjectDefineProperty) \
+ CPP(ObjectDefineSetter) \
+ CPP(ObjectEntries) \
+ CPP(ObjectFreeze) \
+ CPP(ObjectGetOwnPropertyDescriptor) \
+ CPP(ObjectGetOwnPropertyDescriptors) \
+ CPP(ObjectGetOwnPropertyNames) \
+ CPP(ObjectGetOwnPropertySymbols) \
+ CPP(ObjectGetPrototypeOf) \
+ CPP(ObjectSetPrototypeOf) \
+ /* ES6 section 19.1.3.2 Object.prototype.hasOwnProperty */ \
+ TFJ(ObjectHasOwnProperty, 1) \
+ CPP(ObjectIs) \
+ CPP(ObjectIsExtensible) \
+ CPP(ObjectIsFrozen) \
+ CPP(ObjectIsSealed) \
+ CPP(ObjectKeys) \
+ CPP(ObjectLookupGetter) \
+ CPP(ObjectLookupSetter) \
+ CPP(ObjectPreventExtensions) \
+ /* ES6 section 19.1.3.6 Object.prototype.toString () */ \
+ TFJ(ObjectProtoToString, 0) \
+ CPP(ObjectPrototypePropertyIsEnumerable) \
+ CPP(ObjectPrototypeGetProto) \
+ CPP(ObjectPrototypeSetProto) \
+ CPP(ObjectSeal) \
+ CPP(ObjectValues) \
+ \
+ TFS(HasProperty, BUILTIN, kNoExtraICState, HasProperty) \
+ TFS(InstanceOf, BUILTIN, kNoExtraICState, Compare) \
+ TFS(OrdinaryHasInstance, BUILTIN, kNoExtraICState, Compare) \
+ TFS(ForInFilter, BUILTIN, kNoExtraICState, ForInFilter) \
+ \
+ /* Promise */ \
+ TFJ(PromiseGetCapabilitiesExecutor, 2) \
+ TFJ(NewPromiseCapability, 2) \
+ TFJ(PromiseConstructor, 1) \
+ TFJ(PromiseInternalConstructor, 1) \
+ TFJ(IsPromise, 1) \
+ TFJ(PromiseResolveClosure, 1) \
+ TFJ(PromiseRejectClosure, 1) \
+ TFJ(PromiseThen, 2) \
+ TFJ(PromiseCatch, 1) \
+ TFJ(PerformPromiseThen, 4) \
+ TFJ(ResolvePromise, 2) \
+ TFS(PromiseHandleReject, BUILTIN, kNoExtraICState, PromiseHandleReject) \
+ TFJ(PromiseHandle, 5) \
+ TFJ(PromiseResolve, 1) \
+ TFJ(PromiseReject, 1) \
+ TFJ(InternalPromiseReject, 3) \
+ \
+ /* Proxy */ \
+ CPP(ProxyConstructor) \
+ CPP(ProxyConstructor_ConstructStub) \
+ \
+ /* Reflect */ \
+ ASM(ReflectApply) \
+ ASM(ReflectConstruct) \
+ CPP(ReflectDefineProperty) \
+ CPP(ReflectDeleteProperty) \
+ CPP(ReflectGet) \
+ CPP(ReflectGetOwnPropertyDescriptor) \
+ CPP(ReflectGetPrototypeOf) \
+ CPP(ReflectHas) \
+ CPP(ReflectIsExtensible) \
+ CPP(ReflectOwnKeys) \
+ CPP(ReflectPreventExtensions) \
+ CPP(ReflectSet) \
+ CPP(ReflectSetPrototypeOf) \
+ \
+ /* RegExp */ \
+ CPP(RegExpCapture1Getter) \
+ CPP(RegExpCapture2Getter) \
+ CPP(RegExpCapture3Getter) \
+ CPP(RegExpCapture4Getter) \
+ CPP(RegExpCapture5Getter) \
+ CPP(RegExpCapture6Getter) \
+ CPP(RegExpCapture7Getter) \
+ CPP(RegExpCapture8Getter) \
+ CPP(RegExpCapture9Getter) \
+ TFJ(RegExpConstructor, 2) \
+ TFJ(RegExpInternalMatch, 2) \
+ CPP(RegExpInputGetter) \
+ CPP(RegExpInputSetter) \
+ CPP(RegExpLastMatchGetter) \
+ CPP(RegExpLastParenGetter) \
+ CPP(RegExpLeftContextGetter) \
+ TFJ(RegExpPrototypeCompile, 2) \
+ TFJ(RegExpPrototypeExec, 1) \
+ TFJ(RegExpPrototypeFlagsGetter, 0) \
+ TFJ(RegExpPrototypeGlobalGetter, 0) \
+ TFJ(RegExpPrototypeIgnoreCaseGetter, 0) \
+ TFJ(RegExpPrototypeMatch, 1) \
+ TFJ(RegExpPrototypeMultilineGetter, 0) \
+ TFJ(RegExpPrototypeReplace, 2) \
+ TFJ(RegExpPrototypeSearch, 1) \
+ TFJ(RegExpPrototypeSourceGetter, 0) \
+ TFJ(RegExpPrototypeSplit, 2) \
+ TFJ(RegExpPrototypeStickyGetter, 0) \
+ TFJ(RegExpPrototypeTest, 1) \
+ CPP(RegExpPrototypeToString) \
+ TFJ(RegExpPrototypeUnicodeGetter, 0) \
+ CPP(RegExpRightContextGetter) \
+ \
+ /* SharedArrayBuffer */ \
+ CPP(SharedArrayBufferPrototypeGetByteLength) \
+ TFJ(AtomicsLoad, 2) \
+ TFJ(AtomicsStore, 3) \
+ \
+ /* String */ \
+ ASM(StringConstructor) \
+ ASM(StringConstructor_ConstructStub) \
+ CPP(StringFromCodePoint) \
+ /* ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits ) */ \
+ TFJ(StringFromCharCode, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 section 21.1.3.1 String.prototype.charAt ( pos ) */ \
+ TFJ(StringPrototypeCharAt, 1) \
+ /* ES6 section 21.1.3.2 String.prototype.charCodeAt ( pos ) */ \
+ TFJ(StringPrototypeCharCodeAt, 1) \
+ /* ES6 section 21.1.3.6 */ \
+ /* String.prototype.endsWith ( searchString [ , endPosition ] ) */ \
+ CPP(StringPrototypeEndsWith) \
+ /* ES6 section 21.1.3.7 */ \
+ /* String.prototype.includes ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeIncludes) \
+ /* ES6 section #sec-string.prototype.indexof */ \
+ /* String.prototype.indexOf ( searchString [ , position ] ) */ \
+ TFJ(StringPrototypeIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 section 21.1.3.9 */ \
+ /* String.prototype.lastIndexOf ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeLastIndexOf) \
+ /* ES6 section 21.1.3.10 String.prototype.localeCompare ( that ) */ \
+ CPP(StringPrototypeLocaleCompare) \
+ /* ES6 section 21.1.3.12 String.prototype.normalize ( [form] ) */ \
+ CPP(StringPrototypeNormalize) \
+ /* ES6 section B.2.3.1 String.prototype.substr ( start, length ) */ \
+ TFJ(StringPrototypeSubstr, 2) \
+ /* ES6 section 21.1.3.19 String.prototype.substring ( start, end ) */ \
+ TFJ(StringPrototypeSubstring, 2) \
+ /* ES6 section 21.1.3.20 */ \
+ /* String.prototype.startsWith ( searchString [ , position ] ) */ \
+ CPP(StringPrototypeStartsWith) \
+ /* ES6 section 21.1.3.25 String.prototype.toString () */ \
+ TFJ(StringPrototypeToString, 0) \
+ CPP(StringPrototypeTrim) \
+ CPP(StringPrototypeTrimLeft) \
+ CPP(StringPrototypeTrimRight) \
+ /* ES6 section 21.1.3.28 String.prototype.valueOf () */ \
+ TFJ(StringPrototypeValueOf, 0) \
+ /* ES6 #sec-string.prototype-@@iterator */ \
+ TFJ(StringPrototypeIterator, 0) \
+ \
+ /* StringIterator */ \
+ TFJ(StringIteratorPrototypeNext, 0) \
+ \
+ /* Symbol */ \
+ CPP(SymbolConstructor) \
+ CPP(SymbolConstructor_ConstructStub) \
+ /* ES6 section 19.4.2.1 Symbol.for */ \
+ CPP(SymbolFor) \
+ /* ES6 section 19.4.2.5 Symbol.keyFor */ \
+ CPP(SymbolKeyFor) \
+ /* ES6 section 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint ) */ \
+ TFJ(SymbolPrototypeToPrimitive, 1) \
+ /* ES6 section 19.4.3.2 Symbol.prototype.toString ( ) */ \
+ TFJ(SymbolPrototypeToString, 0) \
+ /* ES6 section 19.4.3.3 Symbol.prototype.valueOf ( ) */ \
+ TFJ(SymbolPrototypeValueOf, 0) \
+ \
+ /* TypedArray */ \
+ CPP(TypedArrayPrototypeBuffer) \
+ /* ES6 section 22.2.3.2 get %TypedArray%.prototype.byteLength */ \
+ TFJ(TypedArrayPrototypeByteLength, 0) \
+ /* ES6 section 22.2.3.3 get %TypedArray%.prototype.byteOffset */ \
+ TFJ(TypedArrayPrototypeByteOffset, 0) \
+ /* ES6 section 22.2.3.18 get %TypedArray%.prototype.length */ \
+ TFJ(TypedArrayPrototypeLength, 0) \
+ /* ES6 #sec-%typedarray%.prototype.entries */ \
+ TFJ(TypedArrayPrototypeEntries, 0) \
+ /* ES6 #sec-%typedarray%.prototype.keys */ \
+ TFJ(TypedArrayPrototypeKeys, 0) \
+ /* ES6 #sec-%typedarray%.prototype.values */ \
+ TFJ(TypedArrayPrototypeValues, 0)
#define IGNORE_BUILTIN(...)
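BUILTIN_LIST is an X-macro: every builtin is declared exactly once, tagged with its implementation kind (CPP, TFJ, TFS, ASM, ...), and each consumer expands the list with one macro per kind, passing IGNORE_BUILTIN for the kinds it does not need. A minimal self-contained sketch of the pattern (toy macros and names, not V8's real ones):

    #include <cstdio>

    // Miniature of the X-macro scheme: each entry names a builtin and its
    // kind; consumers supply one expansion macro per kind.
    #define MINI_BUILTIN_LIST(CPP, TFJ) \
      CPP(ObjectKeys)                   \
      TFJ(MathSin, 1)

    #define IGNORE(...)  // skip entries of a kind this consumer ignores
    #define DECLARE_CPP(Name) void Builtin_##Name();
    #define DECLARE_TFJ(Name, Argc) void Generate_##Name(int argc);

    MINI_BUILTIN_LIST(DECLARE_CPP, IGNORE)  // declares Builtin_ObjectKeys()
    MINI_BUILTIN_LIST(IGNORE, DECLARE_TFJ)  // declares Generate_MathSin(int)

    void Builtin_ObjectKeys() { std::puts("ObjectKeys"); }
    void Generate_MathSin(int argc) { std::printf("MathSin, argc=%d\n", argc); }

    int main() {
      Builtin_ObjectKeys();
      Generate_MathSin(1);
    }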
@@ -717,8 +791,10 @@ namespace internal {
IGNORE_BUILTIN, IGNORE_BUILTIN, V)
// Forward declarations.
-class CodeStubAssembler;
class ObjectVisitor;
+namespace compiler {
+class CodeAssemblerState;
+}
class Builtins {
public:
@@ -761,6 +837,9 @@ class Builtins {
TailCallMode tail_call_mode,
CallableType function_type = CallableType::kAny);
Handle<Code> InterpreterPushArgsAndConstruct(CallableType function_type);
+ Handle<Code> NewFunctionContext(ScopeType scope_type);
+ Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
+ Handle<Code> NewCloneShallowObject(int length);
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
@@ -817,16 +896,13 @@ class Builtins {
static void Generate_InterpreterPushArgsAndConstructImpl(
MacroAssembler* masm, CallableType function_type);
- static void Generate_DatePrototype_GetField(CodeStubAssembler* masm,
- int field_index);
-
enum class MathMaxMinKind { kMax, kMin };
static void Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind);
#define DECLARE_ASM(Name, ...) \
static void Generate_##Name(MacroAssembler* masm);
#define DECLARE_TF(Name, ...) \
- static void Generate_##Name(CodeStubAssembler* csasm);
+ static void Generate_##Name(compiler::CodeAssemblerState* state);
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, DECLARE_TF, DECLARE_TF,
DECLARE_ASM, DECLARE_ASM, DECLARE_ASM)
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 4287333d3f..2cf1708b12 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -135,8 +135,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(edi);
__ Push(edx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(edi);
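Throughout this patch, "FastNewObjectStub stub(isolate); CallStub(&stub)" becomes a direct call to the code object returned by CodeFactory::FastNewObject(isolate).code(). The target is the same fast-path receiver allocation; the call site just goes through a Callable (code object plus descriptor) instead of instantiating a stub. A toy C++ model of that shape (hypothetical types, not V8's API):

    #include <cstdio>
    #include <functional>

    // Hypothetical model: a Callable bundles a code object with its call
    // descriptor, so call sites no longer construct a stub object first.
    struct Callable {
      std::function<void()> code;  // stands in for Handle<Code>
    };

    Callable FastNewObject() {  // stands in for CodeFactory::FastNewObject
      return {[] { std::puts("allocate receiver on the fast path"); }};
    }

    int main() {
      // Before: FastNewObjectStub stub(isolate); __ CallStub(&stub);
      // After:  __ Call(CodeFactory::FastNewObject(isolate).code(), ...);
      FastNewObject().code();
    }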
@@ -386,17 +386,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
// Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
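The stepping check is simplified here: instead of comparing the debugger's last step action against StepIn, the generated code now tests a single byte, debug_hook_on_function_call, for nonzero. Roughly (a sketch, modeling the flag as a plain global):

    #include <cstdint>
    #include <cstdio>

    // The debugger keeps one byte that is nonzero whenever any hook must run
    // on function calls; generated code only compares it against zero.
    uint8_t debug_hook_on_function_call = 0;

    bool MustPrepareStepIn() { return debug_hook_on_function_call != 0; }

    int main() {
      debug_hook_on_function_call = 1;            // debugger enables the hook
      std::printf("%d\n", MustPrepareStepIn());   // 1
    }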
@@ -437,19 +436,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(eax);
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -457,56 +457,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(eax); // Return address.
- __ Push(ebp); // Caller's frame pointer.
- __ Move(ebp, esp);
- __ Push(esi); // Callee's context.
- __ Push(edi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- {
- Label done_loop, loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(equal, &done_loop, Label::kNear);
- __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
- Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
- // Resume the generator function at the continuation.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ mov(eax, ebx); // Continuation expects generator object in eax.
- __ jmp(edx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx);
__ Push(edx);
__ Push(edi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
__ Pop(ebx);
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -605,6 +562,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
// Push Smi tagged initial bytecode array offset.
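The new store above resets the bytecode array's age byte every time the interpreter enters the function, so bytecode that keeps executing is never treated as old by the code-aging mechanism. In rough C++ terms (stand-in type, not V8's BytecodeArray):

    #include <cstdint>

    // Hypothetical stand-in: one byte records how many GC cycles have passed
    // since the bytecode last executed.
    struct BytecodeArray {
      static constexpr uint8_t kNoAgeBytecodeAge = 0;
      uint8_t bytecode_age = 3;
    };

    // What the trampoline's single byte store does: entering the interpreter
    // resets the age, so actively running bytecode never looks stale.
    void ResetCodeAge(BytecodeArray& array) {
      array.bytecode_age = BytecodeArray::kNoAgeBytecodeAge;
    }

    int main() {
      BytecodeArray array;
      ResetCodeAge(array);
      return array.bytecode_age;  // 0
    }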
@@ -1092,12 +1054,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1165,14 +1121,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
- __ and_(ebx, Code::KindField::kMask);
- __ shr(ebx, Code::KindField::kShift);
- __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ Move(ebx, masm->CodeObject());
+ __ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, ebx);
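The fast path no longer decodes Code::KindField to ask whether the SFI holds valid full code; it compares the SFI's code against the currently executing CompileLazy builtin (masm->CodeObject()) and installs anything else it finds. A sketch with stand-in types (not V8's real classes):

    struct Code { int id; };
    struct SharedFunctionInfo { Code* code; };
    struct JSFunction { SharedFunctionInfo* shared; Code* code_entry; };

    Code compile_lazy{0};  // the builtin this trampoline itself is running

    // Fast path: if the SFI already holds anything other than CompileLazy,
    // install it on the closure; otherwise the runtime must compile.
    bool TryInstallSharedCode(JSFunction* closure) {
      Code* code = closure->shared->code;
      if (code == &compile_lazy) return false;  // gotta call runtime
      closure->code_entry = code;  // V8 additionally runs a write barrier
      return true;
    }

    int main() {
      Code optimized{1};
      SharedFunctionInfo sfi{&optimized};
      JSFunction fn{&sfi, &compile_lazy};
      return TryInstallSharedCode(&fn) ? 0 : 1;  // 0: installed
    }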
@@ -1294,14 +1250,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
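With the even/odd incremental-marking split gone, CODE_AGE_LIST now expands to a single Generate_Make<Age>CodeYoungAgain per age instead of a pair. A toy expansion of the same shape (miniature list with illustrative age names):

    #include <cstdio>

    // Miniature of CODE_AGE_LIST: V visits each code-age name.
    #define MINI_CODE_AGE_LIST(V) \
      V(Quadragenarian)           \
      V(Quinquagenarian)

    // One generator per age now, instead of an Even/Odd-marking pair.
    #define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)       \
      void Generate_Make##C##CodeYoungAgain() {        \
        std::puts("make " #C " code young again");     \
      }
    MINI_CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
    #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR

    int main() {
      Generate_MakeQuadragenarianCodeYoungAgain();
      Generate_MakeQuinquagenarianCodeYoungAgain();
    }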
@@ -1969,8 +1920,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
}
@@ -2132,8 +2083,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2193,7 +2144,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(eax, &create_runtime);
// Load the map of argumentsList into ecx.
@@ -2237,6 +2189,22 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(eax, ecx);
__ jmp(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that the prototype is actually Array.prototype.
+ __ bind(&create_holey_array);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2244,10 +2212,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ j(above, &create_runtime);
__ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
@@ -2287,18 +2257,26 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ movd(xmm0, edx);
+ __ movd(xmm1, edi);
__ PopReturnAddressTo(edx);
__ Move(ecx, Immediate(0));
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmp(ecx, ebx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ mov(edi,
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(edi);
__ inc(ecx);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
+ __ movd(edi, xmm1);
__ movd(edx, xmm0);
__ Move(eax, ebx);
}
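The two hunks above add a fast path for spreading holey JSArrays: it is taken only when the array protector cell is intact and the array's prototype is still the initial Array.prototype (so reading a hole cannot be observed through the prototype chain), and the push loop then rewrites each hole to undefined. A C++ model of those semantics (std::nullopt standing in for the_hole, 0.0 for undefined):

    #include <cstdio>
    #include <optional>
    #include <vector>

    // Hypothetical model of a holey backing store.
    using Elements = std::vector<std::optional<double>>;

    // The fast path is only valid when a hole cannot leak an observable
    // prototype-chain lookup; otherwise fall back to the runtime.
    bool CanSpreadHoleyFast(bool protector_intact, bool prototype_is_initial) {
      return protector_intact && prototype_is_initial;
    }

    // What the push loop does: turn each hole into undefined while copying
    // the elements onto the stack.
    std::vector<double> PushArguments(const Elements& elements) {
      std::vector<double> stack;
      for (const auto& slot : elements) stack.push_back(slot.value_or(0.0));
      return stack;
    }

    int main() {
      if (!CanSpreadHoleyFast(true, true)) return 1;
      for (double arg : PushArguments({1.0, std::nullopt, 3.0}))
        std::printf("%g\n", arg);  // 1 0 3
    }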
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index b9c4a72dd0..78ca6c5a6f 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -139,7 +139,7 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ LoadRoot(t2, root_index);
__ ldc1(f0, FieldMemOperand(t2, HeapNumber::kValueOffset));
- Label done_loop, loop;
+ Label done_loop, loop, done;
__ mov(a3, a0);
__ bind(&loop);
{
@@ -195,15 +195,25 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// accumulator value on the left hand side (f0) and the next parameter value
// on the right hand side (f2).
// We need to work out which HeapNumber (or smi) the result came from.
- Label compare_nan, set_value;
+ Label compare_nan, set_value, ool_min, ool_max;
__ BranchF(nullptr, &compare_nan, eq, f0, f2);
__ Move(t0, t1, f0);
if (kind == MathMaxMinKind::kMin) {
- __ MinNaNCheck_d(f0, f0, f2);
+ __ Float64Min(f0, f0, f2, &ool_min);
} else {
DCHECK(kind == MathMaxMinKind::kMax);
- __ MaxNaNCheck_d(f0, f0, f2);
+ __ Float64Max(f0, f0, f2, &ool_max);
}
+ __ jmp(&done);
+
+ __ bind(&ool_min);
+ __ Float64MinOutOfLine(f0, f0, f2);
+ __ jmp(&done);
+
+ __ bind(&ool_max);
+ __ Float64MaxOutOfLine(f0, f0, f2);
+
+ __ bind(&done);
__ Move(at, t8, f0);
__ Branch(&set_value, ne, t0, Operand(at));
__ Branch(&set_value, ne, t1, Operand(t8));
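Float64Min/Float64Max take the inline fast path when the hardware result is usable and branch to the OutOfLine variants for the NaN and signed-zero cases. The semantics being implemented are those of Math.min/Math.max: NaN propagates, and -0 is ordered below +0. A scalar C++ model (a sketch, not the V8 macro-assembler helpers):

    #include <cmath>
    #include <cstdio>

    // NaN propagates, and -0 counts as smaller than +0 (plain '<' cannot
    // distinguish the two zeros, hence the extra case).
    double Float64Min(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return std::nan("");
      if (a == 0.0 && b == 0.0) return std::signbit(a) ? a : b;
      return a < b ? a : b;
    }

    int main() {
      std::printf("%f\n", Float64Min(-0.0, 0.0));          // -0.000000
      std::printf("%f\n", Float64Min(1.0, std::nan("")));  // nan
    }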
@@ -331,11 +341,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -482,11 +492,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -575,8 +585,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
// Allocate the new receiver object.
__ Push(a1, a3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(t4, v0);
__ Pop(a1, a3);
@@ -854,18 +864,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ li(t1, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(t1, Operand(debug_hook));
__ lb(t1, MemOperand(t1));
- __ Branch(&prepare_step_in_if_stepping, ge, t1, Operand(StepIn));
+ __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg));
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -905,14 +914,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- __ GetObjectType(a3, a3, a3);
- __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
@@ -927,54 +937,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(a2);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(ra, fp);
- __ Move(fp, sp);
- __ Push(cp, t0);
-
- // Restore the operand stack.
- __ lw(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ lw(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
- __ Addu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(a3, a0, a3, kPointerSizeLog2 - 1);
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ Branch(&done_loop, eq, a0, Operand(a3));
- __ lw(t1, MemOperand(a0));
- __ Push(t1);
- __ Branch(USE_DELAY_SLOT, &loop);
- __ addiu(a0, a0, kPointerSize); // In delay slot.
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
- __ sw(t1, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Addu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Move(v0, a1); // Continuation expects generator object in v0.
- __ Jump(a3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a2, t0);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1071,6 +1038,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Reset code age.
+ DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+ __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1407,11 +1379,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // OSR id set to none?
- __ lw(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ lw(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1484,13 +1451,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ And(t1, t1,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lw(t1, FieldMemOperand(entry, Code::kFlagsOffset));
- __ And(t1, t1, Operand(Code::KindField::kMask));
- __ srl(t1, t1, Code::KindField::kShift);
- __ Branch(&gotta_call_runtime_no_stack, eq, t1, Operand(Code::BUILTIN));
- // Yes, install the full code.
+ __ Move(t1, masm->CodeObject());
+ __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+ // Install the SFI's code entry.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, t1);
@@ -1605,14 +1572,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2173,7 +2135,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(a0, &create_runtime);
// Load the map of argumentsList into a2.
@@ -2189,8 +2152,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Branch(&create_arguments, eq, a2, Operand(at));
// Check if argumentsList is a fast JSArray.
- __ lw(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
// Ask the runtime to create the list (actually a FixedArray).
@@ -2216,15 +2178,32 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(a0, t0);
__ Branch(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that the prototype is actually Array.prototype.
+ __ bind(&create_holey_array);
+ __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ lw(at, ContextMemOperand(t0, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ Branch(&create_runtime, ne, a2, Operand(at));
+ __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+ __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ Branch(&create_runtime, ne, a2,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
+ __ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ SmiUntag(a2);
+ __ Branch(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
- __ lw(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(a2);
+ __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(t1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
- __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
__ lw(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
__ lw(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
__ SmiUntag(a2);
@@ -2259,11 +2238,15 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
__ mov(t0, zero_reg);
- Label done, loop;
+ Label done, push, loop;
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
__ Branch(&done, eq, t0, Operand(a2));
__ Lsa(at, a0, t0, kPointerSizeLog2);
__ lw(at, FieldMemOperand(at, FixedArray::kHeaderSize));
+ __ Branch(&push, ne, t1, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
__ Push(at);
__ Addu(t0, t0, Operand(1));
__ Branch(&loop);
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index a6abb55c46..9541d8d5d0 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -139,7 +139,7 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
__ LoadRoot(t1, root_index);
__ ldc1(f0, FieldMemOperand(t1, HeapNumber::kValueOffset));
- Label done_loop, loop;
+ Label done_loop, loop, done;
__ mov(a3, a0);
__ bind(&loop);
{
@@ -195,15 +195,25 @@ void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
// accumulator value on the left hand side (f0) and the next parameter value
// on the right hand side (f2).
// We need to work out which HeapNumber (or smi) the result came from.
- Label compare_nan;
+ Label compare_nan, ool_min, ool_max;
__ BranchF(nullptr, &compare_nan, eq, f0, f2);
__ Move(a4, f0);
if (kind == MathMaxMinKind::kMin) {
- __ MinNaNCheck_d(f0, f0, f2);
+ __ Float64Min(f0, f0, f2, &ool_min);
} else {
DCHECK(kind == MathMaxMinKind::kMax);
- __ MaxNaNCheck_d(f0, f0, f2);
+ __ Float64Max(f0, f0, f2, &ool_max);
}
+ __ jmp(&done);
+
+ __ bind(&ool_min);
+ __ Float64MinOutOfLine(f0, f0, f2);
+ __ jmp(&done);
+
+ __ bind(&ool_max);
+ __ Float64MaxOutOfLine(f0, f0, f2);
+
+ __ bind(&done);
__ Move(at, f0);
__ Branch(&loop, eq, a4, Operand(at));
__ mov(t1, a2);
@@ -329,11 +339,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -481,11 +491,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(t0);
__ EnterBuiltinFrame(cp, a1, t0);
__ Push(a0);
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(a0);
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
@@ -572,8 +582,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
if (create_implicit_receiver) {
__ Push(a1, a3);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(t0, v0);
__ Pop(a1, a3);
@@ -730,18 +740,17 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
__ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ li(a5, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, Operand(debug_hook));
__ lb(a5, MemOperand(a5));
- __ Branch(&prepare_step_in_if_stepping, ge, a5, Operand(StepIn));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -781,14 +790,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
- __ GetObjectType(a3, a3, a3);
- __ Branch(&old_generator, ne, a3, Operand(BYTECODE_ARRAY_TYPE));
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ lw(a0,
@@ -802,55 +812,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Jump(a2);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ Push(ra, fp);
- __ Move(fp, sp);
- __ Push(cp, a4);
-
- // Restore the operand stack.
- __ ld(a0, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
- __ ld(a3, FieldMemOperand(a0, FixedArray::kLengthOffset));
- __ SmiUntag(a3);
- __ Daddu(a0, a0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Dlsa(a3, a0, a3, kPointerSizeLog2);
- {
- Label done_loop, loop;
- __ bind(&loop);
- __ Branch(&done_loop, eq, a0, Operand(a3));
- __ ld(a5, MemOperand(a0));
- __ Push(a5);
- __ Branch(USE_DELAY_SLOT, &loop);
- __ daddiu(a0, a0, kPointerSize); // In delay slot.
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a5, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
-
- // Resume the generator function at the continuation.
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
- __ Daddu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(a2);
- __ Daddu(a3, a3, Operand(a2));
- __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
- __ Move(v0, a1); // Continuation expects generator object in v0.
- __ Jump(a3);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a2, a4);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
@@ -1063,6 +1029,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Operand(BYTECODE_ARRAY_TYPE));
}
+ // Reset code age.
+ DCHECK_EQ(0, BytecodeArray::kNoAgeBytecodeAge);
+ __ sb(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset));
+
// Load initial bytecode offset.
__ li(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1318,9 +1289,9 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ ld(kInterpreterBytecodeOffsetRegister,
- MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+ __ lw(
+ kInterpreterBytecodeOffsetRegister,
+ UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
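On MIPS64 a Smi keeps its 32-bit payload in the upper word of the tagged 64-bit value, so an lw through UntagSmiMemOperand loads the already-untagged integer in one instruction, replacing the ld + SmiUntag pair (the operand adjusts the byte offset for endianness). A model of the tagging arithmetic (assumption: shift-based model that ignores the endianness offset):

    #include <cstdint>
    #include <cstdio>

    // 64-bit Smi tagging model: the payload lives in the upper word, so
    // reading that word alone already yields the untagged value.
    int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }

    int32_t LoadAndUntag(const int64_t* slot) {
      // Equivalent of lw(reg, UntagSmiMemOperand(...)): one 32-bit load of
      // the high word replaces a 64-bit load plus an arithmetic shift.
      return static_cast<int32_t>(*slot >> 32);
    }

    int main() {
      int64_t tagged = SmiTag(42);
      std::printf("%d\n", LoadAndUntag(&tagged));  // 42
    }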
@@ -1399,11 +1370,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kOffsetToPreviousContext));
__ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Branch(&loop_bottom, ne, temp, Operand(native_context));
- // OSR id set to none?
- __ ld(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ Branch(&loop_bottom, ne, temp, Operand(Smi::FromInt(bailout_id)));
// Literals available?
__ ld(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1476,13 +1442,13 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ And(a5, a5,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lw(a5, FieldMemOperand(entry, Code::kFlagsOffset));
- __ And(a5, a5, Operand(Code::KindField::kMask));
- __ dsrl(a5, a5, Code::KindField::kShift);
- __ Branch(&gotta_call_runtime_no_stack, eq, a5, Operand(Code::BUILTIN));
- // Yes, install the full code.
+ __ Move(t1, masm->CodeObject());
+ __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+
+ // Install the SFI's code entry.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
@@ -1596,14 +1562,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(a0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1686,8 +1647,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it -> a6.
- __ ld(a6, MemOperand(sp, 0 * kPointerSize));
- __ SmiUntag(a6);
+ __ lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
// Switch on the state.
Label with_tos_register, unknown_state;
__ Branch(
@@ -1855,10 +1815,10 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) -
- kHeapObjectTag));
- __ SmiUntag(a1);
+ __ lw(a1,
+ UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@@ -1886,52 +1846,56 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// -- sp[8] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arg_array = a0;
+ Register receiver = a1;
+ Register this_arg = a2;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
// 1. Load receiver into a1, argArray into a0 (if present), remove all
// arguments from the stack (including the receiver), and push thisArg (if
// present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ mov(a3, a2);
- // Dlsa() cannot be used hare as scratch value used later.
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ Daddu(a0, sp, Operand(scratch));
- __ ld(a1, MemOperand(a0)); // receiver
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // thisArg
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // argArray
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ sd(a2, MemOperand(sp));
- __ mov(a0, a3);
+ // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(2 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(this_arg, arg_array); // Overwrite argc
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
+ __ ld(receiver, MemOperand(sp));
+ __ sd(this_arg, MemOperand(sp));
}
// ----------- S t a t e -------------
// -- a0 : argArray
// -- a1 : receiver
+ // -- a3 : undefined root value
// -- sp[0] : thisArg
// -----------------------------------
// 2. Make sure the receiver is actually callable.
Label receiver_not_callable;
- __ JumpIfSmi(a1, &receiver_not_callable);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(receiver, &receiver_not_callable);
+ __ ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
// 3. Tail call with no arguments if argArray is null or undefined.
Label no_arguments;
- __ JumpIfRoot(a0, Heap::kNullValueRootIndex, &no_arguments);
- __ JumpIfRoot(a0, Heap::kUndefinedValueRootIndex, &no_arguments);
+ __ JumpIfRoot(arg_array, Heap::kNullValueRootIndex, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
// 4a. Apply the receiver to the given argArray (passing undefined for
// new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ DCHECK(undefined_value.is(a3));
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 4b. The argArray is either null or undefined, so we tail call without any
@@ -1939,13 +1903,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ bind(&no_arguments);
{
__ mov(a0, zero_reg);
+ DCHECK(receiver.is(a1));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
// 4c. The receiver is not callable, throw an appropriate TypeError.
__ bind(&receiver_not_callable);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(receiver, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
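Instead of branching once per possibly-missing argument, the rewritten prologue claims enough dummy stack slots that a fixed-size Pop always succeeds, then uses Movz (move-if-zero) to substitute undefined for arguments the caller never pushed. A branch-free C++ model of the defaulting logic (nullptr standing in for undefined):

    #include <cstdio>

    // Movz semantics: dest receives src when cond is zero, else keeps dest.
    template <typename T>
    T Movz(T dest, T src, long cond) { return cond == 0 ? src : dest; }

    void NormalizeApplyArgs(long argc, const char*& this_arg,
                            const char*& arg_array) {
      const char* undefined = nullptr;
      long scratch = argc;
      arg_array = Movz(arg_array, undefined, scratch);  // if argc == 0
      this_arg = Movz(this_arg, undefined, scratch);    // if argc == 0
      --scratch;
      arg_array = Movz(arg_array, undefined, scratch);  // if argc == 1
    }

    int main() {
      const char *this_arg = "thisArg", *arg_array = "argArray";
      NormalizeApplyArgs(1, this_arg, arg_array);
      std::printf("%s %s\n", this_arg ? this_arg : "undefined",
                  arg_array ? arg_array : "undefined");  // thisArg undefined
    }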
@@ -1995,62 +1960,67 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : argumentsList
- // -- sp[4] : thisArgument
- // -- sp[8] : target
+ // -- sp[0] : argumentsList (if argc == 3)
+ // -- sp[4] : thisArgument (if argc >= 2)
+ // -- sp[8] : target (if argc >= 1)
// -- sp[12] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arguments_list = a0;
+ Register target = a1;
+ Register this_argument = a2;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, Heap::kUndefinedValueRootIndex);
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
// remove all arguments from the stack (including the receiver), and push
// thisArgument (if present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ mov(a2, a1);
- __ mov(a3, a1);
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ mov(a0, scratch);
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(zero_reg));
- __ Daddu(a0, sp, Operand(a0));
- __ ld(a1, MemOperand(a0)); // target
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // thisArgument
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // argumentsList
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ sd(a2, MemOperand(sp));
- __ mov(a0, a3);
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(target, this_argument, arguments_list);
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
+
+ __ sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
// -- a0 : argumentsList
// -- a1 : target
+ // -- a3 : undefined root value
// -- sp[0] : thisArgument
// -----------------------------------
// 2. Make sure the target is actually callable.
Label target_not_callable;
- __ JumpIfSmi(a1, &target_not_callable);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(target, &target_not_callable);
+ __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
// 3a. Apply the target to the given argumentsList (passing undefined for
// new.target).
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+ DCHECK(undefined_value.is(a3));
__ Jump(masm->isolate()->builtins()->Apply(), RelocInfo::CODE_TARGET);
// 3b. The target is not callable, throw an appropriate TypeError.
__ bind(&target_not_callable);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
@@ -2058,59 +2028,61 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : argc
- // -- sp[0] : new.target (optional)
- // -- sp[4] : argumentsList
- // -- sp[8] : target
+ // -- sp[0] : new.target (optional) (dummy value if argc <= 2)
+ // -- sp[4] : argumentsList (dummy value if argc <= 1)
+ // -- sp[8] : target (dummy value if argc == 0)
// -- sp[12] : receiver
// -----------------------------------
+ Register argc = a0;
+ Register arguments_list = a0;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+ Register scratch = a5;
// 1. Load target into a1 (if present), argumentsList into a0 (if present),
// new.target into a3 (if present, otherwise use target), remove all
// arguments from the stack (including the receiver), and push thisArgument
// (if present) instead.
{
- Label no_arg;
- Register scratch = a4;
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ mov(a2, a1);
- // Dlsa() cannot be used hare as scratch value used later.
- __ dsll(scratch, a0, kPointerSizeLog2);
- __ Daddu(a0, sp, Operand(scratch));
- __ sd(a2, MemOperand(a0)); // receiver
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a1, MemOperand(a0)); // target
- __ mov(a3, a1); // new.target defaults to target
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a2, MemOperand(a0)); // argumentsList
- __ Dsubu(a0, a0, Operand(kPointerSize));
- __ Branch(&no_arg, lt, a0, Operand(sp));
- __ ld(a3, MemOperand(a0)); // new.target
- __ bind(&no_arg);
- __ Daddu(sp, sp, Operand(scratch));
- __ mov(a0, a2);
+ // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+ // consistent state for a simple pop operation.
+
+ __ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ __ Dlsa(sp, sp, argc, kPointerSizeLog2);
+ __ mov(scratch, argc);
+ __ Pop(target, arguments_list, new_target);
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(new_target, target, scratch); // if argc == 1
+ __ Dsubu(scratch, scratch, Operand(1));
+ __ Movz(new_target, target, scratch); // if argc == 2
+
+ __ sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
// -- a0 : argumentsList
- // -- a3 : new.target
// -- a1 : target
+ // -- a3 : new.target
// -- sp[0] : receiver (undefined)
// -----------------------------------
// 2. Make sure the target is actually a constructor.
Label target_not_constructor;
- __ JumpIfSmi(a1, &target_not_constructor);
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ JumpIfSmi(target, &target_not_constructor);
+ __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
// 3. Make sure the target is actually a constructor.
Label new_target_not_constructor;
- __ JumpIfSmi(a3, &new_target_not_constructor);
- __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ JumpIfSmi(new_target, &new_target_not_constructor);
+ __ ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
__ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
@@ -2121,14 +2093,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// 4b. The target is not a constructor, throw an appropriate TypeError.
__ bind(&target_not_constructor);
{
- __ sd(a1, MemOperand(sp));
+ __ sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
- __ sd(a3, MemOperand(sp));
+ __ sd(new_target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
}
}
@@ -2167,63 +2139,90 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// -- sp[0] : thisArgument
// -----------------------------------
+ Register arguments_list = a0;
+ Register target = a1;
+ Register new_target = a3;
+
+ Register args = a0;
+ Register len = a2;
+
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
- __ JumpIfSmi(a0, &create_runtime);
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
+ __ JumpIfSmi(arguments_list, &create_runtime);
// Load the map of argumentsList into a2.
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ Register arguments_list_map = a2;
+ __ ld(arguments_list_map,
+ FieldMemOperand(arguments_list, HeapObject::kMapOffset));
// Load native context into a4.
- __ ld(a4, NativeContextMemOperand());
+ Register native_context = a4;
+ __ ld(native_context, NativeContextMemOperand());
// Check if argumentsList is an (unmodified) arguments object.
- __ ld(at, ContextMemOperand(a4, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
- __ ld(at, ContextMemOperand(a4, Context::STRICT_ARGUMENTS_MAP_INDEX));
- __ Branch(&create_arguments, eq, a2, Operand(at));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::STRICT_ARGUMENTS_MAP_INDEX));
+ __ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
// Check if argumentsList is a fast JSArray.
- __ ld(v0, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lbu(v0, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+ __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
// Ask the runtime to create the list (actually a FixedArray).
__ bind(&create_runtime);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a1, a3, a0);
+ __ Push(target, new_target, arguments_list);
__ CallRuntime(Runtime::kCreateListFromArrayLike);
- __ mov(a0, v0);
- __ Pop(a1, a3);
- __ ld(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
- __ SmiUntag(a2);
+ __ mov(arguments_list, v0);
+ __ Pop(target, new_target);
+ __ lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
}
__ Branch(&done_create);
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ ld(a2, FieldMemOperand(a0, JSArgumentsObject::kLengthOffset));
- __ ld(a4, FieldMemOperand(a0, JSObject::kElementsOffset));
- __ ld(at, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ lw(len, UntagSmiFieldMemOperand(arguments_list,
+ JSArgumentsObject::kLengthOffset));
+ __ ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+ __ lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ Branch(&create_runtime, ne, len, Operand(at));
+ __ mov(args, a4);
+
+ __ Branch(&done_create);
+
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that our prototype actually is Array.prototype.
+ __ bind(&create_holey_array);
+ __ ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ ld(at, ContextMemOperand(native_context,
+ Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&create_runtime, ne, a2, Operand(at));
- __ SmiUntag(a2);
- __ mov(a0, a4);
+ __ LoadRoot(at, Heap::kArrayProtectorRootIndex);
+ __ lw(a2, UntagSmiFieldMemOperand(at, PropertyCell::kValueOffset));
+ __ Branch(&create_runtime, ne, a2,
+ Operand(Smi::FromInt(Isolate::kProtectorValid)));
+ __ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
__ Branch(&done_create);
// Try to create the list from a JSArray object.
__ bind(&create_array);
- __ ld(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(a2);
+ __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(t1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ Branch(&create_runtime, hi, a2, Operand(FAST_ELEMENTS));
- __ Branch(&create_runtime, eq, a2, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ ld(a2, FieldMemOperand(a0, JSArray::kLengthOffset));
- __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
- __ SmiUntag(a2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
+ __ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
+ __ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
+ __ lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
__ bind(&done_create);
}
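The create_holey_array path added above may only elide holes when doing so is unobservable; the hunk checks the same two conditions on every architecture, sketched here in plain C++ (illustrative types; the value of Isolate::kProtectorValid is assumed):

    // Sketch of the holey fast-path guard: holes may later be rewritten to
    // undefined only if (a) the array's prototype is still the initial,
    // unmodified Array.prototype and (b) the array protector cell is intact.
    struct HoleyGuardInputs {
      const void* array_prototype;
      const void* initial_array_prototype;
      int protector_cell_value;
    };

    constexpr int kProtectorValid = 1;  // assumed value of Isolate::kProtectorValid

    bool CanTakeHoleyFastPath(const HoleyGuardInputs& in) {
      return in.array_prototype == in.initial_array_prototype &&
             in.protector_cell_value == kProtectorValid;
    }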
@@ -2238,7 +2237,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// here which will cause ip to become negative.
__ Dsubu(a4, sp, a4);
// Check if the arguments will overflow the stack.
- __ dsll(at, a2, kPointerSizeLog2);
+ __ dsll(at, len, kPointerSizeLog2);
__ Branch(&done, gt, a4, Operand(at)); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
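The overflow check above sizes the entire push up front rather than re-checking inside the copy loop; modeled with plain integers:

    #include <cstdint>

    // Model of the pre-push stack check: there must be more than
    // len * kPointerSize bytes of headroom between sp and the real stack
    // limit. The comparison is signed so that an sp already below the
    // limit (negative headroom) also fails.
    bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                             uint64_t len, int pointer_size = 8) {
      int64_t headroom = static_cast<int64_t>(sp - real_stack_limit);
      return headroom > static_cast<int64_t>(len * pointer_size);
    }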
@@ -2254,19 +2253,38 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- __ mov(a4, zero_reg);
- Label done, loop;
+ Label done, push, loop;
+ Register src = a4;
+ Register scratch = len;
+
+ __ daddiu(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Branch(&done, eq, len, Operand(zero_reg), i::USE_DELAY_SLOT);
+ __ mov(a0, len); // The 'len' argument for Call() or Construct().
+ __ dsll(scratch, len, kPointerSizeLog2);
+ __ Dsubu(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
- __ Branch(&done, eq, a4, Operand(a2));
- __ Dlsa(at, a0, a4, kPointerSizeLog2);
- __ ld(at, FieldMemOperand(at, FixedArray::kHeaderSize));
- __ Push(at);
- __ Daddu(a4, a4, Operand(1));
- __ Branch(&loop);
+ __ ld(a5, MemOperand(src));
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ daddiu(src, src, kPointerSize);
+ __ Push(a5);
+ __ Branch(&loop, ne, scratch, Operand(sp));
__ bind(&done);
- __ Move(a0, a4);
}
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (len)
+ // -- a1 : target
+ // -- a3 : new.target (checked to be constructor or undefined)
+ // -- sp[0] : args[len-1]
+ // -- sp[8] : args[len-2]
+ // ... : ...
+ // -- sp[8*(len-2)] : args[1]
+ // -- sp[8*(len-1)] : args[0]
+ // -----------------------------------
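Stripped of delay slots and the sp-based termination test, the push loop above reduces to a single pass that rewrites holes as it copies; a sketch in which a vector stands in for the FixedArray backing store and the JS stack:

    #include <cstdint>
    #include <vector>

    // Model of the argument-push loop: every element is pushed, with the
    // hole sentinel replaced by undefined so callees never observe it.
    void PushArguments(const std::vector<uint64_t>& elements,
                       std::vector<uint64_t>* js_stack,
                       uint64_t the_hole, uint64_t undefined) {
      for (uint64_t value : elements) {
        js_stack->push_back(value == the_hole ? undefined : value);
      }
    }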
+
// Dispatch to Call or Construct depending on whether new.target is undefined.
{
Label construct;
@@ -2344,9 +2362,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ld(caller_args_count_reg,
- MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(caller_args_count_reg);
+ __ lw(caller_args_count_reg,
+ UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Branch(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
@@ -2503,8 +2520,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Load [[BoundArguments]] into a2 and length of that into a4.
__ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2551,8 +2567,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
@@ -2665,8 +2680,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Load [[BoundArguments]] into a2 and length of that into a4.
__ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2714,8 +2728,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ ld(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ SmiUntag(a4);
+ __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index be1e67cc30..ca6cefcddf 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -338,8 +338,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
__ SmiUntag(r9);
@@ -490,8 +490,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(r9);
__ EnterBuiltinFrame(cp, r4, r9);
__ Push(r5); // first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r5);
__ LeaveBuiltinFrame(cp, r4, r9);
__ SmiUntag(r9);
@@ -587,8 +587,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(r4, r6);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mr(r7, r3);
__ Pop(r4, r6);
@@ -749,20 +749,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kResumeModeOffset), r0);
// Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r7, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ LoadByte(ip, MemOperand(ip), r0);
__ extsb(ip, ip);
- __ cmpi(ip, Operand(StepIn));
- __ bge(&prepare_step_in_if_stepping);
+ __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
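The stepping check above shrinks to a single byte test: rather than ranking the last step action against StepIn, the builtin reads one hook flag that the debugger sets whenever it wants a callback on function calls. Conceptually:

    // Model of the new debug check; the flag lives at a fixed external
    // address, and nonzero means Runtime::kDebugOnFunctionCall runs first.
    inline bool NeedsDebugHook(const unsigned char* debug_hook_flag) {
      return *debug_hook_flag != 0;
    }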
@@ -812,13 +811,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
- __ bne(&old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
@@ -829,62 +829,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ JumpToJSEntry(ip);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r7);
-
- // Restore the operand stack.
- __ LoadP(r3, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ addi(r3, r3,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r6, SetRC);
- __ beq(&done_loop, cr0);
- __ mtctr(r6);
- __ bind(&loop);
- __ LoadPU(ip, MemOperand(r3, kPointerSize));
- __ Push(ip);
- __ bdnz(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(ip, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset),
- r0);
-
- // Resume the generator function at the continuation.
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kCodeOffset));
- __ addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r6);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r5);
- __ add(r6, r6, r5);
- __ LoadSmiLiteral(r5,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
- r0);
- __ mr(r3, r4); // Continuation expects generator object in r3.
- __ Jump(r6);
- }
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r5, r7);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r4, r5);
__ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
}
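With the full-codegen resume path deleted above, bytecode is the only representation a suspended generator can have, so the old dispatch survives only as a debug assert and resumption always enters through the function's code entry. A rough model (illustrative types only):

    #include <cassert>

    // Sketch of the simplified resume path.
    struct GeneratorFn {
      bool has_bytecode;
      void (*code_entry)(void* new_target);
    };

    void ResumeGenerator(GeneratorFn* fn, void* generator_object) {
      assert(fn->has_bytecode);  // kMissingBytecodeArray otherwise
      // new.target is abused to pass the generator object to the entry.
      fn->code_entry(generator_object);
    }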
@@ -1099,12 +1048,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r3, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r8, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ StoreByte(r8, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ r0);
+
// Load initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1347,7 +1302,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Check function data field is actually a BytecodeArray object.
__ TestIfSmi(kInterpreterBytecodeArrayRegister, r0);
- __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
+ __ Assert(ne, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, cr0);
__ CompareObjectType(kInterpreterBytecodeArrayRegister, r4, no_reg,
BYTECODE_ARRAY_TYPE);
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
@@ -1434,13 +1389,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ bne(&loop_bottom);
- // OSR id set to none?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
- __ bne(&loop_bottom);
// Literals available?
__ LoadP(temp,
FieldMemOperand(array_pointer,
@@ -1507,13 +1455,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ TestBit(r8, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
__ bne(&gotta_call_runtime, cr0);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ lwz(r8, FieldMemOperand(entry, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(r8);
- __ cmpi(r8, Operand(Code::BUILTIN));
+ __ mov(r8, Operand(masm->CodeObject()));
+ __ cmp(entry, r8);
__ beq(&gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r8);
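The tier-up check above no longer decodes Code::KindField: the builtin compares the SharedFunctionInfo's code against the CompileLazy builtin itself (the code currently executing) and installs anything else it finds. In plain C++:

    // Model of the new fast path: any code object other than the
    // CompileLazy sentinel is real compiled code and can be installed on
    // the closure; only the sentinel falls through to the runtime.
    struct CodeRef { const void* header; };

    bool TryInstallSharedCode(CodeRef sfi_code, CodeRef compile_lazy_builtin,
                              CodeRef* closure_code_entry) {
      if (sfi_code.header == compile_lazy_builtin.header) {
        return false;  // gotta_call_runtime: compile for real
      }
      *closure_code_entry = sfi_code;  // the builtin also emits a write barrier
      return true;
    }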
@@ -1627,14 +1576,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(ip);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
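The consolidated macro halves the generated code-age builtins: the even/odd marking split is gone and each age gets a single generator. Assuming CODE_AGE_LIST expands a one-argument V over age names like Quadragenarian, a standalone demo of the new shape:

    #include <cstdio>

    // Demo of the consolidated macro pattern (age names assumed).
    #define CODE_AGE_LIST(V) V(Quadragenarian) V(Quinquagenarian)

    #define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)       \
      void Generate_Make##C##CodeYoungAgain() {        \
        std::puts("MakeCodeYoungAgainCommon for " #C); \
      }
    CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
    #undef DEFINE_CODE_AGE_BUILTIN_GENERATOR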
@@ -2219,7 +2163,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r3, &create_runtime);
// Load the map of argumentsList into r5.
@@ -2263,17 +2208,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mr(r3, r7);
__ b(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that our prototype actually is Array.prototype.
+ __ bind(&create_holey_array);
+ __ LoadP(r5, FieldMemOperand(r5, Map::kPrototypeOffset));
+ __ LoadP(r7, ContextMemOperand(r7, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ cmp(r5, r7);
+ __ bne(&create_runtime);
+ __ LoadRoot(r7, Heap::kArrayProtectorRootIndex);
+ __ LoadP(r5, FieldMemOperand(r7, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(r5, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&create_runtime);
+ __ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
+ __ SmiUntag(r5);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+ // -- r5 and r7 must be preserved until the bne to create_holey_array.
__ bind(&create_array);
- __ lbz(r5, FieldMemOperand(r5, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r5);
+ __ lbz(r8, FieldMemOperand(r5, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r8);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmpi(r5, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ cmpi(r8, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&create_runtime);
- __ cmpi(r5, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ beq(&create_runtime);
+ // Only FAST_XXX kinds remain here; the FAST_HOLEY_XXX kinds are odd values.
+ __ TestBit(r8, Map::kHasNonInstancePrototype, r0);
+ __ bne(&create_holey_array, cr0);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ LoadP(r5, FieldMemOperand(r3, JSArray::kLengthOffset));
__ LoadP(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
__ SmiUntag(r5);
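The PPC variant above saves two compare-and-branch pairs by exploiting the elements-kind numbering: within the fast range the holey kinds are exactly the odd values, so after the upper-bound check a single test of bit 0 (the bit-index constant used evaluates to 0 here) routes holey arrays. In plain C++:

    // Model of the odd-value dispatch over the fast elements kinds.
    enum FastElementsKind {
      FAST_SMI_ELEMENTS = 0,
      FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,
      FAST_HOLEY_ELEMENTS = 3,
    };

    inline bool IsHoleyFastKind(int kind) {
      return (kind & 1) != 0;  // valid only once kind <= FAST_HOLEY_ELEMENTS
    }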
@@ -2308,15 +2273,20 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label loop, no_args;
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ Label loop, no_args, skip;
__ cmpi(r5, Operand::Zero());
__ beq(&no_args);
__ addi(r3, r3,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ mtctr(r5);
__ bind(&loop);
- __ LoadPU(r0, MemOperand(r3, kPointerSize));
- __ push(r0);
+ __ LoadPU(ip, MemOperand(r3, kPointerSize));
+ __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ __ mr(ip, r9);
+ __ bind(&skip);
+ __ push(ip);
__ bdnz(&loop);
__ bind(&no_args);
__ mr(r3, r5);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 8655ab8d79..2b7c4a5b10 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -334,11 +334,11 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
__ SmiUntag(r8);
@@ -484,11 +484,11 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ bind(&new_object);
{
FrameScope scope(masm, StackFrame::MANUAL);
- FastNewObjectStub stub(masm->isolate());
__ SmiTag(r8);
__ EnterBuiltinFrame(cp, r3, r8);
__ Push(r4); // first argument
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(r4);
__ LeaveBuiltinFrame(cp, r3, r8);
__ SmiUntag(r8);
@@ -584,8 +584,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(r3, r5);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ LoadRR(r6, r2);
__ Pop(r3, r5);
@@ -748,19 +748,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r6, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ mov(ip, Operand(last_step_action));
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ mov(ip, Operand(debug_hook));
__ LoadB(ip, MemOperand(ip));
- __ CmpP(ip, Operand(StepIn));
- __ bge(&prepare_step_in_if_stepping);
+ __ CmpSmiLiteral(ip, Smi::kZero, r0);
+ __ bne(&prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
@@ -811,13 +810,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
- __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
- __ bne(&old_generator, Label::kNear);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
+ __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
+ __ Assert(eq, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
@@ -827,61 +827,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ LoadP(ip, FieldMemOperand(r3, JSFunction::kCodeEntryOffset));
__ JumpToJSEntry(ip);
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushStandardFrame(r6);
-
- // Restore the operand stack.
- __ LoadP(r2, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r5, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ AddP(r2, r2,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- {
- Label loop, done_loop;
- __ SmiUntag(r5);
- __ LoadAndTestP(r5, r5);
- __ beq(&done_loop);
- __ LoadRR(r1, r5);
- __ bind(&loop);
- __ LoadP(ip, MemOperand(r2, kPointerSize));
- __ la(r2, MemOperand(r2, kPointerSize));
- __ Push(ip);
- __ BranchOnCount(r1, &loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(ip, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(ip, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset),
- r0);
-
- // Resume the generator function at the continuation.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset));
- __ AddP(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r4);
- __ AddP(r5, r5, r4);
- __ LoadSmiLiteral(r4,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ LoadRR(r2, r3); // Continuation expects generator object in r2.
- __ Jump(r5);
- }
- }
__ bind(&prepare_step_in_if_stepping);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4, r6);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(r3, r4);
__ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
}
@@ -1106,6 +1057,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(eq, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov(r1, Operand(BytecodeArray::kNoAgeBytecodeAge));
+ __ StoreByte(r1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ r0);
+
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
@@ -1437,13 +1394,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ CmpP(temp, native_context);
__ bne(&loop_bottom, Label::kNear);
- // OSR id set to none?
- __ LoadP(temp,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ CmpSmiLiteral(temp, Smi::FromInt(bailout_id), r0);
- __ bne(&loop_bottom, Label::kNear);
// Literals available?
__ LoadP(temp,
FieldMemOperand(array_pointer,
@@ -1510,13 +1460,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
__ bne(&gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ LoadP(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ LoadlW(r7, FieldMemOperand(entry, Code::kFlagsOffset));
- __ DecodeField<Code::KindField>(r7);
- __ CmpP(r7, Operand(Code::BUILTIN));
+ __ mov(r7, Operand(masm->CodeObject()));
+ __ CmpP(entry, r7);
__ beq(&gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(closure, entry, r7);
@@ -1632,14 +1583,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ Jump(ip);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -2228,7 +2174,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(r2, &create_runtime);
// Load the map of argumentsList into r4.
@@ -2272,17 +2219,37 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ LoadRR(r2, r6);
__ b(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that our prototype actually is Array.prototype.
+ __ bind(&create_holey_array);
+ __ LoadP(r4, FieldMemOperand(r4, Map::kPrototypeOffset));
+ __ LoadP(r6, ContextMemOperand(r6, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ CmpP(r4, r6);
+ __ bne(&create_runtime);
+ __ LoadRoot(r6, Heap::kArrayProtectorRootIndex);
+ __ LoadP(r4, FieldMemOperand(r6, PropertyCell::kValueOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(Isolate::kProtectorValid), r0);
+ __ bne(&create_runtime);
+ __ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
+ __ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
+ __ SmiUntag(r4);
+ __ b(&done_create);
+
// Try to create the list from a JSArray object.
+ // -- r4 and r6 must be preserved until the bne to create_holey_array.
__ bind(&create_array);
- __ LoadlB(r4, FieldMemOperand(r4, Map::kBitField2Offset));
- __ DecodeField<Map::ElementsKindBits>(r4);
+ __ LoadlB(r7, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ DecodeField<Map::ElementsKindBits>(r7);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ CmpP(r4, Operand(FAST_ELEMENTS));
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+ __ CmpP(r7, Operand(FAST_HOLEY_ELEMENTS));
__ bgt(&create_runtime);
- __ CmpP(r4, Operand(FAST_HOLEY_SMI_ELEMENTS));
- __ beq(&create_runtime);
+ // Only FAST_XXX kinds remain here; the FAST_HOLEY_XXX kinds are odd values.
+ __ TestBit(r7, Map::kHasNonInstancePrototype, r0);
+ __ bne(&create_holey_array);
+ // FAST_SMI_ELEMENTS or FAST_ELEMENTS after this point.
__ LoadP(r4, FieldMemOperand(r2, JSArray::kLengthOffset));
__ LoadP(r2, FieldMemOperand(r2, JSArray::kElementsOffset));
__ SmiUntag(r4);
@@ -2317,16 +2284,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
- Label loop, no_args;
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ Label loop, no_args, skip;
__ CmpP(r4, Operand::Zero());
__ beq(&no_args);
__ AddP(r2, r2,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ LoadRR(r1, r4);
__ bind(&loop);
- __ LoadP(r0, MemOperand(r2, kPointerSize));
+ __ LoadP(ip, MemOperand(r2, kPointerSize));
__ la(r2, MemOperand(r2, kPointerSize));
- __ push(r0);
+ __ CompareRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip, Label::kNear);
+ __ LoadRR(ip, r8);
+ __ bind(&skip);
+ __ push(ip);
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
__ LoadRR(r2, r4);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index cde02647ac..1404a9b4c9 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -137,8 +137,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(rdi);
__ Push(rdx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ movp(rbx, rax);
__ Pop(rdx);
__ Pop(rdi);
@@ -151,9 +151,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Retrieve smi-tagged arguments count from the stack.
__ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
- }
- if (create_implicit_receiver) {
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
@@ -460,18 +458,18 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rbx, JSGeneratorObject::kResumeModeOffset), rdx);
// Load suspended function and context.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- Operand last_step_action_operand = masm->ExternalOperand(last_step_action);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ Operand debug_hook_operand = masm->ExternalOperand(debug_hook);
STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(last_step_action_operand, Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ __ cmpb(debug_hook_operand, Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -514,14 +512,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(rcx, BYTECODE_ARRAY_TYPE, rcx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object.
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(rax);
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -534,60 +533,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object.
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(rax); // Return address.
- __ Push(rbp); // Caller's frame pointer.
- __ Move(rbp, rsp);
- __ Push(rsi); // Callee's context.
- __ Push(rdi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
- __ SmiToInteger32(rax, FieldOperand(rsi, FixedArray::kLengthOffset));
- {
- Label done_loop, loop;
- __ Set(rcx, 0);
- __ bind(&loop);
- __ cmpl(rcx, rax);
- __ j(equal, &done_loop, Label::kNear);
- __ Push(
- FieldOperand(rsi, rcx, times_pointer_size, FixedArray::kHeaderSize));
- __ addl(rcx, Immediate(1));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ LoadRoot(FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset),
- Heap::kEmptyFixedArrayRootIndex);
-
- // Restore context.
- __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
-
- // Resume the generator function at the continuation.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ SmiToInteger64(
- rcx, FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ leap(rdx, FieldOperand(rdx, rcx, times_1, Code::kHeaderSize));
- __ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ movp(rax, rbx); // Continuation expects generator object in rax.
- __ jmp(rdx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(rbx);
__ Push(rdx);
__ Push(rdi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(rdx);
__ Pop(rbx);
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
@@ -689,6 +641,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ movb(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Load initial bytecode offset.
__ movp(kInterpreterBytecodeOffsetRegister,
Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
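The age reset added on every interpreter entry (mirrored across the architectures in this commit) keeps actively executed bytecode from being flushed by the aging sweep; as a model, assuming kNoAgeBytecodeAge is the zero/youngest encoding:

    #include <cstdint>

    constexpr uint8_t kNoAgeBytecodeAge = 0;  // assumed youngest encoding

    struct BytecodeArrayModel { uint8_t bytecode_age; };

    // Sketch: entering the interpreter marks the bytecode young again so
    // the aging sweep will not discard bytecode that is still running.
    void OnInterpreterEntry(BytecodeArrayModel* bytecode) {
      bytecode->bytecode_age = kNoAgeBytecodeAge;
    }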
@@ -1060,13 +1017,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmpp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ movp(temp, FieldOperand(map, index, times_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- __ SmiToInteger32(temp, temp);
- const int bailout_id = BailoutId::None().ToInt();
- __ cmpl(temp, Immediate(bailout_id));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ movp(temp, FieldOperand(map, index, times_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1126,14 +1076,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ testb(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ movp(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ movl(rbx, FieldOperand(entry, Code::kFlagsOffset));
- __ andl(rbx, Immediate(Code::KindField::kMask));
- __ shrl(rbx, Immediate(Code::KindField::kShift));
- __ cmpl(rbx, Immediate(Code::BUILTIN));
+ __ Move(rbx, masm->CodeObject());
+ __ cmpp(entry, rbx);
__ j(equal, &gotta_call_runtime);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ leap(entry, FieldOperand(entry, Code::kHeaderSize));
__ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, r15);
@@ -1166,7 +1116,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve argument count for later compare.
- __ movp(kScratchRegister, rax);
+ __ movp(rcx, rax);
// Push the number of arguments to the callee.
__ Integer32ToSmi(rax, rax);
__ Push(rax);
@@ -1181,7 +1131,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
for (int j = 0; j < 4; ++j) {
Label over;
if (j < 3) {
- __ cmpp(kScratchRegister, Immediate(j));
+ __ cmpp(rcx, Immediate(j));
__ j(not_equal, &over, Label::kNear);
}
for (int i = j - 1; i >= 0; --i) {
@@ -1204,13 +1154,13 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(rax, &failed, Label::kNear);
__ Drop(2);
- __ Pop(kScratchRegister);
- __ SmiToInteger32(kScratchRegister, kScratchRegister);
+ __ Pop(rcx);
+ __ SmiToInteger32(rcx, rcx);
scope.GenerateLeaveFrame();
__ PopReturnAddressTo(rbx);
- __ incp(kScratchRegister);
- __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ incp(rcx);
+ __ leap(rsp, Operand(rsp, rcx, times_pointer_size, 0));
__ PushReturnAddressFrom(rbx);
__ ret(0);
@@ -1248,14 +1198,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1931,8 +1876,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
}
@@ -2086,8 +2031,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(rsi, rdi, r8);
__ Push(rbx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(rax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(rsi, rdi, r8);
}
@@ -2292,7 +2237,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(rax, &create_runtime);
// Load the map of argumentsList into rcx.
@@ -2335,6 +2281,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ movp(rax, rcx);
__ jmp(&done_create);
+ __ bind(&create_holey_array);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that our prototype actually is Array.prototype.
+ __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ cmpp(rcx, ContextOperand(rbx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(rcx, Heap::kArrayProtectorRootIndex);
+ __ Cmp(FieldOperand(rcx, PropertyCell::kValueOffset),
+ Smi::FromInt(Isolate::kProtectorValid));
+ __ j(not_equal, &create_runtime);
+ __ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
+ __ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
@@ -2342,10 +2303,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmpl(rcx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmpl(rcx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array);
+ __ cmpl(rcx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array);
+ __ j(above, &create_runtime);
__ SmiToInteger32(rbx, FieldOperand(rax, JSArray::kLengthOffset));
__ movp(rax, FieldOperand(rax, JSArray::kElementsOffset));
@@ -2383,12 +2346,18 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
{
__ PopReturnAddressTo(r8);
__ Set(rcx, 0);
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmpl(rcx, rbx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(rax, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ movp(r9, FieldOperand(rax, rcx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(r9);
__ incl(rcx);
__ jmp(&loop);
__ bind(&done);
diff --git a/deps/v8/src/builtins/x87/OWNERS b/deps/v8/src/builtins/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/builtins/x87/OWNERS
+++ b/deps/v8/src/builtins/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/builtins/x87/builtins-x87.cc b/deps/v8/src/builtins/x87/builtins-x87.cc
index 2187f86f61..9071beb59d 100644
--- a/deps/v8/src/builtins/x87/builtins-x87.cc
+++ b/deps/v8/src/builtins/x87/builtins-x87.cc
@@ -135,8 +135,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Allocate the new receiver object.
__ Push(edi);
__ Push(edx);
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ mov(ebx, eax);
__ Pop(edx);
__ Pop(edi);
@@ -387,17 +387,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(ebx, JSGeneratorObject::kResumeModeOffset), edx);
// Load suspended function and context.
- __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
Label stepping_prepared;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(masm->isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- __ cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- __ j(greater_equal, &prepare_step_in_if_stepping);
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ cmpb(Operand::StaticVariable(debug_hook), Immediate(0));
+ __ j(not_equal, &prepare_step_in_if_stepping);
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
@@ -438,19 +437,20 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&done_loop);
}
- // Dispatch on the kind of generator object.
- Label old_generator;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
- __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
- __ j(not_equal, &old_generator);
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kFunctionDataOffset));
+ __ CmpObjectType(ecx, BYTECODE_ARRAY_TYPE, ecx);
+ __ Assert(equal, kMissingBytecodeArray);
+ }
- // New-style (ignition/turbofan) generator object
+ // Resume (Ignition/TurboFan) generator object.
{
__ PushReturnAddressFrom(eax);
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(eax,
- FieldOperand(ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@@ -458,56 +458,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
- // Old-style (full-codegen) generator object
- __ bind(&old_generator);
- {
- // Enter a new JavaScript frame, and initialize its slots as they were when
- // the generator was suspended.
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PushReturnAddressFrom(eax); // Return address.
- __ Push(ebp); // Caller's frame pointer.
- __ Move(ebp, esp);
- __ Push(esi); // Callee's context.
- __ Push(edi); // Callee's JS Function.
-
- // Restore the operand stack.
- __ mov(eax, FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset));
- {
- Label done_loop, loop;
- __ Move(ecx, Smi::kZero);
- __ bind(&loop);
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(equal, &done_loop, Label::kNear);
- __ Push(FieldOperand(eax, ecx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ add(ecx, Immediate(Smi::FromInt(1)));
- __ jmp(&loop);
- __ bind(&done_loop);
- }
-
- // Reset operand stack so we don't leak.
- __ mov(FieldOperand(ebx, JSGeneratorObject::kOperandStackOffset),
- Immediate(masm->isolate()->factory()->empty_fixed_array()));
-
- // Resume the generator function at the continuation.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
- __ mov(ecx, FieldOperand(ebx, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(ecx);
- __ lea(edx, FieldOperand(edx, ecx, times_1, Code::kHeaderSize));
- __ mov(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ mov(eax, ebx); // Continuation expects generator object in eax.
- __ jmp(edx);
- }
-
__ bind(&prepare_step_in_if_stepping);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(ebx);
__ Push(edx);
__ Push(edi);
- __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
__ Pop(edx);
__ Pop(ebx);
__ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -606,6 +563,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Assert(equal, kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
+ // Reset code age.
+ __ mov_b(FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kBytecodeAgeOffset),
+ Immediate(BytecodeArray::kNoAgeBytecodeAge));
+
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
// Push Smi tagged initial bytecode array offset.
@@ -1093,12 +1055,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ j(not_equal, &loop_bottom);
- // OSR id set to none?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousOsrAstId));
- const int bailout_id = BailoutId::None().ToInt();
- __ cmp(temp, Immediate(Smi::FromInt(bailout_id)));
- __ j(not_equal, &loop_bottom);
// Literals available?
__ mov(temp, FieldOperand(map, index, times_half_pointer_size,
SharedFunctionInfo::kOffsetToPreviousLiterals));
@@ -1166,14 +1122,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
__ j(not_zero, &gotta_call_runtime_no_stack);
- // Is the full code valid?
+
+ // If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
- __ mov(ebx, FieldOperand(entry, Code::kFlagsOffset));
- __ and_(ebx, Code::KindField::kMask);
- __ shr(ebx, Code::KindField::kShift);
- __ cmp(ebx, Immediate(Code::BUILTIN));
+ __ Move(ebx, masm->CodeObject());
+ __ cmp(entry, ebx);
__ j(equal, &gotta_call_runtime_no_stack);
- // Yes, install the full code.
+
+ // Install the SFI's code entry.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
__ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
__ RecordWriteCodeEntryField(closure, entry, ebx);
@@ -1295,14 +1251,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
__ ret(0);
}
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
- void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
- } \
- void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+ void Builtins::Generate_Make##C##CodeYoungAgain(MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
}
CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
@@ -1986,8 +1937,8 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterBuiltinFrame(esi, edi, ecx);
__ Push(ebx); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ecx);
}
@@ -2149,8 +2100,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ SmiTag(ebx);
__ EnterBuiltinFrame(esi, edi, ebx);
__ Push(eax); // the first argument
- FastNewObjectStub stub(masm->isolate());
- __ CallStub(&stub);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
__ Pop(FieldOperand(eax, JSValue::kValueOffset));
__ LeaveBuiltinFrame(esi, edi, ebx);
__ SmiUntag(ebx);
@@ -2210,7 +2161,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Create the list of arguments from the array-like argumentsList.
{
- Label create_arguments, create_array, create_runtime, done_create;
+ Label create_arguments, create_array, create_holey_array, create_runtime,
+ done_create;
__ JumpIfSmi(eax, &create_runtime);
// Load the map of argumentsList into ecx.
@@ -2254,6 +2206,22 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ mov(eax, ecx);
__ jmp(&done_create);
+ // For holey JSArrays we need to check that the array prototype chain
+ // protector is intact and that our prototype actually is Array.prototype.
+ __ bind(&create_holey_array);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ cmp(ecx, ContextOperand(ebx, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
+ __ j(not_equal, &create_runtime);
+ __ LoadRoot(ecx, Heap::kArrayProtectorRootIndex);
+ __ cmp(FieldOperand(ecx, PropertyCell::kValueOffset),
+ Immediate(Smi::FromInt(Isolate::kProtectorValid)));
+ __ j(not_equal, &create_runtime);
+ __ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
+ __ SmiUntag(ebx);
+ __ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
+ __ jmp(&done_create);
+
// Try to create the list from a JSArray object.
__ bind(&create_array);
__ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
@@ -2261,10 +2229,12 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
- __ cmp(ecx, Immediate(FAST_ELEMENTS));
- __ j(above, &create_runtime);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
__ cmp(ecx, Immediate(FAST_HOLEY_SMI_ELEMENTS));
- __ j(equal, &create_runtime);
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ cmp(ecx, Immediate(FAST_HOLEY_ELEMENTS));
+ __ j(equal, &create_holey_array, Label::kNear);
+ __ j(above, &create_runtime);
__ mov(ebx, FieldOperand(eax, JSArray::kLengthOffset));
__ SmiUntag(ebx);
__ mov(eax, FieldOperand(eax, JSArray::kElementsOffset));
@@ -2303,26 +2273,38 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Push arguments onto the stack (thisArgument is already on the stack).
{
+ // Save edx/edi to the x87 stack (st(0)/st(1)).
__ push(edx);
+ __ push(edi);
__ fld_s(MemOperand(esp, 0));
- __ lea(esp, Operand(esp, kFloatSize));
+ __ fld_s(MemOperand(esp, 4));
+ __ lea(esp, Operand(esp, 2 * kFloatSize));
__ PopReturnAddressTo(edx);
__ Move(ecx, Immediate(0));
- Label done, loop;
+ Label done, push, loop;
__ bind(&loop);
__ cmp(ecx, ebx);
__ j(equal, &done, Label::kNear);
- __ Push(
- FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ // Turn the hole into undefined as we go.
+ __ mov(edi,
+ FieldOperand(eax, ecx, times_pointer_size, FixedArray::kHeaderSize));
+ __ CompareRoot(edi, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &push, Label::kNear);
+ __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
+ __ bind(&push);
+ __ Push(edi);
__ inc(ecx);
__ jmp(&loop);
__ bind(&done);
__ PushReturnAddressFrom(edx);
- __ lea(esp, Operand(esp, -kFloatSize));
+    // Restore edx/edi from x87 stack slots st(0)/st(1).
+ __ lea(esp, Operand(esp, -2 * kFloatSize));
__ fstp_s(MemOperand(esp, 0));
+ __ fstp_s(MemOperand(esp, 4));
__ pop(edx);
+ __ pop(edi);
__ Move(eax, ebx);
}
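Conceptually, the new push loop copies the backing store onto the stack while normalizing holes, which is why the holey-elements fast path is safe once the protector check has passed. A plain-C++ sketch of that copy; kTheHole and kUndefined are illustrative sentinels, not V8 values:

#include <cstddef>
#include <vector>

constexpr int kTheHole = -1;
constexpr int kUndefined = 0;

// Walk the (possibly holey) elements array and push each element,
// turning holes into undefined as we go, like the loop above.
void PushArguments(const int* elements, std::size_t length,
                   std::vector<int>* stack) {
  for (std::size_t i = 0; i < length; ++i) {
    int value = elements[i];
    stack->push_back(value == kTheHole ? kUndefined : value);
  }
}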
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index ea351f8908..b0387f4dc0 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -93,13 +93,36 @@ void CancelableTaskManager::CancelAndWait() {
}
}
+CancelableTaskManager::TryAbortResult CancelableTaskManager::TryAbortAll() {
+  // Clean up all cancelable fore- and background tasks. Tasks are canceled
+  // along the way where possible, i.e., if they have not started running yet.
+ base::LockGuard<base::Mutex> guard(&mutex_);
+
+ if (cancelable_tasks_.empty()) return kTaskRemoved;
+
+ for (auto it = cancelable_tasks_.begin(); it != cancelable_tasks_.end();) {
+ if (it->second->Cancel()) {
+ it = cancelable_tasks_.erase(it);
+ } else {
+ ++it;
+ }
+ }
+
+ return cancelable_tasks_.empty() ? kTaskAborted : kTaskRunning;
+}
CancelableTask::CancelableTask(Isolate* isolate)
- : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+ : CancelableTask(isolate, isolate->cancelable_task_manager()) {}
+CancelableTask::CancelableTask(Isolate* isolate, CancelableTaskManager* manager)
+ : Cancelable(manager), isolate_(isolate) {}
CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
- : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+ : CancelableIdleTask(isolate, isolate->cancelable_task_manager()) {}
+
+CancelableIdleTask::CancelableIdleTask(Isolate* isolate,
+ CancelableTaskManager* manager)
+ : Cancelable(manager), isolate_(isolate) {}
} // namespace internal
} // namespace v8
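The loop's contract is easiest to see in isolation. A minimal sketch over a std::map mirroring the erase-or-advance iteration and the three-way result; FakeTask is an invented stand-in, not a V8 type:

#include <map>

// Stand-in for a cancelable task: Cancel() succeeds only while the
// task has not started running.
struct FakeTask {
  bool started = false;
  bool Cancel() { return !started; }
};

enum TryAbortResult { kTaskRemoved, kTaskAborted, kTaskRunning };

// Mirrors TryAbortAll above: canceled entries are erased; entries that
// are already running are skipped and force a kTaskRunning result.
TryAbortResult TryAbortAll(std::map<int, FakeTask*>* tasks) {
  if (tasks->empty()) return kTaskRemoved;
  for (auto it = tasks->begin(); it != tasks->end();) {
    if (it->second->Cancel()) {
      it = tasks->erase(it);
    } else {
      ++it;
    }
  }
  return tasks->empty() ? kTaskAborted : kTaskRunning;
}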
diff --git a/deps/v8/src/cancelable-task.h b/deps/v8/src/cancelable-task.h
index 65f98e7662..5b1a5f1def 100644
--- a/deps/v8/src/cancelable-task.h
+++ b/deps/v8/src/cancelable-task.h
@@ -45,6 +45,17 @@ class V8_EXPORT_PRIVATE CancelableTaskManager {
// already running. This disallows subsequent Register calls.
void CancelAndWait();
+  // Tries to cancel all remaining registered tasks. The return value
+  // indicates which of the following holds:
+  //
+  // 1) No tasks were registered (kTaskRemoved), or
+  //
+  // 2) There is at least one remaining task that couldn't be canceled
+  //    (kTaskRunning), or
+  //
+  // 3) All registered tasks were canceled (kTaskAborted).
+ TryAbortResult TryAbortAll();
+
private:
  // Only called by the {Cancelable} destructor. The task is done executing,
  // but still needs to be removed from the manager.
@@ -123,9 +134,11 @@ class V8_EXPORT_PRIVATE Cancelable {
// Multiple inheritance can be used because Task is a pure interface.
-class CancelableTask : public Cancelable, public Task {
+class V8_EXPORT_PRIVATE CancelableTask : public Cancelable,
+ NON_EXPORTED_BASE(public Task) {
public:
explicit CancelableTask(Isolate* isolate);
+ CancelableTask(Isolate* isolate, CancelableTaskManager* manager);
// Task overrides.
void Run() final {
@@ -148,6 +161,7 @@ class CancelableTask : public Cancelable, public Task {
class CancelableIdleTask : public Cancelable, public IdleTask {
public:
explicit CancelableIdleTask(Isolate* isolate);
+ CancelableIdleTask(Isolate* isolate, CancelableTaskManager* manager);
// IdleTask overrides.
void Run(double deadline_in_seconds) final {
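As a usage illustration: the new two-argument constructors let a task attach to a caller-supplied CancelableTaskManager instead of the isolate-wide one. A minimal sketch, assuming the class's usual pure-virtual RunInternal() hook; SweeperTask is an invented name:

namespace v8 {
namespace internal {

// Hypothetical task bound to a custom manager via the new two-argument
// constructor; everything else is inherited unchanged.
class SweeperTask final : public CancelableTask {
 public:
  SweeperTask(Isolate* isolate, CancelableTaskManager* manager)
      : CancelableTask(isolate, manager) {}

  void RunInternal() override {
    // Actual work goes here; the base Run() only reaches this when the
    // task was not canceled first.
  }
};

}  // namespace internal
}  // namespace v8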
diff --git a/deps/v8/src/code-events.h b/deps/v8/src/code-events.h
index 94f7dbdfc0..db43d88b68 100644
--- a/deps/v8/src/code-events.h
+++ b/deps/v8/src/code-events.h
@@ -90,7 +90,7 @@ class CodeEventListener {
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
Name* name) = 0;
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
- SharedFunctionInfo* shared, Name* name) = 0;
+ SharedFunctionInfo* shared, Name* source) = 0;
virtual void CodeCreateEvent(LogEventsAndTags tag, AbstractCode* code,
SharedFunctionInfo* shared, Name* source,
int line, int column) = 0;
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 128c709998..ab652964c6 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -22,9 +22,15 @@ Callable make_callable(Stub& stub) {
} // namespace
// static
+Handle<Code> CodeFactory::RuntimeCEntry(Isolate* isolate, int result_size) {
+ CEntryStub stub(isolate, result_size);
+ return stub.GetCode();
+}
+
+// static
Callable CodeFactory::LoadIC(Isolate* isolate) {
- LoadICTrampolineStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->LoadICTrampoline(),
+ LoadDescriptor(isolate));
}
// static
@@ -35,33 +41,38 @@ Callable CodeFactory::ApiGetter(Isolate* isolate) {
// static
Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate) {
- LoadICStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->LoadIC(),
+ LoadWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) {
- LoadGlobalICTrampolineStub stub(isolate, LoadGlobalICState(typeof_mode));
- return make_callable(stub);
+ return Callable(
+ typeof_mode == NOT_INSIDE_TYPEOF
+ ? isolate->builtins()->LoadGlobalICTrampoline()
+ : isolate->builtins()->LoadGlobalICInsideTypeofTrampoline(),
+ LoadGlobalDescriptor(isolate));
}
// static
Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate,
TypeofMode typeof_mode) {
- LoadGlobalICStub stub(isolate, LoadGlobalICState(typeof_mode));
- return make_callable(stub);
+ return Callable(typeof_mode == NOT_INSIDE_TYPEOF
+ ? isolate->builtins()->LoadGlobalIC()
+ : isolate->builtins()->LoadGlobalICInsideTypeof(),
+ LoadGlobalWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
- KeyedLoadICTrampolineTFStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->KeyedLoadICTrampoline(),
+ LoadDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
- KeyedLoadICTFStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->KeyedLoadIC(),
+ LoadWithVectorDescriptor(isolate));
}
// static
@@ -87,53 +98,46 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate,
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
- StoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
+ return Callable(language_mode == STRICT
+ ? isolate->builtins()->StoreICStrictTrampoline()
+ : isolate->builtins()->StoreICTrampoline(),
+ StoreDescriptor(isolate));
}
// static
Callable CodeFactory::StoreICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
- StoreICStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
+ return Callable(language_mode == STRICT ? isolate->builtins()->StoreICStrict()
+ : isolate->builtins()->StoreIC(),
+ StoreWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
- if (FLAG_tf_store_ic_stub) {
- KeyedStoreICTrampolineTFStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
- }
- KeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
+ return Callable(language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreICStrictTrampoline()
+ : isolate->builtins()->KeyedStoreICTrampoline(),
+ StoreDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreICInOptimizedCode(Isolate* isolate,
LanguageMode language_mode) {
- if (FLAG_tf_store_ic_stub) {
- KeyedStoreICTFStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
- }
- KeyedStoreICStub stub(isolate, StoreICState(language_mode));
- return make_callable(stub);
+ return Callable(language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreICStrict()
+ : isolate->builtins()->KeyedStoreIC(),
+ StoreWithVectorDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreIC_Megamorphic(Isolate* isolate,
LanguageMode language_mode) {
- if (FLAG_tf_store_ic_stub) {
- return Callable(
- language_mode == STRICT
- ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
- : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
- StoreWithVectorDescriptor(isolate));
- }
- return Callable(language_mode == STRICT
- ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
- : isolate->builtins()->KeyedStoreIC_Megamorphic(),
- StoreWithVectorDescriptor(isolate));
+ return Callable(
+ language_mode == STRICT
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict_TF()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic_TF(),
+ StoreWithVectorDescriptor(isolate));
}
// static
@@ -248,6 +252,7 @@ TFS_BUILTIN(Equal)
TFS_BUILTIN(NotEqual)
TFS_BUILTIN(StrictEqual)
TFS_BUILTIN(StrictNotEqual)
+TFS_BUILTIN(CreateIterResultObject)
TFS_BUILTIN(HasProperty)
TFS_BUILTIN(ToInteger)
TFS_BUILTIN(ToLength)
@@ -256,22 +261,16 @@ TFS_BUILTIN(Typeof)
TFS_BUILTIN(InstanceOf)
TFS_BUILTIN(OrdinaryHasInstance)
TFS_BUILTIN(ForInFilter)
+TFS_BUILTIN(NewUnmappedArgumentsElements)
+TFS_BUILTIN(NewRestParameterElements)
+TFS_BUILTIN(PromiseHandleReject)
+TFS_BUILTIN(GetSuperConstructor)
+TFS_BUILTIN(StringCharAt)
+TFS_BUILTIN(StringCharCodeAt)
#undef TFS_BUILTIN
// static
-Callable CodeFactory::Inc(Isolate* isolate) {
- IncStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
-Callable CodeFactory::Dec(Isolate* isolate) {
- DecStub stub(isolate);
- return make_callable(stub);
-}
-
-// static
Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag) {
StringAddStub stub(isolate, flags, pretenure_flag);
@@ -352,40 +351,40 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) {
// static
Callable CodeFactory::FastCloneRegExp(Isolate* isolate) {
- FastCloneRegExpStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->FastCloneRegExp(),
+ FastCloneRegExpDescriptor(isolate));
}
// static
-Callable CodeFactory::FastCloneShallowArray(Isolate* isolate) {
- // TODO(mstarzinger): Thread through AllocationSiteMode at some point.
- FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
- return make_callable(stub);
+Callable CodeFactory::FastCloneShallowArray(
+ Isolate* isolate, AllocationSiteMode allocation_mode) {
+ return Callable(isolate->builtins()->NewCloneShallowArray(allocation_mode),
+ FastCloneShallowArrayDescriptor(isolate));
}
// static
Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
- FastCloneShallowObjectStub stub(isolate, length);
- return make_callable(stub);
+ return Callable(isolate->builtins()->NewCloneShallowObject(length),
+ FastCloneShallowObjectDescriptor(isolate));
}
-
// static
-Callable CodeFactory::FastNewFunctionContext(Isolate* isolate) {
- FastNewFunctionContextStub stub(isolate);
- return make_callable(stub);
+Callable CodeFactory::FastNewFunctionContext(Isolate* isolate,
+ ScopeType scope_type) {
+ return Callable(isolate->builtins()->NewFunctionContext(scope_type),
+ FastNewFunctionContextDescriptor(isolate));
}
// static
Callable CodeFactory::FastNewClosure(Isolate* isolate) {
- FastNewClosureStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->FastNewClosure(),
+ FastNewClosureDescriptor(isolate));
}
// static
Callable CodeFactory::FastNewObject(Isolate* isolate) {
- FastNewObjectStub stub(isolate);
- return make_callable(stub);
+ return Callable(isolate->builtins()->FastNewObject(),
+ FastNewObjectDescriptor(isolate));
}
// static
@@ -509,5 +508,16 @@ Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
ContextOnlyDescriptor(isolate));
}
+// static
+Callable CodeFactory::ArrayPush(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArrayPush(), BuiltinDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::FunctionPrototypeBind(Isolate* isolate) {
+ return Callable(isolate->builtins()->FunctionPrototypeBind(),
+ BuiltinDescriptor(isolate));
+}
+
} // namespace internal
} // namespace v8
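The pattern repeated throughout this file: instead of instantiating a platform stub at each call site, every factory method now pairs an already-compiled builtin's code object with the interface descriptor describing its calling convention. A minimal sketch of the shape; MyOp, MyBuiltin, and MyDescriptor are placeholders, not real V8 names:

// static
Callable CodeFactory::MyOp(Isolate* isolate) {
  return Callable(isolate->builtins()->MyBuiltin(),  // Handle<Code> to call
                  MyDescriptor(isolate));            // where the args live
}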
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 033e5d54fb..2500ebd274 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -30,6 +30,12 @@ class Callable final BASE_EMBEDDED {
class V8_EXPORT_PRIVATE CodeFactory final {
public:
+  // CEntryStub has var-args semantics (all the arguments are passed on the
+  // stack and the argument count is passed via a register), which currently
+  // can't be expressed in a CallInterfaceDescriptor. Therefore only the code
+  // is exported here.
+ static Handle<Code> RuntimeCEntry(Isolate* isolate, int result_size = 1);
+
// Initial states for ICs.
static Callable LoadIC(Isolate* isolate);
static Callable LoadICInOptimizedCode(Isolate* isolate);
@@ -99,8 +105,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable BitwiseAnd(Isolate* isolate);
static Callable BitwiseOr(Isolate* isolate);
static Callable BitwiseXor(Isolate* isolate);
- static Callable Inc(Isolate* isolate);
- static Callable Dec(Isolate* isolate);
static Callable LessThan(Isolate* isolate);
static Callable LessThanOrEqual(Isolate* isolate);
static Callable GreaterThan(Isolate* isolate);
@@ -112,6 +116,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
+ static Callable StringCharAt(Isolate* isolate);
+ static Callable StringCharCodeAt(Isolate* isolate);
static Callable StringCompare(Isolate* isolate, Token::Value token);
static Callable StringEqual(Isolate* isolate);
static Callable StringNotEqual(Isolate* isolate);
@@ -122,12 +128,15 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable SubString(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
+ static Callable GetSuperConstructor(Isolate* isolate);
static Callable FastCloneRegExp(Isolate* isolate);
- static Callable FastCloneShallowArray(Isolate* isolate);
+ static Callable FastCloneShallowArray(Isolate* isolate,
+ AllocationSiteMode allocation_mode);
static Callable FastCloneShallowObject(Isolate* isolate, int length);
- static Callable FastNewFunctionContext(Isolate* isolate);
+ static Callable FastNewFunctionContext(Isolate* isolate,
+ ScopeType scope_type);
static Callable FastNewClosure(Isolate* isolate);
static Callable FastNewObject(Isolate* isolate);
static Callable FastNewRestParameter(Isolate* isolate,
@@ -141,6 +150,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable GrowFastDoubleElements(Isolate* isolate);
static Callable GrowFastSmiOrObjectElements(Isolate* isolate);
+ static Callable NewUnmappedArgumentsElements(Isolate* isolate);
+ static Callable NewRestParameterElements(Isolate* isolate);
+
static Callable AllocateHeapNumber(Isolate* isolate);
#define SIMD128_ALLOC(TYPE, Type, type, lane_count, lane_type) \
static Callable Allocate##Type(Isolate* isolate);
@@ -155,6 +167,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny);
static Callable Construct(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
+ static Callable CreateIterResultObject(Isolate* isolate);
static Callable HasProperty(Isolate* isolate);
static Callable ForInFilter(Isolate* isolate);
@@ -166,6 +179,10 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable InterpreterPushArgsAndConstructArray(Isolate* isolate);
static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
static Callable InterpreterOnStackReplacement(Isolate* isolate);
+
+ static Callable ArrayPush(Isolate* isolate);
+ static Callable FunctionPrototypeBind(Isolate* isolate);
+ static Callable PromiseHandleReject(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index b1ed2f13c7..4a96e81ebf 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -5,28 +5,47 @@
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
-#include "src/ic/handler-configuration.h"
-#include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {
using compiler::Node;
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size)
- : compiler::CodeAssembler(isolate, zone, descriptor, flags, name,
- result_size) {}
+CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
+ : compiler::CodeAssembler(state) {
+ if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
+ HandleBreakOnNode();
+ }
+}
-CodeStubAssembler::CodeStubAssembler(Isolate* isolate, Zone* zone,
- int parameter_count, Code::Flags flags,
- const char* name)
- : compiler::CodeAssembler(isolate, zone, parameter_count, flags, name) {}
+void CodeStubAssembler::HandleBreakOnNode() {
+  // FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is a
+  // string naming a stub and NODE is a number specifying the node id.
+ const char* name = state()->name();
+ size_t name_length = strlen(name);
+ if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
+ // Different name.
+ return;
+ }
+ size_t option_length = strlen(FLAG_csa_trap_on_node);
+ if (option_length < name_length + 2 ||
+ FLAG_csa_trap_on_node[name_length] != ',') {
+ // Option is too short.
+ return;
+ }
+ const char* start = &FLAG_csa_trap_on_node[name_length + 1];
+ char* end;
+ int node_id = static_cast<int>(strtol(start, &end, 10));
+ if (start == end) {
+ // Bad node id.
+ return;
+ }
+ BreakOnNode(node_id);
+}
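The flag parsing is self-contained enough to restate as ordinary C++; a sketch equivalent to the prefix/comma/strtol checks above (ParseTrapOption is an invented name):

#include <cstdlib>
#include <cstring>

// Parse "STUB,NODE": match the stub-name prefix, require a comma, then
// strtol the node id. Returns false on any mismatch, like the early
// returns in HandleBreakOnNode.
bool ParseTrapOption(const char* option, const char* stub_name, int* node_id) {
  size_t name_length = strlen(stub_name);
  if (strncmp(option, stub_name, name_length) != 0) return false;  // other stub
  if (strlen(option) < name_length + 2 || option[name_length] != ',')
    return false;  // option too short
  char* end;
  long id = strtol(&option[name_length + 1], &end, 10);
  if (&option[name_length + 1] == end) return false;  // bad node id
  *node_id = static_cast<int>(id);
  return true;
}

// ParseTrapOption("LoadIC,42", "LoadIC", &id) yields true with id == 42.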
-void CodeStubAssembler::Assert(ConditionBody codition_body, const char* message,
- const char* file, int line) {
+void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
+ const char* message, const char* file,
+ int line) {
#if defined(DEBUG)
Label ok(this);
Label not_ok(this, Label::kDeferred);
@@ -58,6 +77,67 @@ void CodeStubAssembler::Assert(ConditionBody codition_body, const char* message,
#endif
}
+Node* CodeStubAssembler::Select(Node* condition, const NodeGenerator& true_body,
+ const NodeGenerator& false_body,
+ MachineRepresentation rep) {
+ Variable value(this, rep);
+ Label vtrue(this), vfalse(this), end(this);
+ Branch(condition, &vtrue, &vfalse);
+
+ Bind(&vtrue);
+ {
+ value.Bind(true_body());
+ Goto(&end);
+ }
+ Bind(&vfalse);
+ {
+ value.Bind(false_body());
+ Goto(&end);
+ }
+
+ Bind(&end);
+ return value.value();
+}
+
+Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value,
+ Node* false_value,
+ MachineRepresentation rep) {
+ return Select(condition, [=] { return true_value; },
+ [=] { return false_value; }, rep);
+}
+
+Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value,
+ int false_value) {
+ return SelectConstant(condition, Int32Constant(true_value),
+ Int32Constant(false_value),
+ MachineRepresentation::kWord32);
+}
+
+Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value,
+ int false_value) {
+ return SelectConstant(condition, IntPtrConstant(true_value),
+ IntPtrConstant(false_value),
+ MachineType::PointerRepresentation());
+}
+
+Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) {
+ return SelectConstant(condition, TrueConstant(), FalseConstant(),
+ MachineRepresentation::kTagged);
+}
+
+Node* CodeStubAssembler::SelectTaggedConstant(Node* condition, Node* true_value,
+ Node* false_value) {
+ return SelectConstant(condition, true_value, false_value,
+ MachineRepresentation::kTagged);
+}
+
+Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
+ Smi* false_value) {
+ return SelectConstant(condition, SmiConstant(true_value),
+ SmiConstant(false_value),
+ MachineRepresentation::kTaggedSigned);
+}
+
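For readers new to the lambda-based Select added above, here is an illustrative call as it might appear inside a CodeStubAssembler method; length is a stand-in Node*. Each arm is generated lazily in its own basic block, so nodes are only emitted on the path that uses them:

// Clamp a possibly-negative intptr length to zero (illustrative only).
Node* clamped =
    Select(IntPtrLessThan(length, IntPtrConstant(0)),
           [=] { return IntPtrConstant(0); },  // true arm
           [=] { return length; },             // false arm
           MachineType::PointerRepresentation());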
Node* CodeStubAssembler::NoContextConstant() { return NumberConstant(0); }
#define HEAP_CONSTANT_ACCESSOR(rootName, name) \
@@ -86,48 +166,11 @@ Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(Smi::FromInt(value));
} else {
- DCHECK(mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS);
+ DCHECK_EQ(INTPTR_PARAMETERS, mode);
return IntPtrConstant(value);
}
}
-Node* CodeStubAssembler::IntPtrAddFoldConstants(Node* left, Node* right) {
- int32_t left_constant;
- bool is_left_constant = ToInt32Constant(left, left_constant);
- int32_t right_constant;
- bool is_right_constant = ToInt32Constant(right, right_constant);
- if (is_left_constant) {
- if (is_right_constant) {
- return IntPtrConstant(left_constant + right_constant);
- }
- if (left_constant == 0) {
- return right;
- }
- } else if (is_right_constant) {
- if (right_constant == 0) {
- return left;
- }
- }
- return IntPtrAdd(left, right);
-}
-
-Node* CodeStubAssembler::IntPtrSubFoldConstants(Node* left, Node* right) {
- int32_t left_constant;
- bool is_left_constant = ToInt32Constant(left, left_constant);
- int32_t right_constant;
- bool is_right_constant = ToInt32Constant(right, right_constant);
- if (is_left_constant) {
- if (is_right_constant) {
- return IntPtrConstant(left_constant - right_constant);
- }
- } else if (is_right_constant) {
- if (right_constant == 0) {
- return left;
- }
- }
- return IntPtrSub(left, right);
-}
-
Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
@@ -141,9 +184,11 @@ Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Node* CodeStubAssembler::WordIsPowerOfTwo(Node* value) {
// value && !(value & (value - 1))
return WordEqual(
- Select(WordEqual(value, IntPtrConstant(0)), IntPtrConstant(1),
- WordAnd(value, IntPtrSub(value, IntPtrConstant(1))),
- MachineType::PointerRepresentation()),
+ Select(
+ WordEqual(value, IntPtrConstant(0)),
+ [=] { return IntPtrConstant(1); },
+ [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); },
+ MachineType::PointerRepresentation()),
IntPtrConstant(0));
}
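The predicate reads more naturally outside CSA. A plain-C++ equivalent of the node graph built above:

#include <cstdint>

// A word is a power of two iff it is non-zero and clearing its lowest set
// bit leaves zero. The Select above maps value == 0 to a non-zero dummy so
// that zero is rejected without a separate branch.
bool WordIsPowerOfTwo(uintptr_t value) {
  return value != 0 && (value & (value - 1)) == 0;
}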
@@ -388,57 +433,19 @@ Node* CodeStubAssembler::SmiUntag(Node* value) {
Node* CodeStubAssembler::SmiToWord32(Node* value) {
Node* result = SmiUntag(value);
- if (Is64()) {
- result = TruncateInt64ToInt32(result);
- }
- return result;
+ return TruncateWordToWord32(result);
}
Node* CodeStubAssembler::SmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(SmiToWord32(value));
}
-Node* CodeStubAssembler::SmiAdd(Node* a, Node* b) {
- return BitcastWordToTaggedSigned(
- IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
-}
-
-Node* CodeStubAssembler::SmiSub(Node* a, Node* b) {
- return BitcastWordToTaggedSigned(
- IntPtrSub(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
-}
-
-Node* CodeStubAssembler::SmiEqual(Node* a, Node* b) {
- return WordEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiAbove(Node* a, Node* b) {
- return UintPtrGreaterThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiAboveOrEqual(Node* a, Node* b) {
- return UintPtrGreaterThanOrEqual(BitcastTaggedToWord(a),
- BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiBelow(Node* a, Node* b) {
- return UintPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiLessThan(Node* a, Node* b) {
- return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
-Node* CodeStubAssembler::SmiLessThanOrEqual(Node* a, Node* b) {
- return IntPtrLessThanOrEqual(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
-}
-
Node* CodeStubAssembler::SmiMax(Node* a, Node* b) {
- return Select(SmiLessThan(a, b), b, a);
+ return SelectTaggedConstant(SmiLessThan(a, b), b, a);
}
Node* CodeStubAssembler::SmiMin(Node* a, Node* b) {
- return Select(SmiLessThan(a, b), a, b);
+ return SelectTaggedConstant(SmiLessThan(a, b), a, b);
}
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
@@ -527,7 +534,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
Label answer_zero(this), answer_not_zero(this);
Node* answer = Projection(0, pair);
Node* zero = Int32Constant(0);
- Branch(WordEqual(answer, zero), &answer_zero, &answer_not_zero);
+ Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
Bind(&answer_not_zero);
{
var_result.Bind(ChangeInt32ToTagged(answer));
@@ -546,7 +553,7 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
}
Bind(&if_should_be_zero);
{
- var_result.Bind(zero);
+ var_result.Bind(SmiConstant(0));
Goto(&return_result);
}
}
@@ -565,13 +572,27 @@ Node* CodeStubAssembler::SmiMul(Node* a, Node* b) {
return var_result.value();
}
+Node* CodeStubAssembler::TruncateWordToWord32(Node* value) {
+ if (Is64()) {
+ return TruncateInt64ToInt32(value);
+ }
+ return value;
+}
+
Node* CodeStubAssembler::TaggedIsSmi(Node* a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
-Node* CodeStubAssembler::WordIsPositiveSmi(Node* a) {
- return WordEqual(WordAnd(a, IntPtrConstant(kSmiTagMask | kSmiSignMask)),
+Node* CodeStubAssembler::TaggedIsNotSmi(Node* a) {
+ return WordNotEqual(
+ WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+ IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::TaggedIsPositiveSmi(Node* a) {
+ return WordEqual(WordAnd(BitcastTaggedToWord(a),
+ IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
}
@@ -698,15 +719,16 @@ void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
if_true, if_false);
}
-void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
- Label* if_true, Label* if_false) {
+void CodeStubAssembler::BranchIfFastJSArray(
+ Node* object, Node* context, CodeStubAssembler::FastJSArrayAccessMode mode,
+ Label* if_true, Label* if_false) {
// Bailout if receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
// Bailout if instance type is not JS_ARRAY_TYPE.
- GotoIf(WordNotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
+ GotoIf(Word32NotEqual(LoadMapInstanceType(map), Int32Constant(JS_ARRAY_TYPE)),
if_false);
Node* elements_kind = LoadMapElementsKind(map);
@@ -715,8 +737,9 @@ void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
GotoUnless(IsFastElementsKind(elements_kind), if_false);
// Check prototype chain if receiver does not have packed elements.
- GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
-
+ if (mode == FastJSArrayAccessMode::INBOUNDS_READ) {
+ GotoUnless(IsHoleyFastElementsKind(elements_kind), if_true);
+ }
BranchIfPrototypesHaveNoElements(map, if_true, if_false);
}
@@ -732,6 +755,22 @@ Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
Label merge_runtime(this, &result);
+ if (flags & kAllowLargeObjectAllocation) {
+ Label next(this);
+ GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
+
+ Node* runtime_flags = SmiConstant(
+ Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+ AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
+ Node* const runtime_result =
+ CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
+ SmiTag(size_in_bytes), runtime_flags);
+ result.Bind(runtime_result);
+ Goto(&merge_runtime);
+
+ Bind(&next);
+ }
+
Node* new_top = IntPtrAdd(top, size_in_bytes);
Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
&no_runtime_call);
@@ -827,10 +866,17 @@ Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = ExternalConstant(
- new_space
- ? ExternalReference::new_space_allocation_limit_address(isolate())
- : ExternalReference::old_space_allocation_limit_address(isolate()));
+ DCHECK_EQ(kPointerSize,
+ ExternalReference::new_space_allocation_limit_address(isolate())
+ .address() -
+ ExternalReference::new_space_allocation_top_address(isolate())
+ .address());
+ DCHECK_EQ(kPointerSize,
+ ExternalReference::old_space_allocation_limit_address(isolate())
+ .address() -
+ ExternalReference::old_space_allocation_top_address(isolate())
+ .address());
+ Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
#ifdef V8_HOST_ARCH_32_BIT
if (flags & kDoubleAlignment) {
@@ -846,7 +892,7 @@ Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
- return BitcastWordToTagged(IntPtrAdd(previous, offset));
+ return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
@@ -929,13 +975,12 @@ void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
}
}
-compiler::Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
+Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
-compiler::Node* CodeStubAssembler::LoadFromParentFrame(int offset,
- MachineType rep) {
+Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadParentFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
@@ -1027,6 +1072,11 @@ Node* CodeStubAssembler::HasInstanceType(Node* object,
return Word32Equal(LoadInstanceType(object), Int32Constant(instance_type));
}
+Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
+ InstanceType instance_type) {
+ return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
+}
+
Node* CodeStubAssembler::LoadProperties(Node* object) {
return LoadObjectField(object, JSObject::kPropertiesOffset);
}
@@ -1303,8 +1353,7 @@ Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
Node* value) {
int offset = Context::SlotOffset(slot_index);
- return Store(MachineRepresentation::kTagged, context, IntPtrConstant(offset),
- value);
+ return Store(context, IntPtrConstant(offset), value);
}
Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
@@ -1312,7 +1361,15 @@ Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
Node* offset =
IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
- return Store(MachineRepresentation::kTagged, context, offset, value);
+ return Store(context, offset, value);
+}
+
+Node* CodeStubAssembler::StoreContextElementNoWriteBarrier(Node* context,
+ int slot_index,
+ Node* value) {
+ int offset = Context::SlotOffset(slot_index);
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
+ IntPtrConstant(offset), value);
}
Node* CodeStubAssembler::LoadNativeContext(Node* context) {
@@ -1322,8 +1379,7 @@ Node* CodeStubAssembler::LoadNativeContext(Node* context) {
Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
Node* native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
- return LoadFixedArrayElement(native_context,
- IntPtrConstant(Context::ArrayMapIndex(kind)));
+ return LoadContextElement(native_context, Context::ArrayMapIndex(kind));
}
Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
@@ -1333,8 +1389,8 @@ Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
Node* CodeStubAssembler::StoreObjectField(
Node* object, int offset, Node* value) {
- return Store(MachineRepresentation::kTagged, object,
- IntPtrConstant(offset - kHeapObjectTag), value);
+ DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
+ return Store(object, IntPtrConstant(offset - kHeapObjectTag), value);
}
Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
@@ -1343,8 +1399,8 @@ Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectField(object, const_offset, value);
}
- return Store(MachineRepresentation::kTagged, object,
- IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
+ return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
+ value);
}
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
@@ -1363,10 +1419,22 @@ Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
+Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
+ return StoreWithMapWriteBarrier(
+ object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
+}
+
+Node* CodeStubAssembler::StoreMapNoWriteBarrier(
+ Node* object, Heap::RootListIndex map_root_index) {
+ return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
+}
+
Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
+ CSA_SLOW_ASSERT(this, IsMap(map));
return StoreNoWriteBarrier(
MachineRepresentation::kTagged, object,
- IntPtrConstant(HeapNumber::kMapOffset - kHeapObjectTag), map);
+ IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
@@ -1381,17 +1449,19 @@ Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* value,
WriteBarrierMode barrier_mode,
+ int additional_offset,
ParameterMode parameter_mode) {
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
- Node* offset =
- ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS, parameter_mode,
- FixedArray::kHeaderSize - kHeapObjectTag);
- MachineRepresentation rep = MachineRepresentation::kTagged;
+ int header_size =
+ FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
+ Node* offset = ElementOffsetFromIndex(index_node, FAST_HOLEY_ELEMENTS,
+ parameter_mode, header_size);
if (barrier_mode == SKIP_WRITE_BARRIER) {
- return StoreNoWriteBarrier(rep, object, offset, value);
+ return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
+ value);
} else {
- return Store(rep, object, offset, value);
+ return Store(object, offset, value);
}
}
@@ -1405,13 +1475,90 @@ Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
return StoreNoWriteBarrier(rep, object, offset, value);
}
+Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
+ Node* array,
+ CodeStubArguments& args,
+ Variable& arg_index,
+ Label* bailout) {
+ Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
+ Label pre_bailout(this);
+ Label success(this);
+ Variable var_elements(this, MachineRepresentation::kTagged);
+ Variable var_tagged_length(this, MachineRepresentation::kTagged);
+ ParameterMode mode = OptimalParameterMode();
+ Variable var_length(this, OptimalParameterRepresentation());
+ var_length.Bind(TaggedToParameter(LoadJSArrayLength(array), mode));
+ var_elements.Bind(LoadElements(array));
+ Node* capacity =
+ TaggedToParameter(LoadFixedArrayBaseLength(var_elements.value()), mode);
+
+  // Grow the capacity of the backing fixed array if the new elements don't fit.
+ Label fits(this, &var_elements);
+ Node* first = arg_index.value();
+ Node* growth = IntPtrSub(args.GetLength(), first);
+ Node* new_length =
+ IntPtrOrSmiAdd(WordToParameter(growth, mode), var_length.value(), mode);
+ GotoUnless(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
+ Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+ var_elements.Bind(GrowElementsCapacity(array, var_elements.value(), kind,
+ kind, capacity, new_capacity, mode,
+ &pre_bailout));
+ Goto(&fits);
+ Bind(&fits);
+ Node* elements = var_elements.value();
+
+ // Push each argument onto the end of the array now that there is enough
+ // capacity.
+ CodeStubAssembler::VariableList push_vars({&var_length}, zone());
+ args.ForEach(
+ push_vars,
+ [this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
+ if (IsFastSmiElementsKind(kind)) {
+ GotoIf(TaggedIsNotSmi(arg), &pre_bailout);
+ } else if (IsFastDoubleElementsKind(kind)) {
+ GotoIfNotNumber(arg, &pre_bailout);
+ }
+ if (IsFastDoubleElementsKind(kind)) {
+ Node* double_value = ChangeNumberToFloat64(arg);
+ StoreFixedDoubleArrayElement(elements, var_length.value(),
+ Float64SilenceNaN(double_value), mode);
+ } else {
+ WriteBarrierMode barrier_mode = IsFastSmiElementsKind(kind)
+ ? SKIP_WRITE_BARRIER
+ : UPDATE_WRITE_BARRIER;
+ StoreFixedArrayElement(elements, var_length.value(), arg,
+ barrier_mode, 0, mode);
+ }
+ Increment(var_length, 1, mode);
+ },
+ first, nullptr);
+ {
+ Node* length = ParameterToTagged(var_length.value(), mode);
+ var_tagged_length.Bind(length);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+ Goto(&success);
+ }
+
+ Bind(&pre_bailout);
+ {
+ Node* length = ParameterToTagged(var_length.value(), mode);
+ var_tagged_length.Bind(length);
+ Node* diff = SmiSub(length, LoadJSArrayLength(array));
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+ arg_index.Bind(IntPtrAdd(arg_index.value(), SmiUntag(diff)));
+ Goto(bailout);
+ }
+
+ Bind(&success);
+ return var_tagged_length.value();
+}
+
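Stripped of CSA plumbing, the routine grows the backing store once, then appends argument by argument; on bailout, arg_index tells the caller how far it got. A rough sketch of that shape (AppendAll is an invented name):

#include <cstddef>
#include <vector>

// Grow once up front (cf. GrowElementsCapacity), then push the remaining
// arguments starting at index `first`. Returns the new length, like
// var_tagged_length above.
template <typename T>
std::size_t AppendAll(std::vector<T>* elements, const std::vector<T>& args,
                      std::size_t first) {
  std::size_t growth = args.size() - first;
  elements->reserve(elements->size() + growth);  // one capacity grow
  for (std::size_t i = first; i < args.size(); ++i) {
    elements->push_back(args[i]);
  }
  return elements->size();
}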
Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
: Heap::kMutableHeapNumberMapRootIndex;
- Node* map = LoadRoot(heap_map_index);
- StoreMapNoWriteBarrier(result, map);
+ StoreMapNoWriteBarrier(result, heap_map_index);
return result;
}
@@ -1427,12 +1574,13 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
Comment("AllocateSeqOneByteString");
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
- StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
+ StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
return result;
}
@@ -1457,13 +1605,13 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
// Just allocate the SeqOneByteString in new space.
Node* result = Allocate(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
- StoreMapNoWriteBarrier(result, LoadRoot(Heap::kOneByteStringMapRootIndex));
- StoreObjectFieldNoWriteBarrier(
- result, SeqOneByteString::kLengthOffset,
- mode == SMI_PARAMETERS ? length : SmiFromWord(length));
- StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldOffset,
+ StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
+ ParameterToTagged(length, mode));
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
@@ -1471,9 +1619,8 @@ Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context, Node* length,
Bind(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
- Node* result =
- CallRuntime(Runtime::kAllocateSeqOneByteString, context,
- mode == SMI_PARAMETERS ? length : SmiFromWord(length));
+ Node* result = CallRuntime(Runtime::kAllocateSeqOneByteString, context,
+ ParameterToTagged(length, mode));
var_result.Bind(result);
Goto(&if_join);
}
@@ -1487,12 +1634,13 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
Comment("AllocateSeqTwoByteString");
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
- StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+ StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
SmiConstant(Smi::FromInt(length)));
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
return result;
}
@@ -1517,13 +1665,14 @@ Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context, Node* length,
// Just allocate the SeqTwoByteString in new space.
Node* result = Allocate(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
- StoreMapNoWriteBarrier(result, LoadRoot(Heap::kStringMapRootIndex));
+ StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(
result, SeqTwoByteString::kLengthOffset,
mode == SMI_PARAMETERS ? length : SmiFromWord(length));
- StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldOffset,
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+ MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
@@ -1547,14 +1696,14 @@ Node* CodeStubAssembler::AllocateSlicedString(
Node* offset) {
CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(SlicedString::kSize);
- Node* map = LoadRoot(map_root_index);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
- StoreMapNoWriteBarrier(result, map);
+ StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldOffset,
- Int32Constant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
@@ -1580,14 +1729,14 @@ Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
AllocationFlags flags) {
CSA_ASSERT(this, TaggedIsSmi(length));
Node* result = Allocate(ConsString::kSize, flags);
- Node* map = LoadRoot(map_root_index);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
- StoreMapNoWriteBarrier(result, map);
+ StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
- StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldOffset,
- Int32Constant(String::kEmptyHashField),
- MachineRepresentation::kWord32);
+  // Initialize both used and unused parts of the hash field slot at once.
+ StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
+ IntPtrConstant(String::kEmptyHashField),
+ MachineType::PointerRepresentation());
bool const new_space = !(flags & kPretenured);
if (new_space) {
StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
@@ -1624,8 +1773,10 @@ Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Node* right_instance_type = LoadInstanceType(right);
// Compute intersection and difference of instance types.
- Node* anded_instance_types = WordAnd(left_instance_type, right_instance_type);
- Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+ Node* anded_instance_types =
+ Word32And(left_instance_type, right_instance_type);
+ Node* xored_instance_types =
+ Word32Xor(left_instance_type, right_instance_type);
// We create a one-byte cons string if
// 1. both strings are one-byte, or
@@ -1642,15 +1793,15 @@ Node* CodeStubAssembler::NewConsString(Node* context, Node* length, Node* left,
Label two_byte_map(this);
Variable result(this, MachineRepresentation::kTagged);
Label done(this, &result);
- GotoIf(WordNotEqual(
- WordAnd(anded_instance_types,
- IntPtrConstant(kStringEncodingMask | kOneByteDataHintTag)),
- IntPtrConstant(0)),
+ GotoIf(Word32NotEqual(Word32And(anded_instance_types,
+ Int32Constant(kStringEncodingMask |
+ kOneByteDataHintTag)),
+ Int32Constant(0)),
&one_byte_map);
- Branch(WordNotEqual(WordAnd(xored_instance_types,
- IntPtrConstant(kStringEncodingMask |
- kOneByteDataHintMask)),
- IntPtrConstant(kOneByteStringTag | kOneByteDataHintTag)),
+ Branch(Word32NotEqual(Word32And(xored_instance_types,
+ Int32Constant(kStringEncodingMask |
+ kOneByteDataHintMask)),
+ Int32Constant(kOneByteStringTag | kOneByteDataHintTag)),
&two_byte_map, &one_byte_map);
Bind(&one_byte_map);
@@ -1700,15 +1851,13 @@ Node* CodeStubAssembler::AllocateRegExpResult(Node* context, Node* length,
Node* const zero = IntPtrConstant(0);
Node* const length_intptr = SmiUntag(length);
const ElementsKind elements_kind = FAST_ELEMENTS;
- const ParameterMode parameter_mode = INTPTR_PARAMETERS;
- Node* const elements =
- AllocateFixedArray(elements_kind, length_intptr, parameter_mode);
+ Node* const elements = AllocateFixedArray(elements_kind, length_intptr);
StoreObjectField(result, JSArray::kElementsOffset, elements);
// Fill in the elements with undefined.
FillFixedArrayWithValue(elements_kind, elements, zero, length_intptr,
- Heap::kUndefinedValueRootIndex, parameter_mode);
+ Heap::kUndefinedValueRootIndex);
return result;
}
@@ -1727,14 +1876,14 @@ Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
Node* length = EntryToIndex<NameDictionary>(capacity);
Node* store_size =
- IntPtrAddFoldConstants(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
- IntPtrConstant(NameDictionary::kHeaderSize));
+ IntPtrAdd(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
+ IntPtrConstant(NameDictionary::kHeaderSize));
Node* result = Allocate(store_size);
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
- StoreObjectFieldRoot(result, FixedArray::kMapOffset,
- Heap::kHashTableMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kHashTableMapRootIndex));
+ StoreMapNoWriteBarrier(result, Heap::kHashTableMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromWord(length));
  // Initialize HashTable fields.
@@ -1754,25 +1903,25 @@ Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
SKIP_WRITE_BARRIER);
// Initialize NameDictionary elements.
- result = BitcastTaggedToWord(result);
+ Node* result_word = BitcastTaggedToWord(result);
Node* start_address = IntPtrAdd(
- result, IntPtrConstant(NameDictionary::OffsetOfElementAt(
- NameDictionary::kElementsStartIndex) -
- kHeapObjectTag));
+ result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
+ NameDictionary::kElementsStartIndex) -
+ kHeapObjectTag));
Node* end_address = IntPtrAdd(
- result,
- IntPtrSubFoldConstants(store_size, IntPtrConstant(kHeapObjectTag)));
+ result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
return result;
}
Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
- Node* elements) {
+ Node* elements,
+ AllocationFlags flags) {
CSA_ASSERT(this, IsMap(map));
Node* size =
IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
CSA_ASSERT(this, IsRegularHeapObjectSize(size));
- Node* object = Allocate(size);
+ Node* object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, size, properties, elements);
return object;
@@ -1806,6 +1955,7 @@ void CodeStubAssembler::InitializeJSObjectBody(Node* object, Node* map,
Comment("InitializeJSObjectBody");
Node* filler = LoadRoot(Heap::kUndefinedValueRootIndex);
// Calculate the untagged field addresses.
+ object = BitcastTaggedToWord(object);
Node* start_address =
IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
Node* end_address =
@@ -1821,8 +1971,8 @@ void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
CSA_ASSERT(this, WordIsWordAligned(end_address));
BuildFastLoop(
MachineType::PointerRepresentation(), start_address, end_address,
- [value](CodeStubAssembler* a, Node* current) {
- a->StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
+ [this, value](Node* current) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, current, value);
},
kPointerSize, IndexAdvanceMode::kPost);
}
@@ -1861,9 +2011,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
Node* array = AllocateUninitializedJSArray(kind, array_map, length,
allocation_site, size);
- // The bitcast here is safe because InnerAllocate doesn't actually allocate.
- Node* elements = InnerAllocate(BitcastTaggedToWord(array), elements_offset);
- StoreObjectField(array, JSObject::kElementsOffset, elements);
+ Node* elements = InnerAllocate(array, elements_offset);
+ StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
return {array, elements};
}
@@ -1878,6 +2027,7 @@ Node* CodeStubAssembler::AllocateUninitializedJSArray(ElementsKind kind,
Comment("write JSArray headers");
StoreMapNoWriteBarrier(array, array_map);
+ CSA_ASSERT(this, TaggedIsSmi(length));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
StoreObjectFieldRoot(array, JSArray::kPropertiesOffset,
@@ -1893,25 +2043,32 @@ Node* CodeStubAssembler::AllocateJSArray(ElementsKind kind, Node* array_map,
Node* capacity, Node* length,
Node* allocation_site,
ParameterMode capacity_mode) {
- bool is_double = IsFastDoubleElementsKind(kind);
-
- // Allocate both array and elements object, and initialize the JSArray.
- Node *array, *elements;
- std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
- kind, array_map, length, allocation_site, capacity, capacity_mode);
- // Setup elements object.
- Heap* heap = isolate()->heap();
- Handle<Map> elements_map(is_double ? heap->fixed_double_array_map()
- : heap->fixed_array_map());
- StoreMapNoWriteBarrier(elements, HeapConstant(elements_map));
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
- TagParameter(capacity, capacity_mode));
-
- // Fill in the elements with holes.
- FillFixedArrayWithValue(
- kind, elements, capacity_mode == SMI_PARAMETERS ? SmiConstant(Smi::kZero)
- : IntPtrConstant(0),
- capacity, Heap::kTheHoleValueRootIndex, capacity_mode);
+ Node *array = nullptr, *elements = nullptr;
+ int32_t constant_capacity;
+ if (ToInt32Constant(capacity, constant_capacity) && constant_capacity == 0) {
+ // Array is empty. Use the shared empty fixed array instead of allocating a
+ // new one.
+ array = AllocateUninitializedJSArrayWithoutElements(kind, array_map, length,
+ nullptr);
+ StoreObjectFieldRoot(array, JSArray::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ } else {
+ // Allocate both array and elements object, and initialize the JSArray.
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ kind, array_map, length, allocation_site, capacity, capacity_mode);
+ // Setup elements object.
+ Heap::RootListIndex elements_map_index =
+ IsFastDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
+ : Heap::kFixedArrayMapRootIndex;
+ DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
+ StoreMapNoWriteBarrier(elements, elements_map_index);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+ ParameterToTagged(capacity, capacity_mode));
+ // Fill in the elements with holes.
+ FillFixedArrayWithValue(kind, elements,
+ IntPtrOrSmiConstant(0, capacity_mode), capacity,
+ Heap::kTheHoleValueRootIndex, capacity_mode);
+ }
return array;
}
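The shortcut above in miniature, under the assumption (guaranteed by the heap) that the empty fixed array is a single shared immortal object; Backing and BackingFor are invented names:

struct Backing {
  explicit Backing(int n = 0) : length(n) {}
  int length;
};

// A compile-time-zero capacity reuses one shared empty backing store
// (like the empty_fixed_array root) instead of allocating a fresh one.
Backing* BackingFor(int capacity) {
  static Backing shared_empty;
  return capacity == 0 ? &shared_empty : new Backing(capacity);
}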
@@ -1920,23 +2077,19 @@ Node* CodeStubAssembler::AllocateFixedArray(ElementsKind kind,
Node* capacity_node,
ParameterMode mode,
AllocationFlags flags) {
- CSA_ASSERT(this,
- IntPtrGreaterThan(capacity_node, IntPtrOrSmiConstant(0, mode)));
+ CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node,
+ IntPtrOrSmiConstant(0, mode), mode));
Node* total_size = GetFixedArrayAllocationSize(capacity_node, kind, mode);
// Allocate both array and elements object, and initialize the JSArray.
Node* array = Allocate(total_size, flags);
- Heap* heap = isolate()->heap();
- Handle<Map> map(IsFastDoubleElementsKind(kind)
- ? heap->fixed_double_array_map()
- : heap->fixed_array_map());
- if (flags & kPretenured) {
- StoreObjectField(array, JSObject::kMapOffset, HeapConstant(map));
- } else {
- StoreMapNoWriteBarrier(array, HeapConstant(map));
- }
+ Heap::RootListIndex map_index = IsFastDoubleElementsKind(kind)
+ ? Heap::kFixedDoubleArrayMapRootIndex
+ : Heap::kFixedArrayMapRootIndex;
+ DCHECK(Heap::RootIsImmortalImmovable(map_index));
+ StoreMapNoWriteBarrier(array, map_index);
StoreObjectFieldNoWriteBarrier(array, FixedArray::kLengthOffset,
- TagParameter(capacity_node, mode));
+ ParameterToTagged(capacity_node, mode));
return array;
}
@@ -1954,8 +2107,7 @@ void CodeStubAssembler::FillFixedArrayWithValue(
BuildFastFixedArrayForEach(
array, kind, from_node, to_node,
- [value, is_double, double_hole](CodeStubAssembler* assembler, Node* array,
- Node* offset) {
+ [this, value, is_double, double_hole](Node* array, Node* offset) {
if (is_double) {
// Don't use doubles to store the hole double, since manipulating the
// signaling NaN used for the hole in C++, e.g. with bit_cast, will
@@ -1965,21 +2117,19 @@ void CodeStubAssembler::FillFixedArrayWithValue(
// TODO(danno): When we have a Float32/Float64 wrapper class that
// preserves double bits during manipulation, remove this code/change
// this to an indexed Float64 store.
- if (assembler->Is64()) {
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord64,
- array, offset, double_hole);
+ if (Is64()) {
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, array, offset,
+ double_hole);
} else {
- assembler->StoreNoWriteBarrier(MachineRepresentation::kWord32,
- array, offset, double_hole);
- assembler->StoreNoWriteBarrier(
- MachineRepresentation::kWord32, array,
- assembler->IntPtrAdd(offset,
- assembler->IntPtrConstant(kPointerSize)),
- double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, array,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
}
} else {
- assembler->StoreNoWriteBarrier(MachineRepresentation::kTagged, array,
- offset, value);
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset,
+ value);
}
},
mode);
@@ -2076,7 +2226,7 @@ void CodeStubAssembler::CopyFixedArrayElements(
from_array, var_from_offset.value(), from_kind, to_kind, if_hole);
if (needs_write_barrier) {
- Store(MachineRepresentation::kTagged, to_array, to_offset, value);
+ Store(to_array, to_offset, value);
} else if (to_double_elements) {
StoreNoWriteBarrier(MachineRepresentation::kFloat64, to_array, to_offset,
value);
@@ -2119,11 +2269,12 @@ void CodeStubAssembler::CopyFixedArrayElements(
Comment("] CopyFixedArrayElements");
}
-void CodeStubAssembler::CopyStringCharacters(
- compiler::Node* from_string, compiler::Node* to_string,
- compiler::Node* from_index, compiler::Node* to_index,
- compiler::Node* character_count, String::Encoding from_encoding,
- String::Encoding to_encoding, ParameterMode mode) {
+void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
+ Node* from_index, Node* to_index,
+ Node* character_count,
+ String::Encoding from_encoding,
+ String::Encoding to_encoding,
+ ParameterMode mode) {
bool from_one_byte = from_encoding == String::ONE_BYTE_ENCODING;
bool to_one_byte = to_encoding == String::ONE_BYTE_ENCODING;
DCHECK_IMPLIES(to_one_byte, from_one_byte);
@@ -2140,7 +2291,7 @@ void CodeStubAssembler::CopyStringCharacters(
Node* to_offset =
ElementOffsetFromIndex(to_index, to_kind, mode, header_size);
Node* byte_count = ElementOffsetFromIndex(character_count, from_kind, mode);
- Node* limit_offset = IntPtrAddFoldConstants(from_offset, byte_count);
+ Node* limit_offset = IntPtrAdd(from_offset, byte_count);
// Prepare the fast loop
MachineType type =
@@ -2166,16 +2317,14 @@ void CodeStubAssembler::CopyStringCharacters(
to_index_smi == from_index_smi));
BuildFastLoop(vars, MachineType::PointerRepresentation(), from_offset,
limit_offset,
- [from_string, to_string, &current_to_offset, to_increment, type,
- rep, index_same](CodeStubAssembler* assembler, Node* offset) {
- Node* value = assembler->Load(type, from_string, offset);
- assembler->StoreNoWriteBarrier(
+ [this, from_string, to_string, &current_to_offset, to_increment,
+ type, rep, index_same](Node* offset) {
+ Node* value = Load(type, from_string, offset);
+ StoreNoWriteBarrier(
rep, to_string,
index_same ? offset : current_to_offset.value(), value);
if (!index_same) {
- current_to_offset.Bind(assembler->IntPtrAdd(
- current_to_offset.value(),
- assembler->IntPtrConstant(to_increment)));
+ Increment(current_to_offset, to_increment);
}
},
from_increment, IndexAdvanceMode::kPost);
@@ -2212,17 +2361,10 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity,
ParameterMode mode) {
- Node* half_old_capacity = WordShr(old_capacity, IntPtrConstant(1));
- Node* new_capacity = IntPtrAdd(half_old_capacity, old_capacity);
- Node* unconditioned_result =
- IntPtrAdd(new_capacity, IntPtrOrSmiConstant(16, mode));
- if (mode == INTEGER_PARAMETERS || mode == INTPTR_PARAMETERS) {
- return unconditioned_result;
- } else {
- int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
- return WordAnd(unconditioned_result,
- IntPtrConstant(static_cast<size_t>(-1) << kSmiShiftBits));
- }
+ Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode);
+ Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode);
+ Node* padding = IntPtrOrSmiConstant(16, mode);
+ return IntPtrOrSmiAdd(new_capacity, padding, mode);
}
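// The mode-generic body above computes new_capacity = old + (old >> 1) + 16
// in either representation. Scalar rendering with a worked value (sketch):
intptr_t CalculateNewElementsCapacityScalar(intptr_t old_capacity) {
  return old_capacity + (old_capacity >> 1) + 16;  // 16 -> 16 + 8 + 16 = 40
}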
Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
@@ -2231,8 +2373,8 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
Node* capacity = LoadFixedArrayBaseLength(elements);
ParameterMode mode = OptimalParameterMode();
- capacity = UntagParameter(capacity, mode);
- key = UntagParameter(key, mode);
+ capacity = TaggedToParameter(capacity, mode);
+ key = TaggedToParameter(key, mode);
return TryGrowElementsCapacity(object, elements, kind, key, capacity, mode,
bailout);
@@ -2247,12 +2389,12 @@ Node* CodeStubAssembler::TryGrowElementsCapacity(Node* object, Node* elements,
// If the gap growth is too big, fall back to the runtime.
Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode);
- Node* max_capacity = IntPtrAdd(capacity, max_gap);
- GotoIf(UintPtrGreaterThanOrEqual(key, max_capacity), bailout);
+ Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode);
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout);
// Calculate the capacity of the new backing store.
Node* new_capacity = CalculateNewElementsCapacity(
- IntPtrAdd(key, IntPtrOrSmiConstant(1, mode)), mode);
+ IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode);
return GrowElementsCapacity(object, elements, kind, kind, capacity,
new_capacity, mode, bailout);
}
@@ -2264,8 +2406,8 @@ Node* CodeStubAssembler::GrowElementsCapacity(
// If size of the allocation for the new capacity doesn't fit in a page
// that we can bump-pointer allocate from, fall back to the runtime.
int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind);
- GotoIf(UintPtrGreaterThanOrEqual(new_capacity,
- IntPtrOrSmiConstant(max_size, mode)),
+ GotoIf(UintPtrOrSmiGreaterThanOrEqual(
+ new_capacity, IntPtrOrSmiConstant(max_size, mode), mode),
bailout);
// Allocate the new backing store.
@@ -2282,9 +2424,9 @@ Node* CodeStubAssembler::GrowElementsCapacity(
return new_elements;
}
-void CodeStubAssembler::InitializeAllocationMemento(
- compiler::Node* base_allocation, int base_allocation_size,
- compiler::Node* allocation_site) {
+void CodeStubAssembler::InitializeAllocationMemento(Node* base_allocation,
+ int base_allocation_size,
+ Node* allocation_site) {
StoreObjectFieldNoWriteBarrier(
base_allocation, AllocationMemento::kMapOffset + base_allocation_size,
HeapConstant(Handle<Map>(isolate()->heap()->allocation_memento_map())));
@@ -2396,8 +2538,8 @@ Node* CodeStubAssembler::TruncateTaggedToWord32(Node* context, Node* value) {
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(value), HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
+ Branch(IsHeapNumberMap(LoadMap(value)), &if_valueisheapnumber,
+ &if_valueisnotheapnumber);
Bind(&if_valueisheapnumber);
{
@@ -2457,7 +2599,7 @@ Node* CodeStubAssembler::ChangeFloat64ToTagged(Node* value) {
Goto(&if_valueisheapnumber);
Bind(&if_notoverflow);
{
- Node* result = Projection(0, pair);
+ Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
var_result.Bind(result);
Goto(&if_join);
}
@@ -2492,7 +2634,7 @@ Node* CodeStubAssembler::ChangeInt32ToTagged(Node* value) {
Goto(&if_join);
Bind(&if_notoverflow);
{
- Node* result = Projection(0, pair);
+ Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
var_result.Bind(result);
}
Goto(&if_join);
@@ -2519,7 +2661,7 @@ Node* CodeStubAssembler::ChangeUint32ToTagged(Node* value) {
Node* overflow = Projection(1, pair);
GotoIf(overflow, &if_overflow);
- Node* result = Projection(0, pair);
+ Node* result = BitcastWordToTaggedSigned(Projection(0, pair));
var_result.Bind(result);
}
}
@@ -2597,6 +2739,25 @@ Node* CodeStubAssembler::ToThisString(Node* context, Node* value,
return var_value.value();
}
+Node* CodeStubAssembler::ChangeNumberToFloat64(compiler::Node* value) {
+ Variable result(this, MachineRepresentation::kFloat64);
+ Label smi(this);
+ Label done(this, &result);
+ GotoIf(TaggedIsSmi(value), &smi);
+ result.Bind(
+ LoadObjectField(value, HeapNumber::kValueOffset, MachineType::Float64()));
+ Goto(&done);
+
+ Bind(&smi);
+ {
+ result.Bind(SmiToFloat64(value));
+ Goto(&done);
+ }
+
+ Bind(&done);
+ return result.value();
+}
+
Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
PrimitiveType primitive_type,
char const* method_name) {
@@ -2698,14 +2859,19 @@ Node* CodeStubAssembler::ThrowIfNotInstanceType(Node* context, Node* value,
return var_value_map.value();
}
+Node* CodeStubAssembler::InstanceTypeEqual(Node* instance_type, int type) {
+ return Word32Equal(instance_type, Int32Constant(type));
+}
+
Node* CodeStubAssembler::IsSpecialReceiverMap(Node* map) {
Node* is_special = IsSpecialReceiverInstanceType(LoadMapInstanceType(map));
uint32_t mask =
1 << Map::kHasNamedInterceptor | 1 << Map::kIsAccessCheckNeeded;
USE(mask);
// Interceptors or access checks imply special receiver.
- CSA_ASSERT(this, Select(IsSetWord32(LoadMapBitField(map), mask), is_special,
- Int32Constant(1), MachineRepresentation::kWord32));
+ CSA_ASSERT(this,
+ SelectConstant(IsSetWord32(LoadMapBitField(map), mask), is_special,
+ Int32Constant(1), MachineRepresentation::kWord32));
return is_special;
}
@@ -2723,6 +2889,13 @@ Node* CodeStubAssembler::IsCallableMap(Node* map) {
Int32Constant(0));
}
+Node* CodeStubAssembler::IsConstructorMap(Node* map) {
+ CSA_ASSERT(this, IsMap(map));
+ return Word32NotEqual(
+ Word32And(LoadMapBitField(map), Int32Constant(1 << Map::kIsConstructor)),
+ Int32Constant(0));
+}
+
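// IsCallableMap and the new IsConstructorMap both test a single bit of
// Map::bit_field. Scalar equivalent of the Word32And/Word32NotEqual pattern
// (sketch):
bool TestMapBitFieldBit(uint32_t bit_field,
                        int shift /* e.g. Map::kIsConstructor */) {
  return (bit_field & (1u << shift)) != 0;
}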
Node* CodeStubAssembler::IsSpecialReceiverInstanceType(Node* instance_type) {
STATIC_ASSERT(JS_GLOBAL_OBJECT_TYPE <= LAST_SPECIAL_RECEIVER_TYPE);
return Int32LessThanOrEqual(instance_type,
@@ -2782,6 +2955,22 @@ Node* CodeStubAssembler::IsString(Node* object) {
Int32Constant(FIRST_NONSTRING_TYPE));
}
+Node* CodeStubAssembler::IsSymbol(Node* object) {
+ return IsSymbolMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsPrivateSymbol(Node* object) {
+ return Select(
+ IsSymbol(object),
+ [=] {
+ Node* const flags =
+ SmiToWord32(LoadObjectField(object, Symbol::kFlagsOffset));
+ const int kPrivateMask = 1 << Symbol::kPrivateBit;
+ return IsSetWord32(flags, kPrivateMask);
+ },
+ [=] { return Int32Constant(0); }, MachineRepresentation::kWord32);
+}
+
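// The Select overload used by IsPrivateSymbol takes lambdas, so the nodes of
// each arm are created only while that arm's label is bound. A toy analogue
// of the lazy shape (sketch; in the CSA both thunks are invoked, each inside
// its own block):
#include <functional>
template <typename T>
T LazySelect(bool condition, const std::function<T()>& if_true,
             const std::function<T()>& if_false) {
  return condition ? if_true() : if_false();
}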
Node* CodeStubAssembler::IsNativeContext(Node* object) {
return WordEqual(LoadMap(object), LoadRoot(Heap::kNativeContextMapRootIndex));
}
@@ -2795,7 +2984,7 @@ Node* CodeStubAssembler::IsHashTable(Node* object) {
}
Node* CodeStubAssembler::IsDictionary(Node* object) {
- return WordOr(IsHashTable(object), IsUnseededNumberDictionary(object));
+ return Word32Or(IsHashTable(object), IsUnseededNumberDictionary(object));
}
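// Note on the fix above: predicates like IsHashTable are built from WordEqual
// and yield Int32 (0/1) values, so combining them needs Word32Or; the old
// WordOr fed those Word32 values into a word-width operator, which mismatches
// on 64-bit targets (an inference from the change, not stated in the patch).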
Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
@@ -2803,10 +2992,15 @@ Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
}
-Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index) {
+Node* CodeStubAssembler::IsJSFunction(Node* object) {
+ return HasInstanceType(object, JS_FUNCTION_TYPE);
+}
+
+Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
+ ParameterMode parameter_mode) {
CSA_ASSERT(this, IsString(string));
// Translate the {index} into a Word.
- index = SmiToWord(index);
+ index = ParameterToWord(index, parameter_mode);
// We may need to loop in case of cons or sliced strings.
Variable var_index(this, MachineType::PointerRepresentation());
@@ -2989,12 +3183,13 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
{
// Load the isolate wide single character string cache.
Node* cache = LoadRoot(Heap::kSingleCharacterStringCacheRootIndex);
+ Node* code_index = ChangeUint32ToWord(code);
// Check if we have an entry for the {code} in the single character string
// cache already.
Label if_entryisundefined(this, Label::kDeferred),
if_entryisnotundefined(this);
- Node* entry = LoadFixedArrayElement(cache, code);
+ Node* entry = LoadFixedArrayElement(cache, code_index);
Branch(WordEqual(entry, UndefinedConstant()), &if_entryisundefined,
&if_entryisnotundefined);
@@ -3005,7 +3200,7 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
StoreNoWriteBarrier(
MachineRepresentation::kWord8, result,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag), code);
- StoreFixedArrayElement(cache, code, result);
+ StoreFixedArrayElement(cache, code_index, result);
var_result.Bind(result);
Goto(&if_done);
}
@@ -3096,7 +3291,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
Label end(this);
Label runtime(this);
- Variable var_instance_type(this, MachineRepresentation::kWord8); // Int32.
+ Variable var_instance_type(this, MachineRepresentation::kWord32); // Int32.
Variable var_result(this, MachineRepresentation::kTagged); // String.
Variable var_from(this, MachineRepresentation::kTagged); // Smi.
Variable var_string(this, MachineRepresentation::kTagged); // String.
@@ -3119,8 +3314,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Make sure that both from and to are non-negative smis.
- GotoUnless(WordIsPositiveSmi(from), &runtime);
- GotoUnless(WordIsPositiveSmi(to), &runtime);
+ GotoUnless(TaggedIsPositiveSmi(from), &runtime);
+ GotoUnless(TaggedIsPositiveSmi(to), &runtime);
Node* const substr_length = SmiSub(to, from);
Node* const string_length = LoadStringLength(string);
@@ -3260,8 +3455,9 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
STATIC_ASSERT(SeqTwoByteString::kHeaderSize ==
SeqOneByteString::kHeaderSize);
- Node* resource_data = LoadObjectField(var_string.value(),
- ExternalString::kResourceDataOffset);
+ Node* resource_data =
+ LoadObjectField(var_string.value(), ExternalString::kResourceDataOffset,
+ MachineType::Pointer());
Node* const fake_sequential_string = IntPtrSub(
resource_data,
IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -3339,12 +3535,10 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
CSA_ASSERT(this, TaggedIsSmi(left_length));
CSA_ASSERT(this, TaggedIsSmi(right_length));
Node* new_length = SmiAdd(left_length, right_length);
- GotoIf(UintPtrGreaterThanOrEqual(
- new_length, SmiConstant(Smi::FromInt(String::kMaxLength))),
+ GotoIf(SmiAboveOrEqual(new_length, SmiConstant(String::kMaxLength)),
&runtime);
- GotoIf(IntPtrLessThan(new_length,
- SmiConstant(Smi::FromInt(ConsString::kMinLength))),
+ GotoIf(SmiLessThan(new_length, SmiConstant(ConsString::kMinLength)),
&non_cons);
result.Bind(NewConsString(context, new_length, left, right, flags));
@@ -3357,23 +3551,24 @@ Node* CodeStubAssembler::StringAdd(Node* context, Node* left, Node* right,
Node* right_instance_type = LoadInstanceType(right);
// Compute intersection and difference of instance types.
- Node* ored_instance_types = WordOr(left_instance_type, right_instance_type);
- Node* xored_instance_types = WordXor(left_instance_type, right_instance_type);
+ Node* ored_instance_types = Word32Or(left_instance_type, right_instance_type);
+ Node* xored_instance_types =
+ Word32Xor(left_instance_type, right_instance_type);
// Check if both strings have the same encoding and both are sequential.
- GotoIf(WordNotEqual(
- WordAnd(xored_instance_types, IntPtrConstant(kStringEncodingMask)),
- IntPtrConstant(0)),
+ GotoIf(Word32NotEqual(Word32And(xored_instance_types,
+ Int32Constant(kStringEncodingMask)),
+ Int32Constant(0)),
&runtime);
- GotoIf(WordNotEqual(WordAnd(ored_instance_types,
- IntPtrConstant(kStringRepresentationMask)),
- IntPtrConstant(0)),
+ GotoIf(Word32NotEqual(Word32And(ored_instance_types,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(0)),
&runtime);
Label two_byte(this);
- GotoIf(WordEqual(
- WordAnd(ored_instance_types, IntPtrConstant(kStringEncodingMask)),
- IntPtrConstant(kTwoByteStringTag)),
+ GotoIf(Word32Equal(
+ Word32And(ored_instance_types, Int32Constant(kStringEncodingMask)),
+ Int32Constant(kTwoByteStringTag)),
&two_byte);
// One-byte sequential string case
Node* new_string =
@@ -3428,9 +3623,10 @@ Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
// Let runtime handle non-one-byte {needle_char}.
- Node* const one_byte_char_mask = IntPtrConstant(0xFF);
- GotoUnless(WordEqual(WordAnd(needle_char, one_byte_char_mask), needle_char),
- &runtime);
+ Node* const one_byte_char_mask = Int32Constant(0xFF);
+ GotoUnless(
+ Word32Equal(Word32And(needle_char, one_byte_char_mask), needle_char),
+ &runtime);
// TODO(jgruber): Handle external and two-byte strings.
@@ -3455,21 +3651,21 @@ Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
var_result.Bind(SmiConstant(Smi::FromInt(-1)));
- BuildFastLoop(MachineType::PointerRepresentation(), cursor, end,
- [string, needle_char, begin, &var_result, &out](
- CodeStubAssembler* csa, Node* cursor) {
- Label next(csa);
- Node* value = csa->Load(MachineType::Uint8(), string, cursor);
- csa->GotoUnless(csa->WordEqual(value, needle_char), &next);
-
- // Found a match.
- Node* index = csa->SmiTag(csa->IntPtrSub(cursor, begin));
- var_result.Bind(index);
- csa->Goto(&out);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), cursor, end,
+ [this, string, needle_char, begin, &var_result, &out](Node* cursor) {
+ Label next(this);
+ Node* value = Load(MachineType::Uint8(), string, cursor);
+ GotoUnless(Word32Equal(value, needle_char), &next);
+
+ // Found a match.
+ Node* index = SmiTag(IntPtrSub(cursor, begin));
+ var_result.Bind(index);
+ Goto(&out);
- csa->Bind(&next);
- },
- 1, IndexAdvanceMode::kPost);
+ Bind(&next);
+ },
+ 1, IndexAdvanceMode::kPost);
Goto(&out);
Bind(&runtime);
@@ -3485,7 +3681,7 @@ Node* CodeStubAssembler::StringIndexOfChar(Node* context, Node* string,
return var_result.value();
}
-Node* CodeStubAssembler::StringFromCodePoint(compiler::Node* codepoint,
+Node* CodeStubAssembler::StringFromCodePoint(Node* codepoint,
UnicodeEncoding encoding) {
Variable var_result(this, MachineRepresentation::kTagged);
var_result.Bind(EmptyStringConstant());
@@ -3563,8 +3759,7 @@ Node* CodeStubAssembler::StringToNumber(Node* context, Node* input) {
return var_result.value();
}
-Node* CodeStubAssembler::NumberToString(compiler::Node* context,
- compiler::Node* argument) {
+Node* CodeStubAssembler::NumberToString(Node* context, Node* argument) {
Variable result(this, MachineRepresentation::kTagged);
Label runtime(this, Label::kDeferred);
Label smi(this);
@@ -3575,7 +3770,9 @@ Node* CodeStubAssembler::NumberToString(compiler::Node* context,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- Node* mask = LoadFixedArrayBaseLength(number_string_cache);
+ // TODO(ishell): cleanup mask handling.
+ Node* mask =
+ BitcastTaggedToWord(LoadFixedArrayBaseLength(number_string_cache));
Node* one = IntPtrConstant(1);
mask = IntPtrSub(mask, one);
@@ -3583,7 +3780,7 @@ Node* CodeStubAssembler::NumberToString(compiler::Node* context,
// Argument isn't smi, check to see if it's a heap-number.
Node* map = LoadMap(argument);
- GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+ GotoUnless(IsHeapNumberMap(map), &runtime);
// Make a hash from the two 32-bit values of the double.
Node* low =
@@ -3591,29 +3788,27 @@ Node* CodeStubAssembler::NumberToString(compiler::Node* context,
Node* high = LoadObjectField(argument, HeapNumber::kValueOffset + kIntSize,
MachineType::Int32());
Node* hash = Word32Xor(low, high);
- if (Is64()) hash = ChangeInt32ToInt64(hash);
+ hash = ChangeInt32ToIntPtr(hash);
hash = WordShl(hash, one);
- Node* index = WordAnd(hash, SmiToWord(mask));
+ Node* index = WordAnd(hash, SmiUntag(BitcastWordToTagged(mask)));
// Cache entry's key must be a heap number
- Node* number_key =
- LoadFixedArrayElement(number_string_cache, index, 0, INTPTR_PARAMETERS);
+ Node* number_key = LoadFixedArrayElement(number_string_cache, index);
GotoIf(TaggedIsSmi(number_key), &runtime);
map = LoadMap(number_key);
- GotoUnless(WordEqual(map, HeapNumberMapConstant()), &runtime);
+ GotoUnless(IsHeapNumberMap(map), &runtime);
// Cache entry's key must match the heap number value we're looking for.
Node* low_compare = LoadObjectField(number_key, HeapNumber::kValueOffset,
MachineType::Int32());
Node* high_compare = LoadObjectField(
number_key, HeapNumber::kValueOffset + kIntSize, MachineType::Int32());
- GotoUnless(WordEqual(low, low_compare), &runtime);
- GotoUnless(WordEqual(high, high_compare), &runtime);
+ GotoUnless(Word32Equal(low, low_compare), &runtime);
+ GotoUnless(Word32Equal(high, high_compare), &runtime);
- // Heap number match, return value fro cache entry.
+ // Heap number match, return value from cache entry.
IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
- result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize,
- INTPTR_PARAMETERS));
+ result.Bind(LoadFixedArrayElement(number_string_cache, index, kPointerSize));
Goto(&done);
Bind(&runtime);
@@ -3626,7 +3821,8 @@ Node* CodeStubAssembler::NumberToString(compiler::Node* context,
Bind(&smi);
{
// Load the smi key, make sure it matches the smi we're looking for.
- Node* smi_index = WordAnd(WordShl(argument, one), mask);
+ Node* smi_index = BitcastWordToTagged(
+ WordAnd(WordShl(BitcastTaggedToWord(argument), one), mask));
Node* smi_key = LoadFixedArrayElement(number_string_cache, smi_index, 0,
SMI_PARAMETERS);
GotoIf(WordNotEqual(smi_key, argument), &runtime);
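// Scalar sketch of the cache indexing above (cf. Heap::GetNumberStringCache:
// the cache is a single FixedArray of (key, string) pairs whose length is a
// power of two; smi keys hash by their value):
int NumberStringCacheEntry(uint32_t low, uint32_t high, int cache_length) {
  int mask = (cache_length >> 1) - 1;              // number of pairs minus one
  int hash = static_cast<int>(low ^ high) & mask;  // heap number: xor halves
  return hash * 2;                                 // key at i, string at i + 1
}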
@@ -3643,9 +3839,6 @@ Node* CodeStubAssembler::NumberToString(compiler::Node* context,
}
Node* CodeStubAssembler::ToName(Node* context, Node* value) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
Label end(this);
Variable var_result(this, MachineRepresentation::kTagged);
@@ -3694,7 +3887,7 @@ Node* CodeStubAssembler::ToName(Node* context, Node* value) {
Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
// Assert input is a HeapObject (not smi or heap number)
CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(input)));
- CSA_ASSERT(this, Word32NotEqual(LoadMap(input), HeapNumberMapConstant()));
+ CSA_ASSERT(this, Word32BinaryNot(IsHeapNumberMap(LoadMap(input))));
// We might need to loop once here due to ToPrimitive conversions.
Variable var_input(this, MachineRepresentation::kTagged);
@@ -3745,7 +3938,7 @@ Node* CodeStubAssembler::NonNumberToNumber(Node* context, Node* input) {
Label if_resultisnumber(this), if_resultisnotnumber(this);
GotoIf(TaggedIsSmi(result), &if_resultisnumber);
Node* result_map = LoadMap(result);
- Branch(WordEqual(result_map, HeapNumberMapConstant()), &if_resultisnumber,
+ Branch(IsHeapNumberMap(result_map), &if_resultisnumber,
&if_resultisnotnumber);
Bind(&if_resultisnumber);
@@ -3793,8 +3986,7 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
{
Label not_heap_number(this, Label::kDeferred);
Node* input_map = LoadMap(input);
- GotoIf(Word32NotEqual(input_map, HeapNumberMapConstant()),
- &not_heap_number);
+ GotoUnless(IsHeapNumberMap(input_map), &not_heap_number);
var_result.Bind(input);
Goto(&end);
@@ -3810,6 +4002,108 @@ Node* CodeStubAssembler::ToNumber(Node* context, Node* input) {
return var_result.value();
}
+Node* CodeStubAssembler::ToUint32(Node* context, Node* input) {
+ Node* const float_zero = Float64Constant(0.0);
+ Node* const float_two_32 = Float64Constant(static_cast<double>(1ULL << 32));
+
+ Label out(this);
+
+ Variable var_result(this, MachineRepresentation::kTagged);
+ var_result.Bind(input);
+
+ // Early exit for positive smis.
+ {
+ // TODO(jgruber): This branch and the recheck below can be removed once we
+ // have a ToNumber with multiple exits.
+ Label next(this, Label::kDeferred);
+ Branch(TaggedIsPositiveSmi(input), &out, &next);
+ Bind(&next);
+ }
+
+ Node* const number = ToNumber(context, input);
+ var_result.Bind(number);
+
+ // Perhaps we have a positive smi now.
+ {
+ Label next(this, Label::kDeferred);
+ Branch(TaggedIsPositiveSmi(number), &out, &next);
+ Bind(&next);
+ }
+
+ Label if_isnegativesmi(this), if_isheapnumber(this);
+ Branch(TaggedIsSmi(number), &if_isnegativesmi, &if_isheapnumber);
+
+ Bind(&if_isnegativesmi);
+ {
+ // floor({input}) mod 2^32 === {input} + 2^32.
+ Node* const float_number = SmiToFloat64(number);
+ Node* const float_result = Float64Add(float_number, float_two_32);
+ Node* const result = ChangeFloat64ToTagged(float_result);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&if_isheapnumber);
+ {
+ Label return_zero(this);
+ Node* const value = LoadHeapNumberValue(number);
+
+ {
+ // +-0.
+ Label next(this);
+ Branch(Float64Equal(value, float_zero), &return_zero, &next);
+ Bind(&next);
+ }
+
+ {
+ // NaN.
+ Label next(this);
+ Branch(Float64Equal(value, value), &next, &return_zero);
+ Bind(&next);
+ }
+
+ {
+ // +Infinity.
+ Label next(this);
+ Node* const positive_infinity =
+ Float64Constant(std::numeric_limits<double>::infinity());
+ Branch(Float64Equal(value, positive_infinity), &return_zero, &next);
+ Bind(&next);
+ }
+
+ {
+ // -Infinity.
+ Label next(this);
+ Node* const negative_infinity =
+ Float64Constant(-1.0 * std::numeric_limits<double>::infinity());
+ Branch(Float64Equal(value, negative_infinity), &return_zero, &next);
+ Bind(&next);
+ }
+
+ // Return floor({input}) mod 2^32 (assuming mod semantics that always return
+ // positive results).
+ {
+ Node* x = Float64Floor(value);
+ x = Float64Mod(x, float_two_32);
+ x = Float64Add(x, float_two_32);
+ x = Float64Mod(x, float_two_32);
+
+ Node* const result = ChangeFloat64ToTagged(x);
+ var_result.Bind(result);
+ Goto(&out);
+ }
+
+ Bind(&return_zero);
+ {
+ var_result.Bind(SmiConstant(Smi::kZero));
+ Goto(&out);
+ }
+ }
+
+ Bind(&out);
+ return var_result.value();
+}
+
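// Scalar rendering of the heap-number path above (sketch; mirrors the
// Floor/Mod/Add/Mod node sequence under IEEE double semantics):
#include <cmath>
double ToUint32Double(double value) {
  const double two_32 = 4294967296.0;  // 2^32
  if (value == 0.0 || std::isnan(value) || std::isinf(value)) return 0.0;
  double x = std::fmod(std::floor(value), two_32);  // in (-2^32, 2^32)
  x += two_32;                                      // in (0, 2^33)
  return std::fmod(x, two_32);  // in [0, 2^32); e.g. -1 -> 4294967295
}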
Node* CodeStubAssembler::ToString(Node* context, Node* input) {
Label is_number(this);
Label runtime(this, Label::kDeferred);
@@ -3825,8 +4119,7 @@ Node* CodeStubAssembler::ToString(Node* context, Node* input) {
GotoIf(IsStringInstanceType(input_instance_type), &done);
Label not_heap_number(this);
- Branch(WordNotEqual(input_map, HeapNumberMapConstant()), &not_heap_number,
- &is_number);
+ Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number);
Bind(&is_number);
result.Bind(NumberToString(context, input));
@@ -3935,8 +4228,8 @@ Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
// Check if {arg} is a HeapNumber.
Label if_argisheapnumber(this),
if_argisnotheapnumber(this, Label::kDeferred);
- Branch(WordEqual(LoadMap(arg), HeapNumberMapConstant()),
- &if_argisheapnumber, &if_argisnotheapnumber);
+ Branch(IsHeapNumberMap(LoadMap(arg)), &if_argisheapnumber,
+ &if_argisnotheapnumber);
Bind(&if_argisheapnumber);
{
@@ -4013,6 +4306,17 @@ void CodeStubAssembler::DecrementCounter(StatsCounter* counter, int delta) {
}
}
+void CodeStubAssembler::Increment(Variable& variable, int value,
+ ParameterMode mode) {
+ DCHECK_IMPLIES(mode == INTPTR_PARAMETERS,
+ variable.rep() == MachineType::PointerRepresentation());
+ DCHECK_IMPLIES(mode == SMI_PARAMETERS,
+ variable.rep() == MachineRepresentation::kTagged ||
+ variable.rep() == MachineRepresentation::kTaggedSigned);
+ variable.Bind(
+ IntPtrOrSmiAdd(variable.value(), IntPtrOrSmiConstant(value, mode), mode));
+}
+
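// Usage, replacing the manual pattern rewritten throughout this file:
//   before: count = IntPtrAdd(count, IntPtrConstant(1)); var_count.Bind(count);
//   after:  Increment(var_count);
// (value and mode presumably default to 1 and INTPTR_PARAMETERS in the
// header, given the zero-argument call sites below.)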
void CodeStubAssembler::Use(Label* label) {
GotoIf(Word32Equal(Int32Constant(0), Int32Constant(1)), label);
}
@@ -4029,10 +4333,10 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Goto(if_keyisindex);
Bind(&if_keyisnotindex);
- Node* key_instance_type = LoadInstanceType(key);
+ Node* key_map = LoadMap(key);
// Symbols are unique.
- GotoIf(Word32Equal(key_instance_type, Int32Constant(SYMBOL_TYPE)),
- if_keyisunique);
+ GotoIf(IsSymbolMap(key_map), if_keyisunique);
+ Node* key_instance_type = LoadMapInstanceType(key_map);
// Miss if |key| is not a String.
STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
GotoUnless(IsStringInstanceType(key_instance_type), if_bailout);
@@ -4067,6 +4371,8 @@ Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
template Node* CodeStubAssembler::EntryToIndex<NameDictionary>(Node*, int);
template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
+template Node* CodeStubAssembler::EntryToIndex<SeededNumberDictionary>(Node*,
+ int);
Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
Node* capacity = IntPtrRoundUpToPowerOfTwo32(
@@ -4075,8 +4381,49 @@ Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
}
Node* CodeStubAssembler::IntPtrMax(Node* left, Node* right) {
- return Select(IntPtrGreaterThanOrEqual(left, right), left, right,
- MachineType::PointerRepresentation());
+ return SelectConstant(IntPtrGreaterThanOrEqual(left, right), left, right,
+ MachineType::PointerRepresentation());
+}
+
+Node* CodeStubAssembler::IntPtrMin(Node* left, Node* right) {
+ return SelectConstant(IntPtrLessThanOrEqual(left, right), left, right,
+ MachineType::PointerRepresentation());
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNumberOfElements(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::SetNumberOfElements(Node* dictionary,
+ Node* num_elements_smi) {
+ StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
+ num_elements_smi, SKIP_WRITE_BARRIER);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNumberOfDeletedElements(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary,
+ Dictionary::kNumberOfDeletedElementsIndex);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetCapacity(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex);
+}
+
+template <class Dictionary>
+Node* CodeStubAssembler::GetNextEnumerationIndex(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary,
+ Dictionary::kNextEnumerationIndexIndex);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::SetNextEnumerationIndex(Node* dictionary,
+ Node* next_enum_index_smi) {
+ StoreFixedArrayElement(dictionary, Dictionary::kNextEnumerationIndexIndex,
+ next_enum_index_smi, SKIP_WRITE_BARRIER);
}
template <typename Dictionary>
@@ -4084,14 +4431,15 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* unique_name, Label* if_found,
Variable* var_name_index,
Label* if_not_found,
- int inlined_probes) {
+ int inlined_probes,
+ LookupMode mode) {
CSA_ASSERT(this, IsDictionary(dictionary));
DCHECK_EQ(MachineType::PointerRepresentation(), var_name_index->rep());
+ DCHECK_IMPLIES(mode == kFindInsertionIndex,
+ inlined_probes == 0 && if_found == nullptr);
Comment("NameDictionaryLookup");
- Node* capacity = SmiUntag(LoadFixedArrayElement(
- dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
- INTPTR_PARAMETERS));
+ Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
Node* hash = ChangeUint32ToWord(LoadNameHash(unique_name));
@@ -4103,16 +4451,20 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Node* index = EntryToIndex<Dictionary>(entry);
var_name_index->Bind(index);
- Node* current =
- LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+ Node* current = LoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, unique_name), if_found);
// See Dictionary::NextProbe().
count = IntPtrConstant(i + 1);
entry = WordAnd(IntPtrAdd(entry, count), mask);
}
+ if (mode == kFindInsertionIndex) {
+ // Appease the variable merging algorithm for "Goto(&loop)" below.
+ var_name_index->Bind(IntPtrConstant(0));
+ }
Node* undefined = UndefinedConstant();
+ Node* the_hole = mode == kFindExisting ? nullptr : TheHoleConstant();
Variable var_count(this, MachineType::PointerRepresentation());
Variable var_entry(this, MachineType::PointerRepresentation());
@@ -4123,22 +4475,24 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
Goto(&loop);
Bind(&loop);
{
- Node* count = var_count.value();
Node* entry = var_entry.value();
Node* index = EntryToIndex<Dictionary>(entry);
var_name_index->Bind(index);
- Node* current =
- LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+ Node* current = LoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, undefined), if_not_found);
- GotoIf(WordEqual(current, unique_name), if_found);
+ if (mode == kFindExisting) {
+ GotoIf(WordEqual(current, unique_name), if_found);
+ } else {
+ DCHECK_EQ(kFindInsertionIndex, mode);
+ GotoIf(WordEqual(current, the_hole), if_not_found);
+ }
// See Dictionary::NextProbe().
- count = IntPtrAdd(count, IntPtrConstant(1));
- entry = WordAnd(IntPtrAdd(entry, count), mask);
+ Increment(var_count);
+ entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
- var_count.Bind(count);
var_entry.Bind(entry);
Goto(&loop);
}
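// The probe sequence in both lookup loops is Dictionary::NextProbe's
// triangular probing: with count incrementing 1, 2, 3, ..., each step is
// entry = (entry + count) & mask. Scalar sketch:
uint32_t NextProbe(uint32_t entry, uint32_t count, uint32_t mask) {
  return (entry + count) & mask;  // mask == capacity - 1, a power of two
}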
@@ -4146,13 +4500,13 @@ void CodeStubAssembler::NameDictionaryLookup(Node* dictionary,
// Instantiate template methods to workaround GCC compilation issue.
template void CodeStubAssembler::NameDictionaryLookup<NameDictionary>(
- Node*, Node*, Label*, Variable*, Label*, int);
+ Node*, Node*, Label*, Variable*, Label*, int, LookupMode);
template void CodeStubAssembler::NameDictionaryLookup<GlobalDictionary>(
- Node*, Node*, Label*, Variable*, Label*, int);
+ Node*, Node*, Label*, Variable*, Label*, int, LookupMode);
Node* CodeStubAssembler::ComputeIntegerHash(Node* key, Node* seed) {
// See v8::internal::ComputeIntegerHash()
- Node* hash = key;
+ Node* hash = TruncateWordToWord32(key);
hash = Word32Xor(hash, seed);
hash = Int32Add(Word32Xor(hash, Int32Constant(0xffffffff)),
Word32Shl(hash, Int32Constant(15)));
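// For reference, the scalar hash this node sequence implements (sketch; cf.
// v8::internal::ComputeIntegerHash in utils.h, noting that
// hash ^ 0xffffffff == ~hash in the first mixing step):
uint32_t ComputeIntegerHashScalar(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}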
@@ -4174,9 +4528,7 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
DCHECK_EQ(MachineType::PointerRepresentation(), var_entry->rep());
Comment("NumberDictionaryLookup");
- Node* capacity = SmiUntag(LoadFixedArrayElement(
- dictionary, IntPtrConstant(Dictionary::kCapacityIndex), 0,
- INTPTR_PARAMETERS));
+ Node* capacity = SmiUntag(GetCapacity<Dictionary>(dictionary));
Node* mask = IntPtrSub(capacity, IntPtrConstant(1));
Node* int32_seed;
@@ -4203,12 +4555,10 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Goto(&loop);
Bind(&loop);
{
- Node* count = var_count.value();
Node* entry = var_entry->value();
Node* index = EntryToIndex<Dictionary>(entry);
- Node* current =
- LoadFixedArrayElement(dictionary, index, 0, INTPTR_PARAMETERS);
+ Node* current = LoadFixedArrayElement(dictionary, index);
GotoIf(WordEqual(current, undefined), if_not_found);
Label next_probe(this);
{
@@ -4231,15 +4581,127 @@ void CodeStubAssembler::NumberDictionaryLookup(Node* dictionary,
Bind(&next_probe);
// See Dictionary::NextProbe().
- count = IntPtrAdd(count, IntPtrConstant(1));
- entry = WordAnd(IntPtrAdd(entry, count), mask);
+ Increment(var_count);
+ entry = WordAnd(IntPtrAdd(entry, var_count.value()), mask);
- var_count.Bind(count);
var_entry->Bind(entry);
Goto(&loop);
}
}
+template <class Dictionary>
+void CodeStubAssembler::FindInsertionEntry(Node* dictionary, Node* key,
+ Variable* var_key_index) {
+ UNREACHABLE();
+}
+
+template <>
+void CodeStubAssembler::FindInsertionEntry<NameDictionary>(
+ Node* dictionary, Node* key, Variable* var_key_index) {
+ Label done(this);
+ NameDictionaryLookup<NameDictionary>(dictionary, key, nullptr, var_key_index,
+ &done, 0, kFindInsertionIndex);
+ Bind(&done);
+}
+
+template <class Dictionary>
+void CodeStubAssembler::InsertEntry(Node* dictionary, Node* key, Node* value,
+ Node* index, Node* enum_index) {
+ UNREACHABLE(); // Use specializations instead.
+}
+
+template <>
+void CodeStubAssembler::InsertEntry<NameDictionary>(Node* dictionary,
+ Node* name, Node* value,
+ Node* index,
+ Node* enum_index) {
+ // Store name and value.
+ StoreFixedArrayElement(dictionary, index, name);
+ const int kNameToValueOffset =
+ (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(dictionary, index, value, UPDATE_WRITE_BARRIER,
+ kNameToValueOffset);
+
+ // Prepare details of the new property.
+ Variable var_details(this, MachineRepresentation::kTaggedSigned);
+ const int kInitialIndex = 0;
+ PropertyDetails d(kData, NONE, kInitialIndex, PropertyCellType::kNoCell);
+ enum_index =
+ SmiShl(enum_index, PropertyDetails::DictionaryStorageField::kShift);
+ STATIC_ASSERT(kInitialIndex == 0);
+ var_details.Bind(SmiOr(SmiConstant(d.AsSmi()), enum_index));
+
+ // Private names must be marked non-enumerable.
+ Label not_private(this, &var_details);
+ GotoUnless(IsSymbolMap(LoadMap(name)), &not_private);
+ Node* flags = SmiToWord32(LoadObjectField(name, Symbol::kFlagsOffset));
+ const int kPrivateMask = 1 << Symbol::kPrivateBit;
+ GotoUnless(IsSetWord32(flags, kPrivateMask), &not_private);
+ Node* dont_enum =
+ SmiShl(SmiConstant(DONT_ENUM), PropertyDetails::AttributesField::kShift);
+ var_details.Bind(SmiOr(var_details.value(), dont_enum));
+ Goto(&not_private);
+ Bind(&not_private);
+
+ // Finally, store the details.
+ const int kNameToDetailsOffset =
+ (NameDictionary::kEntryDetailsIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ StoreFixedArrayElement(dictionary, index, var_details.value(),
+ SKIP_WRITE_BARRIER, kNameToDetailsOffset);
+}
+
+template <>
+void CodeStubAssembler::InsertEntry<GlobalDictionary>(Node* dictionary,
+ Node* key, Node* value,
+ Node* index,
+ Node* enum_index) {
+ UNIMPLEMENTED();
+}
+
+template <class Dictionary>
+void CodeStubAssembler::Add(Node* dictionary, Node* key, Node* value,
+ Label* bailout) {
+ Node* capacity = GetCapacity<Dictionary>(dictionary);
+ Node* nof = GetNumberOfElements<Dictionary>(dictionary);
+ Node* new_nof = SmiAdd(nof, SmiConstant(1));
+ // Require 33% to still be free after adding additional_elements.
+ // Computing "x + (x >> 1)" on a Smi x does not return a valid Smi!
+ // But that's OK here because it's only used for a comparison.
+ Node* required_capacity_pseudo_smi = SmiAdd(new_nof, SmiShr(new_nof, 1));
+ GotoIf(SmiBelow(capacity, required_capacity_pseudo_smi), bailout);
+ // Require rehashing if more than 50% of free elements are deleted elements.
+ Node* deleted = GetNumberOfDeletedElements<Dictionary>(dictionary);
+ CSA_ASSERT(this, SmiAbove(capacity, new_nof));
+ Node* half_of_free_elements = SmiShr(SmiSub(capacity, new_nof), 1);
+ GotoIf(SmiAbove(deleted, half_of_free_elements), bailout);
+ Node* enum_index = nullptr;
+ if (Dictionary::kIsEnumerable) {
+ enum_index = GetNextEnumerationIndex<Dictionary>(dictionary);
+ Node* new_enum_index = SmiAdd(enum_index, SmiConstant(1));
+ Node* max_enum_index =
+ SmiConstant(PropertyDetails::DictionaryStorageField::kMax);
+ GotoIf(SmiAbove(new_enum_index, max_enum_index), bailout);
+
+ // No more bailouts after this point.
+ // Operations from here on can have side effects.
+
+ SetNextEnumerationIndex<Dictionary>(dictionary, new_enum_index);
+ } else {
+ USE(enum_index);
+ }
+ SetNumberOfElements<Dictionary>(dictionary, new_nof);
+
+ Variable var_key_index(this, MachineType::PointerRepresentation());
+ FindInsertionEntry<Dictionary>(dictionary, key, &var_key_index);
+ InsertEntry<Dictionary>(dictionary, key, value, var_key_index.value(),
+ enum_index);
+}
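// Scalar form of the two bailout guards in Add above (sketch; plain ints
// stand in for the Smi arithmetic):
bool DictionaryAddNeedsBailout(int capacity, int nof, int deleted) {
  int new_nof = nof + 1;
  if (capacity < new_nof + (new_nof >> 1)) return true;    // keep 33% free
  if (deleted > ((capacity - new_nof) >> 1)) return true;  // rehash instead
  return false;
}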
+
+template void CodeStubAssembler::Add<NameDictionary>(Node*, Node*, Node*,
+ Label*);
+
void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
Node* descriptors, Node* nof,
Label* if_found,
@@ -4251,13 +4713,11 @@ void CodeStubAssembler::DescriptorLookupLinear(Node* unique_name,
BuildFastLoop(
MachineType::PointerRepresentation(), last_exclusive, first_inclusive,
- [descriptors, unique_name, if_found, var_name_index](
- CodeStubAssembler* assembler, Node* name_index) {
- Node* candidate_name = assembler->LoadFixedArrayElement(
- descriptors, name_index, 0, INTPTR_PARAMETERS);
+ [this, descriptors, unique_name, if_found,
+ var_name_index](Node* name_index) {
+ Node* candidate_name = LoadFixedArrayElement(descriptors, name_index);
var_name_index->Bind(name_index);
- assembler->GotoIf(assembler->WordEqual(candidate_name, unique_name),
- if_found);
+ GotoIf(WordEqual(candidate_name, unique_name), if_found);
},
-DescriptorArray::kDescriptorSize, IndexAdvanceMode::kPre);
Goto(if_not_found);
@@ -4332,11 +4792,10 @@ void CodeStubAssembler::TryLookupProperty(
}
}
-void CodeStubAssembler::TryHasOwnProperty(compiler::Node* object,
- compiler::Node* map,
- compiler::Node* instance_type,
- compiler::Node* unique_name,
- Label* if_found, Label* if_not_found,
+void CodeStubAssembler::TryHasOwnProperty(Node* object, Node* map,
+ Node* instance_type,
+ Node* unique_name, Label* if_found,
+ Label* if_not_found,
Label* if_bailout) {
Comment("TryHasOwnProperty");
Variable var_meta_storage(this, MachineRepresentation::kTagged);
@@ -4666,8 +5125,7 @@ void CodeStubAssembler::TryLookupElement(Node* object, Node* map,
GotoUnless(UintPtrLessThan(intptr_index, length), &if_oob);
- Node* element =
- LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
+ Node* element = LoadFixedArrayElement(elements, intptr_index);
Node* the_hole = TheHoleConstant();
Branch(WordEqual(element, the_hole), if_not_found, if_found);
}
@@ -4727,8 +5185,8 @@ template void CodeStubAssembler::NumberDictionaryLookup<
UnseededNumberDictionary>(Node*, Node*, Label*, Variable*, Label*);
void CodeStubAssembler::TryPrototypeChainLookup(
- Node* receiver, Node* key, LookupInHolder& lookup_property_in_holder,
- LookupInHolder& lookup_element_in_holder, Label* if_end,
+ Node* receiver, Node* key, const LookupInHolder& lookup_property_in_holder,
+ const LookupInHolder& lookup_element_in_holder, Label* if_end,
Label* if_bailout) {
// Ensure receiver is JSReceiver, otherwise bailout.
Label if_objectisnotsmi(this);
@@ -4756,7 +5214,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
{
Variable var_holder(this, MachineRepresentation::kTagged);
Variable var_holder_map(this, MachineRepresentation::kTagged);
- Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
Variable* merged_variables[] = {&var_holder, &var_holder_map,
&var_holder_instance_type};
@@ -4800,7 +5258,7 @@ void CodeStubAssembler::TryPrototypeChainLookup(
{
Variable var_holder(this, MachineRepresentation::kTagged);
Variable var_holder_map(this, MachineRepresentation::kTagged);
- Variable var_holder_instance_type(this, MachineRepresentation::kWord8);
+ Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
Variable* merged_variables[] = {&var_holder, &var_holder_map,
&var_holder_instance_type};
@@ -4971,10 +5429,10 @@ Node* CodeStubAssembler::OrdinaryHasInstance(Node* context, Node* callable,
return var_result.value();
}
-compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
- ElementsKind kind,
- ParameterMode mode,
- int base_size) {
+Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
+ ElementsKind kind,
+ ParameterMode mode,
+ int base_size) {
int element_size_shift = ElementsKindToShiftSize(kind);
int element_size = 1 << element_size_shift;
int const kSmiShiftBits = kSmiShiftSize + kSmiTagSize;
@@ -4986,10 +5444,6 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
constant_index = ToSmiConstant(index_node, smi_index);
if (constant_index) index = smi_index->value();
index_node = BitcastTaggedToWord(index_node);
- } else if (mode == INTEGER_PARAMETERS) {
- int32_t temp = 0;
- constant_index = ToInt32Constant(index_node, temp);
- index = static_cast<intptr_t>(temp);
} else {
DCHECK(mode == INTPTR_PARAMETERS);
constant_index = ToIntPtrConstant(index_node, index);
@@ -4997,9 +5451,6 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
if (constant_index) {
return IntPtrConstant(base_size + element_size * index);
}
- if (Is64() && mode == INTEGER_PARAMETERS) {
- index_node = ChangeInt32ToInt64(index_node);
- }
Node* shifted_index =
(element_size_shift == 0)
@@ -5007,32 +5458,30 @@ compiler::Node* CodeStubAssembler::ElementOffsetFromIndex(Node* index_node,
: ((element_size_shift > 0)
? WordShl(index_node, IntPtrConstant(element_size_shift))
: WordShr(index_node, IntPtrConstant(-element_size_shift)));
- return IntPtrAddFoldConstants(IntPtrConstant(base_size), shifted_index);
+ return IntPtrAdd(IntPtrConstant(base_size), shifted_index);
}
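// Worked example (sketch): under SMI_PARAMETERS the index node keeps its Smi
// tag and element_size_shift is reduced by kSmiShiftBits (in code elided from
// this hunk). On 32-bit targets (kSmiShiftBits == 1), a kPointerSize element
// access (element_size_shift == 2) therefore emits
//   offset = base_size + (tagged_index << 1)  // == base_size + index * 4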
-compiler::Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
+Node* CodeStubAssembler::LoadTypeFeedbackVectorForStub() {
Node* function =
LoadFromParentFrame(JavaScriptFrameConstants::kFunctionOffset);
Node* literals = LoadObjectField(function, JSFunction::kLiteralsOffset);
return LoadObjectField(literals, LiteralsArray::kFeedbackVectorOffset);
}
-void CodeStubAssembler::UpdateFeedback(compiler::Node* feedback,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id) {
+void CodeStubAssembler::UpdateFeedback(Node* feedback,
+ Node* type_feedback_vector,
+ Node* slot_id) {
// This method is used for binary op and compare feedback. These
// vector nodes are initialized with a smi 0, so we can simply OR
// our new feedback in place.
- // TODO(interpreter): Consider passing the feedback as Smi already to avoid
- // the tagging completely.
Node* previous_feedback =
LoadFixedArrayElement(type_feedback_vector, slot_id);
- Node* combined_feedback = SmiOr(previous_feedback, SmiFromWord32(feedback));
+ Node* combined_feedback = SmiOr(previous_feedback, feedback);
StoreFixedArrayElement(type_feedback_vector, slot_id, combined_feedback,
SKIP_WRITE_BARRIER);
}
-compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
+Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
Variable var_receiver_map(this, MachineRepresentation::kTagged);
Label load_smi_map(this, Label::kDeferred), load_receiver_map(this),
if_result(this);
@@ -5052,248 +5501,12 @@ compiler::Node* CodeStubAssembler::LoadReceiverMap(compiler::Node* receiver) {
return var_receiver_map.value();
}
-compiler::Node* CodeStubAssembler::TryMonomorphicCase(
- compiler::Node* slot, compiler::Node* vector, compiler::Node* receiver_map,
- Label* if_handler, Variable* var_handler, Label* if_miss) {
- Comment("TryMonomorphicCase");
- DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
-
- // TODO(ishell): add helper class that hides offset computations for a series
- // of loads.
- int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
- // Adding |header_size| with a separate IntPtrAdd rather than passing it
- // into ElementOffsetFromIndex() allows it to be folded into a single
- // [base, index, offset] indirect memory access on x64.
- Node* offset =
- ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
- Node* feedback = Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(header_size)));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
- if_miss);
-
- Node* handler =
- Load(MachineType::AnyTagged(), vector,
- IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
-
- var_handler->Bind(handler);
- Goto(if_handler);
- return feedback;
-}
-
-void CodeStubAssembler::HandlePolymorphicCase(
- compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
- Variable* var_handler, Label* if_miss, int unroll_count) {
- Comment("HandlePolymorphicCase");
- DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
-
- // Iterate {feedback} array.
- const int kEntrySize = 2;
-
- for (int i = 0; i < unroll_count; i++) {
- Label next_entry(this);
- Node* cached_map = LoadWeakCellValue(LoadFixedArrayElement(
- feedback, IntPtrConstant(i * kEntrySize), 0, INTPTR_PARAMETERS));
- GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
-
- // Found, now call handler.
- Node* handler = LoadFixedArrayElement(
- feedback, IntPtrConstant(i * kEntrySize + 1), 0, INTPTR_PARAMETERS);
- var_handler->Bind(handler);
- Goto(if_handler);
-
- Bind(&next_entry);
- }
-
- // Loop from {unroll_count}*kEntrySize to {length}.
- Node* init = IntPtrConstant(unroll_count * kEntrySize);
- Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
- BuildFastLoop(
- MachineType::PointerRepresentation(), init, length,
- [receiver_map, feedback, if_handler, var_handler](CodeStubAssembler* csa,
- Node* index) {
- Node* cached_map = csa->LoadWeakCellValue(
- csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
-
- Label next_entry(csa);
- csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
-
- // Found, now call handler.
- Node* handler = csa->LoadFixedArrayElement(
- feedback, index, kPointerSize, INTPTR_PARAMETERS);
- var_handler->Bind(handler);
- csa->Goto(if_handler);
-
- csa->Bind(&next_entry);
- },
- kEntrySize, IndexAdvanceMode::kPost);
- // The loop falls through if no handler was found.
- Goto(if_miss);
-}
-
-void CodeStubAssembler::HandleKeyedStorePolymorphicCase(
- compiler::Node* receiver_map, compiler::Node* feedback, Label* if_handler,
- Variable* var_handler, Label* if_transition_handler,
- Variable* var_transition_map_cell, Label* if_miss) {
- DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
-
- const int kEntrySize = 3;
-
- Node* init = IntPtrConstant(0);
- Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
- BuildFastLoop(
- MachineType::PointerRepresentation(), init, length,
- [receiver_map, feedback, if_handler, var_handler, if_transition_handler,
- var_transition_map_cell](CodeStubAssembler* csa, Node* index) {
- Node* cached_map = csa->LoadWeakCellValue(
- csa->LoadFixedArrayElement(feedback, index, 0, INTPTR_PARAMETERS));
- Label next_entry(csa);
- csa->GotoIf(csa->WordNotEqual(receiver_map, cached_map), &next_entry);
-
- Node* maybe_transition_map_cell = csa->LoadFixedArrayElement(
- feedback, index, kPointerSize, INTPTR_PARAMETERS);
-
- var_handler->Bind(csa->LoadFixedArrayElement(
- feedback, index, 2 * kPointerSize, INTPTR_PARAMETERS));
- csa->GotoIf(
- csa->WordEqual(maybe_transition_map_cell,
- csa->LoadRoot(Heap::kUndefinedValueRootIndex)),
- if_handler);
- var_transition_map_cell->Bind(maybe_transition_map_cell);
- csa->Goto(if_transition_handler);
-
- csa->Bind(&next_entry);
- },
- kEntrySize, IndexAdvanceMode::kPost);
- // The loop falls through if no handler was found.
- Goto(if_miss);
-}
-
-compiler::Node* CodeStubAssembler::StubCachePrimaryOffset(compiler::Node* name,
- compiler::Node* map) {
- // See v8::internal::StubCache::PrimaryOffset().
- STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
- // Compute the hash of the name (use entire hash field).
- Node* hash_field = LoadNameHashField(name);
- CSA_ASSERT(this,
- Word32Equal(Word32And(hash_field,
- Int32Constant(Name::kHashNotComputedMask)),
- Int32Constant(0)));
-
- // Using only the low bits in 64-bit mode is unlikely to increase the
- // risk of collision even if the heap is spread over an area larger than
- // 4Gb (and not at all if it isn't).
- Node* hash = Int32Add(hash_field, map);
- // Base the offset on a simple combination of name and map.
- hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
- uint32_t mask = (StubCache::kPrimaryTableSize - 1)
- << StubCache::kCacheIndexShift;
- return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
-}
-
-compiler::Node* CodeStubAssembler::StubCacheSecondaryOffset(
- compiler::Node* name, compiler::Node* seed) {
- // See v8::internal::StubCache::SecondaryOffset().
-
- // Use the seed from the primary cache in the secondary cache.
- Node* hash = Int32Sub(seed, name);
- hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
- int32_t mask = (StubCache::kSecondaryTableSize - 1)
- << StubCache::kCacheIndexShift;
- return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
-}
-
-enum CodeStubAssembler::StubCacheTable : int {
- kPrimary = static_cast<int>(StubCache::kPrimary),
- kSecondary = static_cast<int>(StubCache::kSecondary)
-};
-
-void CodeStubAssembler::TryProbeStubCacheTable(
- StubCache* stub_cache, StubCacheTable table_id,
- compiler::Node* entry_offset, compiler::Node* name, compiler::Node* map,
- Label* if_handler, Variable* var_handler, Label* if_miss) {
- StubCache::Table table = static_cast<StubCache::Table>(table_id);
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- Goto(if_miss);
- return;
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- Goto(if_miss);
- return;
- }
-#endif
- // The {table_offset} holds the entry offset times four (due to masking
- // and shifting optimizations).
- const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
- entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
-
- // Check that the key in the entry matches the name.
- Node* key_base =
- ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
- Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
- GotoIf(WordNotEqual(name, entry_key), if_miss);
-
- // Get the map entry from the cache.
- DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
- stub_cache->key_reference(table).address());
- Node* entry_map =
- Load(MachineType::Pointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
- GotoIf(WordNotEqual(map, entry_map), if_miss);
-
- DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
- stub_cache->key_reference(table).address());
- Node* handler = Load(MachineType::TaggedPointer(), key_base,
- IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
-
- // We found the handler.
- var_handler->Bind(handler);
- Goto(if_handler);
-}
-
-void CodeStubAssembler::TryProbeStubCache(
- StubCache* stub_cache, compiler::Node* receiver, compiler::Node* name,
- Label* if_handler, Variable* var_handler, Label* if_miss) {
- Label try_secondary(this), miss(this);
-
- Counters* counters = isolate()->counters();
- IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the {receiver} isn't a smi.
- GotoIf(TaggedIsSmi(receiver), &miss);
-
- Node* receiver_map = LoadMap(receiver);
-
- // Probe the primary table.
- Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
- TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
- receiver_map, if_handler, var_handler, &try_secondary);
-
- Bind(&try_secondary);
- {
- // Probe the secondary table.
- Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
- TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
- receiver_map, if_handler, var_handler, &miss);
- }
-
- Bind(&miss);
- {
- IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
- Goto(if_miss);
- }
-}
-
Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
Variable var_intptr_key(this, MachineType::PointerRepresentation());
Label done(this, &var_intptr_key), key_is_smi(this);
GotoIf(TaggedIsSmi(key), &key_is_smi);
// Try to convert a heap number to a Smi.
- GotoUnless(WordEqual(LoadMap(key), HeapNumberMapConstant()), miss);
+ GotoUnless(IsHeapNumberMap(LoadMap(key)), miss);
{
Node* value = LoadHeapNumberValue(key);
Node* int_value = RoundFloat64ToInt32(value);
@@ -5312,1377 +5525,6 @@ Node* CodeStubAssembler::TryToIntptr(Node* key, Label* miss) {
return var_intptr_key.value();
}
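// Scalar analogue of TryToIntptr's heap-number path: accept only doubles that
// round-trip through int32 (sketch; the elided stub code compares the rounded
// value against the original and misses on any difference):
#include <cstdint>
bool TryToIntptrScalar(double value, intptr_t* out) {
  if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;  // incl. NaN
  int32_t rounded = static_cast<int32_t>(value);  // cf. RoundFloat64ToInt32
  if (static_cast<double>(rounded) != value) return false;  // fractional part
  *out = rounded;
  return true;
}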
-void CodeStubAssembler::EmitFastElementsBoundsCheck(Node* object,
- Node* elements,
- Node* intptr_index,
- Node* is_jsarray_condition,
- Label* miss) {
- Variable var_length(this, MachineType::PointerRepresentation());
- Comment("Fast elements bounds check");
- Label if_array(this), length_loaded(this, &var_length);
- GotoIf(is_jsarray_condition, &if_array);
- {
- var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
- Goto(&length_loaded);
- }
- Bind(&if_array);
- {
- var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
- Goto(&length_loaded);
- }
- Bind(&length_loaded);
- GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
-}
-
-void CodeStubAssembler::EmitElementLoad(Node* object, Node* elements,
- Node* elements_kind, Node* intptr_index,
- Node* is_jsarray_condition,
- Label* if_hole, Label* rebox_double,
- Variable* var_double_value,
- Label* unimplemented_elements_kind,
- Label* out_of_bounds, Label* miss) {
- Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
- if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
- if_dictionary(this);
- GotoIf(
- IntPtrGreaterThan(elements_kind, IntPtrConstant(LAST_FAST_ELEMENTS_KIND)),
- &if_nonfast);
-
- EmitFastElementsBoundsCheck(object, elements, intptr_index,
- is_jsarray_condition, out_of_bounds);
- int32_t kinds[] = {// Handled by if_fast_packed.
- FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- // Handled by if_fast_holey.
- FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
- // Handled by if_fast_double.
- FAST_DOUBLE_ELEMENTS,
- // Handled by if_fast_holey_double.
- FAST_HOLEY_DOUBLE_ELEMENTS};
- Label* labels[] = {// FAST_{SMI,}_ELEMENTS
- &if_fast_packed, &if_fast_packed,
- // FAST_HOLEY_{SMI,}_ELEMENTS
- &if_fast_holey, &if_fast_holey,
- // FAST_DOUBLE_ELEMENTS
- &if_fast_double,
- // FAST_HOLEY_DOUBLE_ELEMENTS
- &if_fast_holey_double};
- Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
- arraysize(kinds));
-
- Bind(&if_fast_packed);
- {
- Comment("fast packed elements");
- Return(LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS));
- }
-
- Bind(&if_fast_holey);
- {
- Comment("fast holey elements");
- Node* element =
- LoadFixedArrayElement(elements, intptr_index, 0, INTPTR_PARAMETERS);
- GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
- Return(element);
- }
-
- Bind(&if_fast_double);
- {
- Comment("packed double elements");
- var_double_value->Bind(LoadFixedDoubleArrayElement(
- elements, intptr_index, MachineType::Float64(), 0, INTPTR_PARAMETERS));
- Goto(rebox_double);
- }
-
- Bind(&if_fast_holey_double);
- {
- Comment("holey double elements");
- Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
- MachineType::Float64(), 0,
- INTPTR_PARAMETERS, if_hole);
- var_double_value->Bind(value);
- Goto(rebox_double);
- }
-
- Bind(&if_nonfast);
- {
- STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
- GotoIf(IntPtrGreaterThanOrEqual(
- elements_kind,
- IntPtrConstant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
- &if_typed_array);
- GotoIf(IntPtrEqual(elements_kind, IntPtrConstant(DICTIONARY_ELEMENTS)),
- &if_dictionary);
- Goto(unimplemented_elements_kind);
- }
-
- Bind(&if_dictionary);
- {
- Comment("dictionary elements");
- GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
- Variable var_entry(this, MachineType::PointerRepresentation());
- Label if_found(this);
- NumberDictionaryLookup<SeededNumberDictionary>(
- elements, intptr_index, &if_found, &var_entry, if_hole);
- Bind(&if_found);
- // Check that the value is a data property.
- Node* details_index = EntryToIndex<SeededNumberDictionary>(
- var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
- Node* details = SmiToWord32(
- LoadFixedArrayElement(elements, details_index, 0, INTPTR_PARAMETERS));
- Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
-    // TODO(jkummerow): Support accessors without going to the miss label?
- GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
- // Finally, load the value.
- Node* value_index = EntryToIndex<SeededNumberDictionary>(
- var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
- Return(LoadFixedArrayElement(elements, value_index, 0, INTPTR_PARAMETERS));
- }
-
- Bind(&if_typed_array);
- {
- Comment("typed elements");
- // Check if buffer has been neutered.
- Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
- MachineType::Uint32());
- Node* neutered_bit =
- Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
- GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), miss);
-
- // Bounds check.
- Node* length =
- SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
- GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
-
- // Backing store = external_pointer + base_pointer.
- Node* external_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
- MachineType::Pointer());
- Node* base_pointer =
- LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
-
- Label uint8_elements(this), int8_elements(this), uint16_elements(this),
- int16_elements(this), uint32_elements(this), int32_elements(this),
- float32_elements(this), float64_elements(this);
- Label* elements_kind_labels[] = {
- &uint8_elements, &uint8_elements, &int8_elements,
- &uint16_elements, &int16_elements, &uint32_elements,
- &int32_elements, &float32_elements, &float64_elements};
- int32_t elements_kinds[] = {
- UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
- UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
- INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
- const size_t kTypedElementsKindCount =
- LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
- FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
- DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
- Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
- kTypedElementsKindCount);
- Bind(&uint8_elements);
- {
- Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
- Return(SmiTag(Load(MachineType::Uint8(), backing_store, intptr_index)));
- }
- Bind(&int8_elements);
- {
- Comment("INT8_ELEMENTS");
- Return(SmiTag(Load(MachineType::Int8(), backing_store, intptr_index)));
- }
- Bind(&uint16_elements);
- {
- Comment("UINT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Return(SmiTag(Load(MachineType::Uint16(), backing_store, index)));
- }
- Bind(&int16_elements);
- {
- Comment("INT16_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(1));
- Return(SmiTag(Load(MachineType::Int16(), backing_store, index)));
- }
- Bind(&uint32_elements);
- {
- Comment("UINT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Uint32(), backing_store, index);
- Return(ChangeUint32ToTagged(element));
- }
- Bind(&int32_elements);
- {
- Comment("INT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Int32(), backing_store, index);
- Return(ChangeInt32ToTagged(element));
- }
- Bind(&float32_elements);
- {
- Comment("FLOAT32_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(2));
- Node* element = Load(MachineType::Float32(), backing_store, index);
- var_double_value->Bind(ChangeFloat32ToFloat64(element));
- Goto(rebox_double);
- }
- Bind(&float64_elements);
- {
- Comment("FLOAT64_ELEMENTS");
- Node* index = WordShl(intptr_index, IntPtrConstant(3));
- Node* element = Load(MachineType::Float64(), backing_store, index);
- var_double_value->Bind(element);
- Goto(rebox_double);
- }
- }
-}
-
-void CodeStubAssembler::HandleLoadICHandlerCase(
- const LoadICParameters* p, Node* handler, Label* miss,
- ElementSupport support_elements) {
- Comment("have_handler");
- Variable var_holder(this, MachineRepresentation::kTagged);
- var_holder.Bind(p->receiver);
- Variable var_smi_handler(this, MachineRepresentation::kTagged);
- var_smi_handler.Bind(handler);
-
- Variable* vars[] = {&var_holder, &var_smi_handler};
- Label if_smi_handler(this, 2, vars);
- Label try_proto_handler(this), call_handler(this);
-
- Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
-
- // |handler| is a Smi, encoding what to do. See SmiHandler methods
- // for the encoding format.
- Bind(&if_smi_handler);
- {
- HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
- miss, support_elements);
- }
-
- Bind(&try_proto_handler);
- {
- GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
- HandleLoadICProtoHandler(p, handler, &var_holder, &var_smi_handler,
- &if_smi_handler, miss);
- }
-
- Bind(&call_handler);
- {
- typedef LoadWithVectorDescriptor Descriptor;
- TailCallStub(Descriptor(isolate()), handler, p->context,
- Arg(Descriptor::kReceiver, p->receiver),
- Arg(Descriptor::kName, p->name),
- Arg(Descriptor::kSlot, p->slot),
- Arg(Descriptor::kVector, p->vector));
- }
-}
-
-void CodeStubAssembler::HandleLoadICSmiHandlerCase(
- const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
- ElementSupport support_elements) {
- Variable var_double_value(this, MachineRepresentation::kFloat64);
- Label rebox_double(this, &var_double_value);
-
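-  // The Smi handler word packs the handler kind and its parameters into bit
-  // fields; decode the kind first.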
- Node* handler_word = SmiUntag(smi_handler);
- Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
- if (support_elements == kSupportElements) {
- Label property(this);
- GotoUnless(
- WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
- &property);
-
- Comment("element_load");
- Node* intptr_index = TryToIntptr(p->name, miss);
- Node* elements = LoadElements(holder);
- Node* is_jsarray_condition =
- IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
- Node* elements_kind =
- DecodeWord<LoadHandler::ElementsKindBits>(handler_word);
- Label if_hole(this), unimplemented_elements_kind(this);
- Label* out_of_bounds = miss;
- EmitElementLoad(holder, elements, elements_kind, intptr_index,
- is_jsarray_condition, &if_hole, &rebox_double,
- &var_double_value, &unimplemented_elements_kind,
- out_of_bounds, miss);
-
- Bind(&unimplemented_elements_kind);
- {
- // Smi handlers should only be installed for supported elements kinds.
- // Crash if we get here.
- DebugBreak();
- Goto(miss);
- }
-
- Bind(&if_hole);
- {
- Comment("convert hole");
- GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
- Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
- DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
- GotoUnless(
- WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
- SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
- miss);
- Return(UndefinedConstant());
- }
-
- Bind(&property);
- Comment("property_load");
- }
-
- Label constant(this), field(this);
- Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
- &field, &constant);
-
- Bind(&field);
- {
- Comment("field_load");
- Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
-
- Label inobject(this), out_of_object(this);
- Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
- &out_of_object);
-
- Bind(&inobject);
- {
- Label is_double(this);
- GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
- Return(LoadObjectField(holder, offset));
-
- Bind(&is_double);
- if (FLAG_unbox_double_fields) {
- var_double_value.Bind(
- LoadObjectField(holder, offset, MachineType::Float64()));
- } else {
- Node* mutable_heap_number = LoadObjectField(holder, offset);
- var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
- }
- Goto(&rebox_double);
- }
-
- Bind(&out_of_object);
- {
- Label is_double(this);
- Node* properties = LoadProperties(holder);
- Node* value = LoadObjectField(properties, offset);
- GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
- Return(value);
-
- Bind(&is_double);
- var_double_value.Bind(LoadHeapNumberValue(value));
- Goto(&rebox_double);
- }
-
- Bind(&rebox_double);
- Return(AllocateHeapNumberWithValue(var_double_value.value()));
- }
-
- Bind(&constant);
- {
- Comment("constant_load");
- Node* descriptors = LoadMapDescriptors(LoadMap(holder));
- Node* descriptor =
- DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
- CSA_ASSERT(this,
- UintPtrLessThan(descriptor,
- LoadAndUntagFixedArrayBaseLength(descriptors)));
- Node* value =
- LoadFixedArrayElement(descriptors, descriptor, 0, INTPTR_PARAMETERS);
-
- Label if_accessor_info(this);
- GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
- &if_accessor_info);
- Return(value);
-
- Bind(&if_accessor_info);
- Callable callable = CodeFactory::ApiGetter(isolate());
- TailCallStub(callable, p->context, p->receiver, holder, value);
- }
-}
-
-void CodeStubAssembler::HandleLoadICProtoHandler(
- const LoadICParameters* p, Node* handler, Variable* var_holder,
- Variable* var_smi_handler, Label* if_smi_handler, Label* miss) {
- DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
- DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
-
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
- LoadHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
- LoadHandler::kValidityCellOffset);
-
-  // Both FixedArray and Tuple3 handlers have the validity cell at the same
-  // offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, LoadHandler::kValidityCellOffset);
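-  // A validity cell of zero means there is nothing to check; skip ahead.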
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
- miss);
- Goto(&validity_cell_check_done);
-
- Bind(&validity_cell_check_done);
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
- Node* handler_flags = SmiUntag(smi_handler);
-
- Label check_prototypes(this);
- GotoUnless(
- IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
- &check_prototypes);
- {
- CSA_ASSERT(this, Word32BinaryNot(
- HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
- // We have a dictionary receiver, do a negative lookup check.
- NameDictionaryNegativeLookup(p->receiver, p->name, miss);
- Goto(&check_prototypes);
- }
-
- Bind(&check_prototypes);
- Node* maybe_holder_cell =
- LoadObjectField(handler, LoadHandler::kHolderCellOffset);
- Label array_handler(this), tuple_handler(this);
- Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
-
- Bind(&tuple_handler);
- {
- Label load_existent(this);
- GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
- // This is a handler for a load of a non-existent value.
- Return(UndefinedConstant());
-
- Bind(&load_existent);
- Node* holder = LoadWeakCellValue(maybe_holder_cell);
- // The |holder| is guaranteed to be alive at this point since we passed
- // both the receiver map check and the validity cell check.
- CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
-
- var_holder->Bind(holder);
- var_smi_handler->Bind(smi_handler);
- Goto(if_smi_handler);
- }
-
- Bind(&array_handler);
- {
- typedef LoadICProtoArrayDescriptor Descriptor;
- LoadICProtoArrayStub stub(isolate());
- Node* target = HeapConstant(stub.GetCode());
- TailCallStub(Descriptor(isolate()), target, p->context,
- Arg(Descriptor::kReceiver, p->receiver),
- Arg(Descriptor::kName, p->name),
- Arg(Descriptor::kSlot, p->slot),
- Arg(Descriptor::kVector, p->vector),
- Arg(Descriptor::kHandler, handler));
- }
-}
-
-void CodeStubAssembler::LoadICProtoArray(const LoadICParameters* p,
- Node* handler) {
- Label miss(this);
- CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
- CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
-
- Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
- Node* handler_flags = SmiUntag(smi_handler);
-
- Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
-
- Node* holder = EmitLoadICProtoArrayCheck(p, handler, handler_length,
- handler_flags, &miss);
-
- HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
-
- Bind(&miss);
- {
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
- }
-}
-
-Node* CodeStubAssembler::EmitLoadICProtoArrayCheck(const LoadICParameters* p,
- Node* handler,
- Node* handler_length,
- Node* handler_flags,
- Label* miss) {
- Variable start_index(this, MachineType::PointerRepresentation());
- start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
-
- Label can_access(this);
- GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
- &can_access);
- {
-    // Skip the handler's first entry: it holds the expected native context,
-    // which is validated right here rather than in the prototype loop below.
- start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
-
- int offset =
- FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
- Node* expected_native_context =
- LoadWeakCellValue(LoadObjectField(handler, offset), miss);
- CSA_ASSERT(this, IsNativeContext(expected_native_context));
-
- Node* native_context = LoadNativeContext(p->context);
- GotoIf(WordEqual(expected_native_context, native_context), &can_access);
- // If the receiver is not a JSGlobalProxy then we miss.
- GotoUnless(IsJSGlobalProxy(p->receiver), miss);
- // For JSGlobalProxy receiver try to compare security tokens of current
- // and expected native contexts.
- Node* expected_token = LoadContextElement(expected_native_context,
- Context::SECURITY_TOKEN_INDEX);
- Node* current_token =
- LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
- Branch(WordEqual(expected_token, current_token), &can_access, miss);
- }
- Bind(&can_access);
-
- BuildFastLoop(
- MachineType::PointerRepresentation(), start_index.value(), handler_length,
- [this, p, handler, miss](CodeStubAssembler*, Node* current) {
- Node* prototype_cell =
- LoadFixedArrayElement(handler, current, 0, INTPTR_PARAMETERS);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, IndexAdvanceMode::kPost);
-
- Node* maybe_holder_cell = LoadFixedArrayElement(
- handler, IntPtrConstant(LoadHandler::kHolderCellIndex), 0,
- INTPTR_PARAMETERS);
- Label load_existent(this);
- GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
- // This is a handler for a load of a non-existent value.
- Return(UndefinedConstant());
-
- Bind(&load_existent);
- Node* holder = LoadWeakCellValue(maybe_holder_cell);
- // The |holder| is guaranteed to be alive at this point since we passed
- // the receiver map check, the validity cell check and the prototype chain
- // check.
- CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
- return holder;
-}
-
-void CodeStubAssembler::CheckPrototype(Node* prototype_cell, Node* name,
- Label* miss) {
- Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
-
- Label done(this);
- Label if_property_cell(this), if_dictionary_object(this);
-
- // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
- Branch(WordEqual(LoadMap(maybe_prototype),
- LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
- &if_property_cell, &if_dictionary_object);
-
- Bind(&if_dictionary_object);
- {
- CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
- NameDictionaryNegativeLookup(maybe_prototype, name, miss);
- Goto(&done);
- }
-
- Bind(&if_property_cell);
- {
- // Ensure the property cell still contains the hole.
- Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
- GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
- Goto(&done);
- }
-
- Bind(&done);
-}
-
-void CodeStubAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
- Label* miss) {
- CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
- Node* properties = LoadProperties(object);
- // Ensure the property does not exist in a dictionary-mode object.
- Variable var_name_index(this, MachineType::PointerRepresentation());
- Label done(this);
- NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
- &done);
- Bind(&done);
-}
-
-void CodeStubAssembler::LoadIC(const LoadICParameters* p) {
- Variable var_handler(this, MachineRepresentation::kTagged);
- // TODO(ishell): defer blocks when it works.
- Label if_handler(this, &var_handler), try_polymorphic(this),
- try_megamorphic(this /*, Label::kDeferred*/),
- miss(this /*, Label::kDeferred*/);
-
- Node* receiver_map = LoadReceiverMap(p->receiver);
-
- // Check monomorphic case.
- Node* feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
- Bind(&if_handler);
- {
- HandleLoadICHandlerCase(p, var_handler.value(), &miss);
- }
-
- Bind(&try_polymorphic);
- {
- // Check polymorphic case.
- Comment("LoadIC_try_polymorphic");
- GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
- &try_megamorphic);
- HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
- &miss, 2);
- }
-
- Bind(&try_megamorphic);
- {
- // Check megamorphic case.
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
-
- TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
- &if_handler, &var_handler, &miss);
- }
- Bind(&miss);
- {
- TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
- p->slot, p->vector);
- }
-}
-
-void CodeStubAssembler::KeyedLoadIC(const LoadICParameters* p) {
- Variable var_handler(this, MachineRepresentation::kTagged);
- // TODO(ishell): defer blocks when it works.
- Label if_handler(this, &var_handler), try_polymorphic(this),
- try_megamorphic(this /*, Label::kDeferred*/),
- try_polymorphic_name(this /*, Label::kDeferred*/),
- miss(this /*, Label::kDeferred*/);
-
- Node* receiver_map = LoadReceiverMap(p->receiver);
-
- // Check monomorphic case.
- Node* feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
- Bind(&if_handler);
- {
- HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
- }
-
- Bind(&try_polymorphic);
- {
- // Check polymorphic case.
- Comment("KeyedLoadIC_try_polymorphic");
- GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
- &try_megamorphic);
- HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
- &miss, 2);
- }
-
- Bind(&try_megamorphic);
- {
- // Check megamorphic case.
- Comment("KeyedLoadIC_try_megamorphic");
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
- // TODO(jkummerow): Inline this? Or some of it?
- TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
- p->receiver, p->name, p->slot, p->vector);
- }
- Bind(&try_polymorphic_name);
- {
- // We might have a name in feedback, and a fixed array in the next slot.
- Comment("KeyedLoadIC_try_polymorphic_name");
- GotoUnless(WordEqual(feedback, p->name), &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
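-    // The array lives one slot after the name, hence the extra kPointerSize
-    // in the offset computed below.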
- Node* offset = ElementOffsetFromIndex(
- p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
- FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
- Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
- 1);
- }
- Bind(&miss);
- {
- Comment("KeyedLoadIC_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
- }
-}
-
-void CodeStubAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
- Variable var_index(this, MachineType::PointerRepresentation());
- Variable var_details(this, MachineRepresentation::kWord32);
- Variable var_value(this, MachineRepresentation::kTagged);
- Label if_index(this), if_unique_name(this), if_element_hole(this),
- if_oob(this), slow(this), stub_cache_miss(this),
- if_property_dictionary(this), if_found_on_receiver(this);
-
- Node* receiver = p->receiver;
- GotoIf(TaggedIsSmi(receiver), &slow);
- Node* receiver_map = LoadMap(receiver);
- Node* instance_type = LoadMapInstanceType(receiver_map);
- // Receivers requiring non-standard element accesses (interceptors, access
- // checks, strings and string wrappers, proxies) are handled in the runtime.
- GotoIf(Int32LessThanOrEqual(instance_type,
- Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
- &slow);
-
- Node* key = p->name;
- TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
-
- Bind(&if_index);
- {
- Comment("integer index");
- Node* index = var_index.value();
- Node* elements = LoadElements(receiver);
- Node* elements_kind = LoadMapElementsKind(receiver_map);
- Node* is_jsarray_condition =
- Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
- Variable var_double_value(this, MachineRepresentation::kFloat64);
- Label rebox_double(this, &var_double_value);
-
- // Unimplemented elements kinds fall back to a runtime call.
- Label* unimplemented_elements_kind = &slow;
- IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
- EmitElementLoad(receiver, elements, elements_kind, index,
- is_jsarray_condition, &if_element_hole, &rebox_double,
- &var_double_value, unimplemented_elements_kind, &if_oob,
- &slow);
-
- Bind(&rebox_double);
- Return(AllocateHeapNumberWithValue(var_double_value.value()));
- }
-
- Bind(&if_oob);
- {
- Comment("out of bounds");
- Node* index = var_index.value();
- // Negative keys can't take the fast OOB path.
- GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
- // Positive OOB indices are effectively the same as hole loads.
- Goto(&if_element_hole);
- }
-
- Bind(&if_element_hole);
- {
- Comment("found the hole");
- Label return_undefined(this);
- BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
-
- Bind(&return_undefined);
- Return(UndefinedConstant());
- }
-
- Node* properties = nullptr;
- Bind(&if_unique_name);
- {
- Comment("key is unique name");
- // Check if the receiver has fast or slow properties.
- properties = LoadProperties(receiver);
- Node* properties_map = LoadMap(properties);
- GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
- &if_property_dictionary);
-
- // Try looking up the property on the receiver; if unsuccessful, look
- // for a handler in the stub cache.
- Comment("DescriptorArray lookup");
-
- // Skip linear search if there are too many descriptors.
- // TODO(jkummerow): Consider implementing binary search.
- // See also TryLookupProperty() which has the same limitation.
- const int32_t kMaxLinear = 210;
- Label stub_cache(this);
- Node* bitfield3 = LoadMapBitField3(receiver_map);
- Node* nof =
- DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
- GotoIf(UintPtrGreaterThan(nof, IntPtrConstant(kMaxLinear)), &stub_cache);
- Node* descriptors = LoadMapDescriptors(receiver_map);
- Variable var_name_index(this, MachineType::PointerRepresentation());
- Label if_descriptor_found(this);
- DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
- &var_name_index, &stub_cache);
-
- Bind(&if_descriptor_found);
- {
- LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
- var_name_index.value(), &var_details,
- &var_value);
- Goto(&if_found_on_receiver);
- }
-
- Bind(&stub_cache);
- {
- Comment("stub cache probe for fast property load");
- Variable var_handler(this, MachineRepresentation::kTagged);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
- &found_handler, &var_handler, &stub_cache_miss);
- Bind(&found_handler);
- { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
-
- Bind(&stub_cache_miss);
- {
- Comment("KeyedLoadGeneric_miss");
- TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
- p->name, p->slot, p->vector);
- }
- }
- }
-
- Bind(&if_property_dictionary);
- {
- Comment("dictionary property load");
- // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
- // seeing global objects here (which would need special handling).
-
- Variable var_name_index(this, MachineType::PointerRepresentation());
- Label dictionary_found(this, &var_name_index);
- NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
- &var_name_index, &slow);
- Bind(&dictionary_found);
- {
- LoadPropertyFromNameDictionary(properties, var_name_index.value(),
- &var_details, &var_value);
- Goto(&if_found_on_receiver);
- }
- }
-
- Bind(&if_found_on_receiver);
- {
- Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
- p->context, receiver, &slow);
- IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
- Return(value);
- }
-
- Bind(&slow);
- {
- Comment("KeyedLoadGeneric_slow");
- IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
- // TODO(jkummerow): Should we use the GetProperty TF stub instead?
- TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
- p->name);
- }
-}
-
-void CodeStubAssembler::HandleStoreFieldAndReturn(Node* handler_word,
- Node* holder,
- Representation representation,
- Node* value, Node* transition,
- Label* miss) {
- bool transition_to_field = transition != nullptr;
- Node* prepared_value = PrepareValueForWrite(value, representation, miss);
-
- if (transition_to_field) {
- Label storage_extended(this);
- GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
- &storage_extended);
- Comment("[ Extend storage");
- ExtendPropertiesBackingStore(holder);
- Comment("] Extend storage");
- Goto(&storage_extended);
-
- Bind(&storage_extended);
- }
-
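-  // Decode where the field lives: its offset, and whether it is in-object or
-  // in the out-of-object properties backing store.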
- Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
- Label if_inobject(this), if_out_of_object(this);
- Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
- &if_out_of_object);
-
- Bind(&if_inobject);
- {
- StoreNamedField(holder, offset, true, representation, prepared_value,
- transition_to_field);
- if (transition_to_field) {
- StoreObjectField(holder, JSObject::kMapOffset, transition);
- }
- Return(value);
- }
-
- Bind(&if_out_of_object);
- {
- StoreNamedField(holder, offset, false, representation, prepared_value,
- transition_to_field);
- if (transition_to_field) {
- StoreObjectField(holder, JSObject::kMapOffset, transition);
- }
- Return(value);
- }
-}
-
-void CodeStubAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
- Node* holder, Node* value,
- Node* transition,
- Label* miss) {
- Comment(transition ? "transitioning field store" : "field store");
-
-#ifdef DEBUG
- Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- if (transition) {
- CSA_ASSERT(
- this,
- WordOr(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToField)),
- WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToConstant))));
- } else {
- CSA_ASSERT(this, WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kStoreField)));
- }
-#endif
-
- Node* field_representation =
- DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
-
- Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
- if_tagged_field(this);
-
- GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
- &if_tagged_field);
- GotoIf(WordEqual(field_representation,
- IntPtrConstant(StoreHandler::kHeapObject)),
- &if_heap_object_field);
- GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
- &if_double_field);
- CSA_ASSERT(this, WordEqual(field_representation,
- IntPtrConstant(StoreHandler::kSmi)));
- Goto(&if_smi_field);
-
- Bind(&if_tagged_field);
- {
- Comment("store tagged field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
- value, transition, miss);
- }
-
- Bind(&if_double_field);
- {
- Comment("store double field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
- value, transition, miss);
- }
-
- Bind(&if_heap_object_field);
- {
- Comment("store heap object field");
-    // Generate the full field type check here, then store the value as tagged.
- Node* prepared_value =
- PrepareValueForWrite(value, Representation::HeapObject(), miss);
- Node* value_index_in_descriptor =
- DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
- Node* descriptors =
- LoadMapDescriptors(transition ? transition : LoadMap(holder));
- Node* maybe_field_type = LoadFixedArrayElement(
- descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
- Label do_store(this);
- GotoIf(TaggedIsSmi(maybe_field_type), &do_store);
- // Check that value type matches the field type.
- {
- Node* field_type = LoadWeakCellValue(maybe_field_type, miss);
- Branch(WordEqual(LoadMap(prepared_value), field_type), &do_store, miss);
- }
- Bind(&do_store);
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
- prepared_value, transition, miss);
- }
-
- Bind(&if_smi_field);
- {
- Comment("store smi field");
- HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
- value, transition, miss);
- }
-}
-
-void CodeStubAssembler::HandleStoreICHandlerCase(const StoreICParameters* p,
- Node* handler, Label* miss) {
- Label if_smi_handler(this);
- Label try_proto_handler(this), call_handler(this);
-
- Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
-
- // |handler| is a Smi, encoding what to do. See SmiHandler methods
- // for the encoding format.
- Bind(&if_smi_handler);
- {
- Node* holder = p->receiver;
- Node* handler_word = SmiUntag(handler);
-
- // Handle non-transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
- }
-
- Bind(&try_proto_handler);
- {
- GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
- HandleStoreICProtoHandler(p, handler, miss);
- }
-
-  // |handler| is a heap object; it must be a Code object, so call it.
- Bind(&call_handler);
- {
- StoreWithVectorDescriptor descriptor(isolate());
- TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
- p->value, p->slot, p->vector);
- }
-}
-
-void CodeStubAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
- Node* handler, Label* miss) {
- // IC dispatchers rely on these assumptions to be held.
- STATIC_ASSERT(FixedArray::kLengthOffset ==
- StoreHandler::kTransitionCellOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
- StoreHandler::kSmiHandlerOffset);
- DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
- StoreHandler::kValidityCellOffset);
-
-  // Both FixedArray and Tuple3 handlers have the validity cell at the same
-  // offset.
- Label validity_cell_check_done(this);
- Node* validity_cell =
- LoadObjectField(handler, StoreHandler::kValidityCellOffset);
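-  // A validity cell of zero means there is nothing to check; skip ahead.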
- GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
- &validity_cell_check_done);
- Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
- GotoIf(WordNotEqual(cell_value,
- SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
- miss);
- Goto(&validity_cell_check_done);
-
- Bind(&validity_cell_check_done);
- Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
- CSA_ASSERT(this, TaggedIsSmi(smi_handler));
-
- Node* maybe_transition_cell =
- LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
- Label array_handler(this), tuple_handler(this);
- Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
-
- Variable var_transition(this, MachineRepresentation::kTagged);
- Label if_transition(this), if_transition_to_constant(this);
- Bind(&tuple_handler);
- {
- Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition.Bind(transition);
- Goto(&if_transition);
- }
-
- Bind(&array_handler);
- {
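-    // A Smi here is the handler array's length: the transition-cell slot
-    // aliases FixedArray::kLengthOffset (see the STATIC_ASSERT above).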
- Node* length = SmiUntag(maybe_transition_cell);
- BuildFastLoop(MachineType::PointerRepresentation(),
- IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
- [this, p, handler, miss](CodeStubAssembler*, Node* current) {
- Node* prototype_cell = LoadFixedArrayElement(
- handler, current, 0, INTPTR_PARAMETERS);
- CheckPrototype(prototype_cell, p->name, miss);
- },
- 1, IndexAdvanceMode::kPost);
-
- Node* maybe_transition_cell = LoadFixedArrayElement(
- handler, IntPtrConstant(StoreHandler::kTransitionCellIndex), 0,
- INTPTR_PARAMETERS);
- Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
- var_transition.Bind(transition);
- Goto(&if_transition);
- }
-
- Bind(&if_transition);
- {
- Node* holder = p->receiver;
- Node* transition = var_transition.value();
- Node* handler_word = SmiUntag(smi_handler);
-
- GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
-
- Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
- GotoIf(WordEqual(handler_kind,
- IntPtrConstant(StoreHandler::kTransitionToConstant)),
- &if_transition_to_constant);
-
- // Handle transitioning field stores.
- HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
- miss);
-
- Bind(&if_transition_to_constant);
- {
- // Check that constant matches value.
- Node* value_index_in_descriptor =
- DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
- Node* descriptors = LoadMapDescriptors(transition);
- Node* constant = LoadFixedArrayElement(
- descriptors, value_index_in_descriptor, 0, INTPTR_PARAMETERS);
- GotoIf(WordNotEqual(p->value, constant), miss);
-
- StoreObjectField(p->receiver, JSObject::kMapOffset, transition);
- Return(p->value);
- }
- }
-}
-
-void CodeStubAssembler::StoreIC(const StoreICParameters* p) {
- Variable var_handler(this, MachineRepresentation::kTagged);
- // TODO(ishell): defer blocks when it works.
- Label if_handler(this, &var_handler), try_polymorphic(this),
- try_megamorphic(this /*, Label::kDeferred*/),
- miss(this /*, Label::kDeferred*/);
-
- Node* receiver_map = LoadReceiverMap(p->receiver);
-
- // Check monomorphic case.
- Node* feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
- Bind(&if_handler);
- {
- Comment("StoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss);
- }
-
- Bind(&try_polymorphic);
- {
- // Check polymorphic case.
- Comment("StoreIC_try_polymorphic");
- GotoUnless(
- WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
- &try_megamorphic);
- HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
- &miss, 2);
- }
-
- Bind(&try_megamorphic);
- {
- // Check megamorphic case.
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &miss);
-
- TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
- &if_handler, &var_handler, &miss);
- }
- Bind(&miss);
- {
- TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
- }
-}
-
-void CodeStubAssembler::KeyedStoreIC(const StoreICParameters* p,
- LanguageMode language_mode) {
- Variable var_handler(this, MachineRepresentation::kTagged);
-  // Bind var_handler up front so the |miss| label sees it bound on all paths.
- var_handler.Bind(IntPtrConstant(0));
-
- // TODO(ishell): defer blocks when it works.
- Label if_handler(this, &var_handler), try_polymorphic(this),
- try_megamorphic(this /*, Label::kDeferred*/),
- try_polymorphic_name(this /*, Label::kDeferred*/),
- miss(this /*, Label::kDeferred*/);
-
- Node* receiver_map = LoadReceiverMap(p->receiver);
-
- // Check monomorphic case.
- Node* feedback =
- TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
- &var_handler, &try_polymorphic);
- Bind(&if_handler);
- {
- Comment("KeyedStoreIC_if_handler");
- HandleStoreICHandlerCase(p, var_handler.value(), &miss);
- }
-
- Bind(&try_polymorphic);
- {
-    // Check polymorphic case.
- Comment("KeyedStoreIC_try_polymorphic");
- GotoUnless(
- WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
- &try_megamorphic);
- Label if_transition_handler(this);
- Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
- HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
- &var_handler, &if_transition_handler,
- &var_transition_map_cell, &miss);
- Bind(&if_transition_handler);
- Comment("KeyedStoreIC_polymorphic_transition");
- Node* transition_map =
- LoadWeakCellValue(var_transition_map_cell.value(), &miss);
- StoreTransitionDescriptor descriptor(isolate());
- TailCallStub(descriptor, var_handler.value(), p->context, p->receiver,
- p->name, transition_map, p->value, p->slot, p->vector);
- }
-
- Bind(&try_megamorphic);
- {
- // Check megamorphic case.
- Comment("KeyedStoreIC_try_megamorphic");
- GotoUnless(
- WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
- &try_polymorphic_name);
- TailCallStub(
- CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
- p->context, p->receiver, p->name, p->value, p->slot, p->vector);
- }
-
- Bind(&try_polymorphic_name);
- {
- // We might have a name in feedback, and a fixed array in the next slot.
- Comment("KeyedStoreIC_try_polymorphic_name");
- GotoUnless(WordEqual(feedback, p->name), &miss);
- // If the name comparison succeeded, we know we have a FixedArray with
- // at least one map/handler pair.
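-    // The array lives one slot after the name, hence the extra kPointerSize
-    // in the offset computed below.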
- Node* offset = ElementOffsetFromIndex(
- p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
- FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
- Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
- HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
- 1);
- }
-
- Bind(&miss);
- {
- Comment("KeyedStoreIC_miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
- }
-}
-
-void CodeStubAssembler::LoadGlobalIC(const LoadICParameters* p) {
- Label try_handler(this), miss(this);
- Node* weak_cell =
- LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
- CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
-
- // Load value or try handler case if the {weak_cell} is cleared.
- Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
- CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
-
- Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
- GotoIf(WordEqual(value, TheHoleConstant()), &miss);
- Return(value);
-
- Bind(&try_handler);
- {
- Node* handler =
- LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
- GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
- &miss);
-
- // In this case {handler} must be a Code object.
- CSA_ASSERT(this, HasInstanceType(handler, CODE_TYPE));
- LoadWithVectorDescriptor descriptor(isolate());
- Node* native_context = LoadNativeContext(p->context);
- Node* receiver =
- LoadContextElement(native_context, Context::EXTENSION_INDEX);
- Node* fake_name = IntPtrConstant(0);
- TailCallStub(descriptor, handler, p->context, receiver, fake_name, p->slot,
- p->vector);
- }
- Bind(&miss);
- {
- TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->slot,
- p->vector);
- }
-}
-
-void CodeStubAssembler::ExtendPropertiesBackingStore(compiler::Node* object) {
- Node* properties = LoadProperties(object);
- Node* length = LoadFixedArrayBaseLength(properties);
-
- ParameterMode mode = OptimalParameterMode();
- length = UntagParameter(length, mode);
-
- Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
- Node* new_capacity = IntPtrAdd(length, delta);
-
- // Grow properties array.
- ElementsKind kind = FAST_ELEMENTS;
- DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
- FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
- // The size of a new properties backing store is guaranteed to be small
- // enough that the new backing store will be allocated in new space.
- CSA_ASSERT(this, UintPtrLessThan(new_capacity,
- IntPtrConstant(kMaxNumberOfDescriptors +
- JSObject::kFieldsAdded)));
-
- Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
-
- FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
- Heap::kUndefinedValueRootIndex, mode);
-
- // |new_properties| is guaranteed to be in new space, so we can skip
- // the write barrier.
- CopyFixedArrayElements(kind, properties, new_properties, length,
- SKIP_WRITE_BARRIER, mode);
-
- StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
-}
-
-Node* CodeStubAssembler::PrepareValueForWrite(Node* value,
- Representation representation,
- Label* bailout) {
- if (representation.IsDouble()) {
- value = TryTaggedToFloat64(value, bailout);
- } else if (representation.IsHeapObject()) {
- // Field type is checked by the handler, here we only check if the value
- // is a heap object.
- GotoIf(TaggedIsSmi(value), bailout);
- } else if (representation.IsSmi()) {
- GotoUnless(TaggedIsSmi(value), bailout);
- } else {
- DCHECK(representation.IsTagged());
- }
- return value;
-}
-
-void CodeStubAssembler::StoreNamedField(Node* object, FieldIndex index,
- Representation representation,
- Node* value, bool transition_to_field) {
- DCHECK_EQ(index.is_double(), representation.IsDouble());
-
- StoreNamedField(object, IntPtrConstant(index.offset()), index.is_inobject(),
- representation, value, transition_to_field);
-}
-
-void CodeStubAssembler::StoreNamedField(Node* object, Node* offset,
- bool is_inobject,
- Representation representation,
- Node* value, bool transition_to_field) {
- bool store_value_as_double = representation.IsDouble();
- Node* property_storage = object;
- if (!is_inobject) {
- property_storage = LoadProperties(object);
- }
-
- if (representation.IsDouble()) {
- if (!FLAG_unbox_double_fields || !is_inobject) {
- if (transition_to_field) {
- Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
- // Store the new mutable heap number into the object.
- value = heap_number;
- store_value_as_double = false;
- } else {
- // Load the heap number.
- property_storage = LoadObjectField(property_storage, offset);
- // Store the double value into it.
- offset = IntPtrConstant(HeapNumber::kValueOffset);
- }
- }
- }
-
- if (store_value_as_double) {
- StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
- MachineRepresentation::kFloat64);
- } else if (representation.IsSmi()) {
- StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
- } else {
- StoreObjectField(property_storage, offset, value);
- }
-}
-
Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
Node* value, Label* bailout) {
// Mapped arguments are actual arguments. Unmapped arguments are values added
@@ -6730,37 +5572,33 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped);
- Node* mapped_index = LoadFixedArrayElement(
- elements, IntPtrAdd(key, intptr_two), 0, INTPTR_PARAMETERS);
+ Node* mapped_index =
+ LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two));
Branch(WordEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped);
Bind(&if_mapped);
{
CSA_ASSERT(this, TaggedIsSmi(mapped_index));
mapped_index = SmiUntag(mapped_index);
- Node* the_context = LoadFixedArrayElement(elements, IntPtrConstant(0), 0,
- INTPTR_PARAMETERS);
+ Node* the_context = LoadFixedArrayElement(elements, 0);
// Assert that we can use LoadFixedArrayElement/StoreFixedArrayElement
// methods for accessing Context.
STATIC_ASSERT(Context::kHeaderSize == FixedArray::kHeaderSize);
DCHECK_EQ(Context::SlotOffset(0) + kHeapObjectTag,
FixedArray::OffsetOfElementAt(0));
if (is_load) {
- Node* result = LoadFixedArrayElement(the_context, mapped_index, 0,
- INTPTR_PARAMETERS);
+ Node* result = LoadFixedArrayElement(the_context, mapped_index);
CSA_ASSERT(this, WordNotEqual(result, TheHoleConstant()));
var_result.Bind(result);
} else {
- StoreFixedArrayElement(the_context, mapped_index, value,
- UPDATE_WRITE_BARRIER, INTPTR_PARAMETERS);
+ StoreFixedArrayElement(the_context, mapped_index, value);
}
Goto(&end);
}
Bind(&if_unmapped);
{
- Node* backing_store = LoadFixedArrayElement(elements, IntPtrConstant(1), 0,
- INTPTR_PARAMETERS);
+ Node* backing_store = LoadFixedArrayElement(elements, 1);
GotoIf(WordNotEqual(LoadMap(backing_store), FixedArrayMapConstant()),
bailout);
@@ -6770,13 +5608,11 @@ Node* CodeStubAssembler::EmitKeyedSloppyArguments(Node* receiver, Node* key,
// The key falls into unmapped range.
if (is_load) {
- Node* result =
- LoadFixedArrayElement(backing_store, key, 0, INTPTR_PARAMETERS);
+ Node* result = LoadFixedArrayElement(backing_store, key);
GotoIf(WordEqual(result, TheHoleConstant()), bailout);
var_result.Bind(result);
} else {
- StoreFixedArrayElement(backing_store, key, value, UPDATE_WRITE_BARRIER,
- INTPTR_PARAMETERS);
+ StoreFixedArrayElement(backing_store, key, value);
}
Goto(&end);
}
@@ -6844,7 +5680,7 @@ void CodeStubAssembler::StoreElement(Node* elements, ElementsKind kind,
value = Float64SilenceNaN(value);
StoreFixedDoubleArrayElement(elements, index, value, mode);
} else {
- StoreFixedArrayElement(elements, index, value, barrier_mode, mode);
+ StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
}
}
@@ -6979,14 +5815,10 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
// Check if buffer has been neutered.
Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
- Node* bitfield = LoadObjectField(buffer, JSArrayBuffer::kBitFieldOffset,
- MachineType::Uint32());
- Node* neutered_bit =
- Word32And(bitfield, Int32Constant(JSArrayBuffer::WasNeutered::kMask));
- GotoUnless(Word32Equal(neutered_bit, Int32Constant(0)), bailout);
+ GotoIf(IsDetachedBuffer(buffer), bailout);
// Bounds check.
- Node* length = UntagParameter(
+ Node* length = TaggedToParameter(
LoadObjectField(object, JSTypedArray::kLengthOffset), parameter_mode);
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
@@ -7004,7 +5836,8 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
MachineType::Pointer());
Node* base_pointer =
LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
- Node* backing_store = IntPtrAdd(external_pointer, base_pointer);
+ Node* backing_store =
+ IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
StoreElement(backing_store, elements_kind, key, value, parameter_mode);
Goto(&done);
@@ -7016,7 +5849,7 @@ void CodeStubAssembler::EmitElementStore(Node* object, Node* key, Node* value,
Node* length = is_jsarray ? LoadObjectField(object, JSArray::kLengthOffset)
: LoadFixedArrayBaseLength(elements);
- length = UntagParameter(length, parameter_mode);
+ length = TaggedToParameter(length, parameter_mode);
   // In case the value is stored into a fast smi array, ensure that the value
   // is a smi before manipulating the backing store. Otherwise the backing store
@@ -7061,7 +5894,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
Bind(&grow_case);
{
Node* current_capacity =
- UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+ TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
checked_elements.Bind(elements);
@@ -7079,7 +5912,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow(Node* object, Node* elements,
if (is_js_array) {
Node* new_length = IntPtrAdd(key, IntPtrOrSmiConstant(1, mode));
StoreObjectFieldNoWriteBarrier(object, JSArray::kLengthOffset,
- TagParameter(new_length, mode));
+ ParameterToTagged(new_length, mode));
}
Goto(&done);
}
@@ -7107,7 +5940,8 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
WordEqual(LoadMap(elements), LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
&done);
{
- Node* capacity = UntagParameter(LoadFixedArrayBaseLength(elements), mode);
+ Node* capacity =
+ TaggedToParameter(LoadFixedArrayBaseLength(elements), mode);
Node* new_elements = GrowElementsCapacity(object, elements, kind, kind,
length, capacity, mode, bailout);
@@ -7119,9 +5953,11 @@ Node* CodeStubAssembler::CopyElementsOnWrite(Node* object, Node* elements,
return new_elements_var.value();
}
-void CodeStubAssembler::TransitionElementsKind(
- compiler::Node* object, compiler::Node* map, ElementsKind from_kind,
- ElementsKind to_kind, bool is_jsarray, Label* bailout) {
+void CodeStubAssembler::TransitionElementsKind(Node* object, Node* map,
+ ElementsKind from_kind,
+ ElementsKind to_kind,
+ bool is_jsarray,
+ Label* bailout) {
DCHECK(!IsFastHoleyElementsKind(from_kind) ||
IsFastHoleyElementsKind(to_kind));
if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
@@ -7151,7 +5987,7 @@ void CodeStubAssembler::TransitionElementsKind(
Bind(&done);
}
- StoreObjectField(object, JSObject::kMapOffset, map);
+ StoreMap(object, map);
}
void CodeStubAssembler::TrapAllocationMemento(Node* object,
@@ -7167,7 +6003,8 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
// Bail out if the object is not in new space.
- Node* object_page = PageFromAddress(object);
+ Node* object_word = BitcastTaggedToWord(object);
+ Node* object_page = PageFromAddress(object_word);
{
Node* page_flags = Load(MachineType::IntPtr(), object_page,
IntPtrConstant(Page::kFlagsOffset));
@@ -7178,7 +6015,7 @@ void CodeStubAssembler::TrapAllocationMemento(Node* object,
}
Node* memento_last_word = IntPtrAdd(
- object, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
+ object_word, IntPtrConstant(kMementoLastWordOffset - kHeapObjectTag));
Node* memento_last_word_page = PageFromAddress(memento_last_word);
Node* new_space_top = Load(MachineType::Pointer(), new_space_top_address);
@@ -7291,15 +6128,13 @@ Node* CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
Node* size = IntPtrConstant(AllocationSite::kSize);
Node* site = Allocate(size, CodeStubAssembler::kPretenured);
- // Store the map
- StoreObjectFieldRoot(site, AllocationSite::kMapOffset,
- Heap::kAllocationSiteMapRootIndex);
- Node* kind = SmiConstant(Smi::FromInt(GetInitialFastElementsKind()));
+ StoreMap(site, AllocationSiteMapConstant());
+ Node* kind = SmiConstant(GetInitialFastElementsKind());
StoreObjectFieldNoWriteBarrier(site, AllocationSite::kTransitionInfoOffset,
kind);
// Unlike literals, constructed arrays don't have nested sites
- Node* zero = IntPtrConstant(0);
+ Node* zero = SmiConstant(0);
StoreObjectFieldNoWriteBarrier(site, AllocationSite::kNestedSiteOffset, zero);
// Pretenuring calculation field.
@@ -7327,7 +6162,7 @@ Node* CodeStubAssembler::CreateAllocationSiteInFeedbackVector(
StoreObjectField(site, AllocationSite::kWeakNextOffset, next_site);
StoreNoWriteBarrier(MachineRepresentation::kTagged, site_list, site);
- StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER,
+ StoreFixedArrayElement(feedback_vector, slot, site, UPDATE_WRITE_BARRIER, 0,
CodeStubAssembler::SMI_PARAMETERS);
return site;
}
@@ -7339,13 +6174,14 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
Node* cell = Allocate(size, CodeStubAssembler::kPretenured);
// Initialize the WeakCell.
- StoreObjectFieldRoot(cell, WeakCell::kMapOffset, Heap::kWeakCellMapRootIndex);
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kWeakCellMapRootIndex));
+ StoreMapNoWriteBarrier(cell, Heap::kWeakCellMapRootIndex);
StoreObjectField(cell, WeakCell::kValueOffset, value);
StoreObjectFieldRoot(cell, WeakCell::kNextOffset,
Heap::kTheHoleValueRootIndex);
// Store the WeakCell in the feedback vector.
- StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER,
+ StoreFixedArrayElement(feedback_vector, slot, cell, UPDATE_WRITE_BARRIER, 0,
CodeStubAssembler::SMI_PARAMETERS);
return cell;
}
@@ -7353,8 +6189,7 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
void CodeStubAssembler::BuildFastLoop(
const CodeStubAssembler::VariableList& vars,
MachineRepresentation index_rep, Node* start_index, Node* end_index,
- std::function<void(CodeStubAssembler* assembler, Node* index)> body,
- int increment, IndexAdvanceMode mode) {
+ const FastLoopBody& body, int increment, IndexAdvanceMode mode) {
Variable var(this, index_rep);
VariableList vars_copy(vars, zone());
vars_copy.Add(&var, zone());
@@ -7372,11 +6207,11 @@ void CodeStubAssembler::BuildFastLoop(
Bind(&loop);
{
if (mode == IndexAdvanceMode::kPre) {
- var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ Increment(var, increment);
}
- body(this, var.value());
+ body(var.value());
if (mode == IndexAdvanceMode::kPost) {
- var.Bind(IntPtrAdd(var.value(), IntPtrConstant(increment)));
+ Increment(var, increment);
}
Branch(WordNotEqual(var.value(), end_index), &loop, &after_loop);
}
@@ -7384,12 +6219,8 @@ void CodeStubAssembler::BuildFastLoop(
}
void CodeStubAssembler::BuildFastFixedArrayForEach(
- compiler::Node* fixed_array, ElementsKind kind,
- compiler::Node* first_element_inclusive,
- compiler::Node* last_element_exclusive,
- std::function<void(CodeStubAssembler* assembler,
- compiler::Node* fixed_array, compiler::Node* offset)>
- body,
+ Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode, ForEachDirection direction) {
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
int32_t first_val;
@@ -7406,7 +6237,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
Node* offset =
ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
FixedArray::kHeaderSize - kHeapObjectTag);
- body(this, fixed_array, offset);
+ body(fixed_array, offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
@@ -7414,7 +6245,7 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
Node* offset =
ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
FixedArray::kHeaderSize - kHeapObjectTag);
- body(this, fixed_array, offset);
+ body(fixed_array, offset);
}
}
return;
@@ -7432,19 +6263,29 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
int increment = IsFastDoubleElementsKind(kind) ? kDoubleSize : kPointerSize;
BuildFastLoop(
MachineType::PointerRepresentation(), start, limit,
- [fixed_array, body](CodeStubAssembler* assembler, Node* offset) {
- body(assembler, fixed_array, offset);
- },
+ [fixed_array, &body](Node* offset) { body(fixed_array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
-void CodeStubAssembler::BranchIfNumericRelationalComparison(
- RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
- Label* if_true, Label* if_false) {
- typedef compiler::Node Node;
+void CodeStubAssembler::InitializeFieldsWithRoot(
+ Node* object, Node* start_offset, Node* end_offset,
+ Heap::RootListIndex root_index) {
+ start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
+ end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
+ Node* root_value = LoadRoot(root_index);
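+  // Fill the fields backwards, walking from end_offset down to start_offset
+  // one word at a time.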
+ BuildFastLoop(MachineType::PointerRepresentation(), end_offset, start_offset,
+ [this, object, root_value](Node* current) {
+ StoreNoWriteBarrier(MachineRepresentation::kTagged, object,
+ current, root_value);
+ },
+ -kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPre);
+}
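The new helper walks backwards, from end_offset down to start_offset in kPointerSize steps, and both offsets are tagged-object offsets: the -kHeapObjectTag adjustment happens internally. A hedged usage sketch (the field range is invented for illustration):

  // Sketch: fill two pointer-sized fields following the JSObject header
  // with the undefined value.
  InitializeFieldsWithRoot(
      object, IntPtrConstant(JSObject::kHeaderSize),
      IntPtrConstant(JSObject::kHeaderSize + 2 * kPointerSize),
      Heap::kUndefinedValueRootIndex);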
+void CodeStubAssembler::BranchIfNumericRelationalComparison(
+ RelationalComparisonMode mode, Node* lhs, Node* rhs, Label* if_true,
+ Label* if_false) {
Label end(this);
Variable result(this, MachineRepresentation::kTagged);
@@ -7484,7 +6325,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
Bind(&if_rhsisnotsmi);
{
- CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
var_fcmp_lhs.Bind(SmiToFloat64(lhs));
@@ -7495,7 +6336,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
Bind(&if_lhsisnotsmi);
{
- CSA_ASSERT(this, WordEqual(LoadMap(lhs), HeapNumberMapConstant()));
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(lhs)));
// Check if {rhs} is a Smi or a HeapObject.
Label if_rhsissmi(this), if_rhsisnotsmi(this);
@@ -7512,7 +6353,7 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
Bind(&if_rhsisnotsmi);
{
- CSA_ASSERT(this, WordEqual(LoadMap(rhs), HeapNumberMapConstant()));
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(rhs)));
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
@@ -7546,19 +6387,16 @@ void CodeStubAssembler::BranchIfNumericRelationalComparison(
}
}
-void CodeStubAssembler::GotoUnlessNumberLessThan(compiler::Node* lhs,
- compiler::Node* rhs,
+void CodeStubAssembler::GotoUnlessNumberLessThan(Node* lhs, Node* rhs,
Label* if_false) {
Label if_true(this);
BranchIfNumericRelationalComparison(kLessThan, lhs, rhs, &if_true, if_false);
Bind(&if_true);
}
-compiler::Node* CodeStubAssembler::RelationalComparison(
- RelationalComparisonMode mode, compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context) {
- typedef compiler::Node Node;
-
+Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
+ Node* lhs, Node* rhs,
+ Node* context) {
Label return_true(this), return_false(this), end(this);
Variable result(this, MachineRepresentation::kTagged);
@@ -7644,9 +6482,6 @@ compiler::Node* CodeStubAssembler::RelationalComparison(
Bind(&if_lhsisnotsmi);
{
- // Load the HeapNumber map for later comparisons.
- Node* number_map = HeapNumberMapConstant();
-
// Load the map of {lhs}.
Node* lhs_map = LoadMap(lhs);
@@ -7658,8 +6493,7 @@ compiler::Node* CodeStubAssembler::RelationalComparison(
{
// Check if the {lhs} is a HeapNumber.
Label if_lhsisnumber(this), if_lhsisnotnumber(this, Label::kDeferred);
- Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
- &if_lhsisnotnumber);
+ Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
Bind(&if_lhsisnumber);
{
@@ -7689,8 +6523,7 @@ compiler::Node* CodeStubAssembler::RelationalComparison(
// Check if {lhs} is a HeapNumber.
Label if_lhsisnumber(this), if_lhsisnotnumber(this);
- Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
- &if_lhsisnotnumber);
+ Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
Bind(&if_lhsisnumber);
{
@@ -7879,7 +6712,7 @@ compiler::Node* CodeStubAssembler::RelationalComparison(
namespace {
-void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
+void GenerateEqual_Same(CodeStubAssembler* assembler, Node* value,
CodeStubAssembler::Label* if_equal,
CodeStubAssembler::Label* if_notequal) {
// In case of abstract or strict equality checks, we need additional checks
@@ -7889,7 +6722,6 @@ void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
// seems to be what is tested in the current SIMD.js testsuite.
typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
// Check if {value} is a Smi or a HeapObject.
Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
@@ -7924,9 +6756,9 @@ void GenerateEqual_Same(CodeStubAssembler* assembler, compiler::Node* value,
}
void GenerateEqual_Simd128Value_HeapObject(
- CodeStubAssembler* assembler, compiler::Node* lhs, compiler::Node* lhs_map,
- compiler::Node* rhs, compiler::Node* rhs_map,
- CodeStubAssembler::Label* if_equal, CodeStubAssembler::Label* if_notequal) {
+ CodeStubAssembler* assembler, Node* lhs, Node* lhs_map, Node* rhs,
+ Node* rhs_map, CodeStubAssembler::Label* if_equal,
+ CodeStubAssembler::Label* if_notequal) {
assembler->BranchIfSimd128Equal(lhs, lhs_map, rhs, rhs_map, if_equal,
if_notequal);
}
@@ -7934,14 +6766,12 @@ void GenerateEqual_Simd128Value_HeapObject(
} // namespace
// ES6 section 7.2.12 Abstract Equality Comparison
-compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
+Node* CodeStubAssembler::Equal(ResultMode mode, Node* lhs, Node* rhs,
+ Node* context) {
  // This is a slightly optimized version of Object::Equals represented as a
  // scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
  // change something functionality-wise in here, remember to update the
// Object::Equals method as well.
- typedef compiler::Node Node;
Label if_equal(this), if_notequal(this),
do_rhsstringtonumber(this, Label::kDeferred), end(this);
@@ -8001,10 +6831,8 @@ compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
Node* rhs_map = LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- Node* number_map = HeapNumberMapConstant();
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
- Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
Bind(&if_rhsisnumber);
{
@@ -8435,10 +7263,8 @@ compiler::Node* CodeStubAssembler::Equal(ResultMode mode, compiler::Node* lhs,
return result.value();
}
-compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
- compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
+Node* CodeStubAssembler::StrictEqual(ResultMode mode, Node* lhs, Node* rhs,
+ Node* context) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -8487,8 +7313,6 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
// }
// }
- typedef compiler::Node Node;
-
Label if_equal(this), if_notequal(this), end(this);
Variable result(this, MachineRepresentation::kTagged);
@@ -8507,7 +7331,6 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
{
// The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber,
// String and Simd128Value they can still be considered equal.
- Node* number_map = HeapNumberMapConstant();
// Check if {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(this), if_lhsisnotsmi(this);
@@ -8520,8 +7343,7 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
// Check if {lhs} is a HeapNumber.
Label if_lhsisnumber(this), if_lhsisnotnumber(this);
- Branch(WordEqual(lhs_map, number_map), &if_lhsisnumber,
- &if_lhsisnotnumber);
+ Branch(IsHeapNumberMap(lhs_map), &if_lhsisnumber, &if_lhsisnotnumber);
Bind(&if_lhsisnumber);
{
@@ -8546,8 +7368,7 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
// Check if {rhs} is also a HeapNumber.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
- Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
Bind(&if_rhsisnumber);
{
@@ -8652,8 +7473,7 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
// The {rhs} could be a HeapNumber with the same value as {lhs}.
Label if_rhsisnumber(this), if_rhsisnotnumber(this);
- Branch(WordEqual(rhs_map, number_map), &if_rhsisnumber,
- &if_rhsisnotnumber);
+ Branch(IsHeapNumberMap(rhs_map), &if_rhsisnumber, &if_rhsisnotnumber);
Bind(&if_rhsisnumber);
{
@@ -8690,14 +7510,12 @@ compiler::Node* CodeStubAssembler::StrictEqual(ResultMode mode,
// ECMA#sec-samevalue
// This algorithm differs from the Strict Equality Comparison Algorithm in its
// treatment of signed zeroes and NaNs.
-compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
- compiler::Node* rhs,
- compiler::Node* context) {
- Variable var_result(this, MachineType::PointerRepresentation());
+Node* CodeStubAssembler::SameValue(Node* lhs, Node* rhs, Node* context) {
+ Variable var_result(this, MachineRepresentation::kWord32);
Label strict_equal(this), out(this);
- Node* const int_false = IntPtrConstant(0);
- Node* const int_true = IntPtrConstant(1);
+ Node* const int_false = Int32Constant(0);
+ Node* const int_true = Int32Constant(1);
Label if_equal(this), if_notequal(this);
Branch(WordEqual(lhs, rhs), &if_equal, &if_notequal);
@@ -8727,8 +7545,8 @@ compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
// Return true iff {rhs} is NaN.
Node* const result =
- Select(Float64Equal(rhs_float, rhs_float), int_false, int_true,
- MachineType::PointerRepresentation());
+ SelectConstant(Float64Equal(rhs_float, rhs_float), int_false,
+ int_true, MachineRepresentation::kWord32);
var_result.Bind(result);
Goto(&out);
}
@@ -8776,9 +7594,7 @@ compiler::Node* CodeStubAssembler::SameValue(compiler::Node* lhs,
return var_result.value();
}
-compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
- compiler::Node* object,
- compiler::Node* context) {
+Node* CodeStubAssembler::ForInFilter(Node* key, Node* object, Node* context) {
Label return_undefined(this, Label::kDeferred), return_to_name(this),
end(this);
@@ -8806,13 +7622,9 @@ compiler::Node* CodeStubAssembler::ForInFilter(compiler::Node* key,
return var_result.value();
}
-compiler::Node* CodeStubAssembler::HasProperty(
- compiler::Node* object, compiler::Node* key, compiler::Node* context,
+Node* CodeStubAssembler::HasProperty(
+ Node* object, Node* key, Node* context,
Runtime::FunctionId fallback_runtime_function_id) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
Label call_runtime(this, Label::kDeferred), return_true(this),
return_false(this), end(this);
@@ -8860,8 +7672,7 @@ compiler::Node* CodeStubAssembler::HasProperty(
return result.value();
}
-compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
- compiler::Node* context) {
+Node* CodeStubAssembler::Typeof(Node* value, Node* context) {
Variable result_var(this, MachineRepresentation::kTagged);
Label return_number(this, Label::kDeferred), if_oddball(this),
@@ -8954,9 +7765,34 @@ compiler::Node* CodeStubAssembler::Typeof(compiler::Node* value,
return result_var.value();
}
-compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
- compiler::Node* callable,
- compiler::Node* context) {
+Node* CodeStubAssembler::GetSuperConstructor(Node* active_function,
+ Node* context) {
+ CSA_ASSERT(this, IsJSFunction(active_function));
+
+ Label is_not_constructor(this, Label::kDeferred), out(this);
+ Variable result(this, MachineRepresentation::kTagged);
+
+ Node* map = LoadMap(active_function);
+ Node* prototype = LoadMapPrototype(map);
+ Node* prototype_map = LoadMap(prototype);
+ GotoUnless(IsConstructorMap(prototype_map), &is_not_constructor);
+
+ result.Bind(prototype);
+ Goto(&out);
+
+ Bind(&is_not_constructor);
+ {
+ result.Bind(CallRuntime(Runtime::kThrowNotSuperConstructor, context,
+ prototype, active_function));
+ Goto(&out);
+ }
+
+ Bind(&out);
+ return result.value();
+}
+
+Node* CodeStubAssembler::InstanceOf(Node* object, Node* callable,
+ Node* context) {
Label return_runtime(this, Label::kDeferred), end(this);
Variable result(this, MachineRepresentation::kTagged);
@@ -8986,7 +7822,7 @@ compiler::Node* CodeStubAssembler::InstanceOf(compiler::Node* object,
return result.value();
}
-compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
+Node* CodeStubAssembler::NumberInc(Node* value) {
Variable var_result(this, MachineRepresentation::kTagged),
var_finc_value(this, MachineRepresentation::kFloat64);
Label if_issmi(this), if_isnotsmi(this), do_finc(this), end(this);
@@ -9005,7 +7841,7 @@ compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
Branch(overflow, &if_overflow, &if_notoverflow);
Bind(&if_notoverflow);
- var_result.Bind(Projection(0, pair));
+ var_result.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
Bind(&if_overflow);
@@ -9038,9 +7874,23 @@ compiler::Node* CodeStubAssembler::NumberInc(compiler::Node* value) {
return var_result.value();
}
-compiler::Node* CodeStubAssembler::CreateArrayIterator(
- compiler::Node* array, compiler::Node* array_map,
- compiler::Node* array_type, compiler::Node* context, IterationKind mode) {
+void CodeStubAssembler::GotoIfNotNumber(Node* input, Label* is_not_number) {
+ Label is_number(this);
+ GotoIf(TaggedIsSmi(input), &is_number);
+ Node* input_map = LoadMap(input);
+ Branch(IsHeapNumberMap(input_map), &is_number, is_not_number);
+ Bind(&is_number);
+}
+
+void CodeStubAssembler::GotoIfNumber(Node* input, Label* is_number) {
+ GotoIf(TaggedIsSmi(input), is_number);
+ Node* input_map = LoadMap(input);
+ GotoIf(IsHeapNumberMap(input_map), is_number);
+}
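Both helpers branch on "Smi or HeapNumber" without materializing a boolean node. A typical guard, sketched (the labels and the {input} node are illustrative):

  Label bailout(this, Label::kDeferred), done(this);
  GotoIfNotNumber(input, &bailout);
  // ... fast, number-only path ...
  Goto(&done);
  Bind(&bailout);
  // ... slow path, e.g. a runtime call ...
  Goto(&done);
  Bind(&done);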
+
+Node* CodeStubAssembler::CreateArrayIterator(Node* array, Node* array_map,
+ Node* array_type, Node* context,
+ IterationKind mode) {
int kBaseMapIndex = 0;
switch (mode) {
case IterationKind::kKeys:
@@ -9094,7 +7944,8 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
Bind(&if_isgeneric);
{
Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+ BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
+ &if_isfast, &if_isslow);
Bind(&if_isfast);
{
@@ -9128,7 +7979,8 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
Bind(&if_isgeneric);
{
Label if_isfast(this), if_isslow(this);
- BranchIfFastJSArray(array, context, &if_isfast, &if_isslow);
+ BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
+ &if_isfast, &if_isslow);
Bind(&if_isfast);
{
@@ -9173,7 +8025,7 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
{
Node* map_index =
IntPtrAdd(IntPtrConstant(kBaseMapIndex + kFastIteratorOffset),
- LoadMapElementsKind(array_map));
+ ChangeUint32ToWord(LoadMapElementsKind(array_map)));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(
map_index, IntPtrConstant(kBaseMapIndex +
kFastIteratorOffset)));
@@ -9201,7 +8053,7 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
{
Node* map_index =
IntPtrAdd(IntPtrConstant(kBaseMapIndex - UINT8_ELEMENTS),
- LoadMapElementsKind(array_map));
+ ChangeUint32ToWord(LoadMapElementsKind(array_map)));
CSA_ASSERT(
this, IntPtrLessThan(map_index, IntPtrConstant(kBaseMapIndex +
kFastIteratorOffset)));
@@ -9215,9 +8067,8 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
Bind(&allocate_iterator);
{
- Node* map =
- LoadFixedArrayElement(LoadNativeContext(context), var_map_index.value(),
- 0, CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* map = LoadFixedArrayElement(LoadNativeContext(context),
+ var_map_index.value());
var_result.Bind(AllocateJSArrayIterator(array, var_array_map.value(), map));
Goto(&return_result);
}
@@ -9226,8 +8077,8 @@ compiler::Node* CodeStubAssembler::CreateArrayIterator(
return var_result.value();
}
-compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
- compiler::Node* array, compiler::Node* array_map, compiler::Node* map) {
+Node* CodeStubAssembler::AllocateJSArrayIterator(Node* array, Node* array_map,
+ Node* map) {
Node* iterator = Allocate(JSArrayIterator::kSize);
StoreMapNoWriteBarrier(iterator, map);
StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOffset,
@@ -9243,58 +8094,54 @@ compiler::Node* CodeStubAssembler::AllocateJSArrayIterator(
return iterator;
}
-compiler::Node* CodeStubAssembler::IsDetachedBuffer(compiler::Node* buffer) {
+Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
CSA_ASSERT(this, HasInstanceType(buffer, JS_ARRAY_BUFFER_TYPE));
Node* buffer_bit_field = LoadObjectField(
buffer, JSArrayBuffer::kBitFieldOffset, MachineType::Uint32());
- Node* was_neutered_mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
-
- return Word32NotEqual(Word32And(buffer_bit_field, was_neutered_mask),
- Int32Constant(0));
+ return IsSetWord32<JSArrayBuffer::WasNeutered>(buffer_bit_field);
}
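IsSetWord32<T> merely packages the mask-and-compare pattern deleted above; the call is equivalent to the following sketch, with {bits} standing in for the loaded bit field:

  Node* mask = Int32Constant(JSArrayBuffer::WasNeutered::kMask);
  Node* was_neutered =
      Word32NotEqual(Word32And(bits, mask), Int32Constant(0));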
-CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler,
- compiler::Node* argc,
- CodeStubAssembler::ParameterMode mode)
+CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
: assembler_(assembler),
argc_(argc),
arguments_(nullptr),
fp_(assembler->LoadFramePointer()) {
- compiler::Node* offset = assembler->ElementOffsetFromIndex(
- argc_, FAST_ELEMENTS, mode,
+ argc_ = assembler->ChangeUint32ToWord(argc_);
+ Node* offset = assembler->ElementOffsetFromIndex(
+ argc_, FAST_ELEMENTS, CodeStubAssembler::INTPTR_PARAMETERS,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
- arguments_ = assembler_->IntPtrAddFoldConstants(fp_, offset);
- if (mode == CodeStubAssembler::INTEGER_PARAMETERS) {
- argc_ = assembler->ChangeInt32ToIntPtr(argc_);
- } else if (mode == CodeStubAssembler::SMI_PARAMETERS) {
- argc_ = assembler->SmiUntag(argc_);
- }
+ arguments_ = assembler_->IntPtrAdd(fp_, offset);
}
-compiler::Node* CodeStubArguments::GetReceiver() {
+Node* CodeStubArguments::GetReceiver() const {
return assembler_->Load(MachineType::AnyTagged(), arguments_,
assembler_->IntPtrConstant(kPointerSize));
}
-compiler::Node* CodeStubArguments::AtIndex(
- compiler::Node* index, CodeStubAssembler::ParameterMode mode) {
+Node* CodeStubArguments::AtIndex(Node* index,
+ CodeStubAssembler::ParameterMode mode) const {
typedef compiler::Node Node;
- Node* negated_index = assembler_->IntPtrSubFoldConstants(
- assembler_->IntPtrOrSmiConstant(0, mode), index);
+ CSA_ASSERT(assembler_, assembler_->UintPtrLessThan(
+ mode == CodeStubAssembler::INTPTR_PARAMETERS
+ ? index
+ : assembler_->SmiUntag(index),
+ GetLength()));
+ Node* negated_index =
+ assembler_->IntPtrSub(assembler_->IntPtrOrSmiConstant(0, mode), index);
Node* offset =
assembler_->ElementOffsetFromIndex(negated_index, FAST_ELEMENTS, mode, 0);
return assembler_->Load(MachineType::AnyTagged(), arguments_, offset);
}
-compiler::Node* CodeStubArguments::AtIndex(int index) {
+Node* CodeStubArguments::AtIndex(int index) const {
return AtIndex(assembler_->IntPtrConstant(index));
}
-void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
- CodeStubArguments::ForEachBodyFunction body,
- compiler::Node* first, compiler::Node* last,
- CodeStubAssembler::ParameterMode mode) {
+void CodeStubArguments::ForEach(
+ const CodeStubAssembler::VariableList& vars,
+ const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
+ CodeStubAssembler::ParameterMode mode) {
assembler_->Comment("CodeStubArguments::ForEach");
DCHECK_IMPLIES(first == nullptr || last == nullptr,
mode == CodeStubAssembler::INTPTR_PARAMETERS);
@@ -9304,35 +8151,32 @@ void CodeStubArguments::ForEach(const CodeStubAssembler::VariableList& vars,
if (last == nullptr) {
last = argc_;
}
- compiler::Node* start = assembler_->IntPtrSubFoldConstants(
+ Node* start = assembler_->IntPtrSub(
arguments_,
assembler_->ElementOffsetFromIndex(first, FAST_ELEMENTS, mode));
- compiler::Node* end = assembler_->IntPtrSubFoldConstants(
+ Node* end = assembler_->IntPtrSub(
arguments_,
assembler_->ElementOffsetFromIndex(last, FAST_ELEMENTS, mode));
assembler_->BuildFastLoop(
vars, MachineType::PointerRepresentation(), start, end,
- [body](CodeStubAssembler* assembler, compiler::Node* current) {
- Node* arg = assembler->Load(MachineType::AnyTagged(), current);
- body(assembler, arg);
+ [this, &body](Node* current) {
+ Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
+ body(arg);
},
-kPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
}
-void CodeStubArguments::PopAndReturn(compiler::Node* value) {
+void CodeStubArguments::PopAndReturn(Node* value) {
assembler_->PopAndReturn(
- assembler_->IntPtrAddFoldConstants(argc_, assembler_->IntPtrConstant(1)),
- value);
+ assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)), value);
}
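Taken together, the simplified CodeStubArguments API assumes a raw Int32 argument count and widens it to a word up front. A usage sketch inside a builtin (assuming {argc} comes from the builtin's descriptor and at least one argument was passed):

  CodeStubArguments args(this, argc);
  Node* receiver = args.GetReceiver();
  Node* first = args.AtIndex(0);  // CSA_ASSERTs the index is in range
  // {receiver} and {first} would feed the builtin's real logic here.
  args.PopAndReturn(first);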
-compiler::Node* CodeStubAssembler::IsFastElementsKind(
- compiler::Node* elements_kind) {
+Node* CodeStubAssembler::IsFastElementsKind(Node* elements_kind) {
return Uint32LessThanOrEqual(elements_kind,
Int32Constant(LAST_FAST_ELEMENTS_KIND));
}
-compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
- compiler::Node* elements_kind) {
+Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
CSA_ASSERT(this, IsFastElementsKind(elements_kind));
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == (FAST_SMI_ELEMENTS | 1));
@@ -9344,5 +8188,72 @@ compiler::Node* CodeStubAssembler::IsHoleyFastElementsKind(
return Word32Equal(holey_elements, Int32Constant(1));
}
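The hunk elides the masking step between the STATIC_ASSERT and the return; reconstructed as a sketch, the parity trick reads (each holey fast kind is its packed counterpart with the low bit set):

  Node* holey_elements = Word32And(elements_kind, Int32Constant(1));
  return Word32Equal(holey_elements, Int32Constant(1));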
+Node* CodeStubAssembler::IsDebugActive() {
+ Node* is_debug_active = Load(
+ MachineType::Uint8(),
+ ExternalConstant(ExternalReference::debug_is_active_address(isolate())));
+ return Word32NotEqual(is_debug_active, Int32Constant(0));
+}
+
+Node* CodeStubAssembler::IsPromiseHookEnabled() {
+ Node* const promise_hook = Load(
+ MachineType::Pointer(),
+ ExternalConstant(ExternalReference::promise_hook_address(isolate())));
+ return WordNotEqual(promise_hook, IntPtrConstant(0));
+}
+
+Node* CodeStubAssembler::AllocateFunctionWithMapAndContext(Node* map,
+ Node* shared_info,
+ Node* context) {
+ Node* const code = BitcastTaggedToWord(
+ LoadObjectField(shared_info, SharedFunctionInfo::kCodeOffset));
+ Node* const code_entry =
+ IntPtrAdd(code, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+
+ Node* const fun = Allocate(JSFunction::kSize);
+ StoreMapNoWriteBarrier(fun, map);
+ StoreObjectFieldRoot(fun, JSObject::kPropertiesOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(fun, JSObject::kElementsOffset,
+ Heap::kEmptyFixedArrayRootIndex);
+ StoreObjectFieldRoot(fun, JSFunction::kLiteralsOffset,
+ Heap::kEmptyLiteralsArrayRootIndex);
+ StoreObjectFieldRoot(fun, JSFunction::kPrototypeOrInitialMapOffset,
+ Heap::kTheHoleValueRootIndex);
+ StoreObjectFieldNoWriteBarrier(fun, JSFunction::kSharedFunctionInfoOffset,
+ shared_info);
+ StoreObjectFieldNoWriteBarrier(fun, JSFunction::kContextOffset, context);
+ StoreObjectFieldNoWriteBarrier(fun, JSFunction::kCodeEntryOffset, code_entry,
+ MachineType::PointerRepresentation());
+ StoreObjectFieldRoot(fun, JSFunction::kNextFunctionLinkOffset,
+ Heap::kUndefinedValueRootIndex);
+
+ return fun;
+}
+
+Node* CodeStubAssembler::AllocatePromiseReactionJobInfo(
+ Node* value, Node* tasks, Node* deferred_promise, Node* deferred_on_resolve,
+ Node* deferred_on_reject, Node* context) {
+ Node* const result = Allocate(PromiseReactionJobInfo::kSize);
+ StoreMapNoWriteBarrier(result, Heap::kPromiseReactionJobInfoMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kValueOffset,
+ value);
+ StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kTasksOffset,
+ tasks);
+ StoreObjectFieldNoWriteBarrier(
+ result, PromiseReactionJobInfo::kDeferredPromiseOffset, deferred_promise);
+ StoreObjectFieldNoWriteBarrier(
+ result, PromiseReactionJobInfo::kDeferredOnResolveOffset,
+ deferred_on_resolve);
+ StoreObjectFieldNoWriteBarrier(
+ result, PromiseReactionJobInfo::kDeferredOnRejectOffset,
+ deferred_on_reject);
+ StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kDebugIdOffset,
+ SmiConstant(kDebugPromiseNoID));
+ StoreObjectFieldNoWriteBarrier(result, PromiseReactionJobInfo::kContextOffset,
+ context);
+ return result;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index f8f2686f8d..e82a494926 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -15,26 +15,34 @@ namespace v8 {
namespace internal {
class CallInterfaceDescriptor;
+class CodeStubArguments;
class StatsCounter;
class StubCache;
enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
-#define HEAP_CONSTANT_LIST(V) \
- V(BooleanMap, BooleanMap) \
- V(CodeMap, CodeMap) \
- V(empty_string, EmptyString) \
- V(EmptyFixedArray, EmptyFixedArray) \
- V(FalseValue, False) \
- V(FixedArrayMap, FixedArrayMap) \
- V(FixedCOWArrayMap, FixedCOWArrayMap) \
- V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
- V(HeapNumberMap, HeapNumberMap) \
- V(MinusZeroValue, MinusZero) \
- V(NanValue, Nan) \
- V(NullValue, Null) \
- V(TheHoleValue, TheHole) \
- V(TrueValue, True) \
+#define HEAP_CONSTANT_LIST(V) \
+ V(AccessorInfoMap, AccessorInfoMap) \
+ V(AllocationSiteMap, AllocationSiteMap) \
+ V(BooleanMap, BooleanMap) \
+ V(CodeMap, CodeMap) \
+ V(empty_string, EmptyString) \
+ V(EmptyFixedArray, EmptyFixedArray) \
+ V(EmptyLiteralsArray, EmptyLiteralsArray) \
+ V(FalseValue, False) \
+ V(FixedArrayMap, FixedArrayMap) \
+ V(FixedCOWArrayMap, FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
+ V(FunctionTemplateInfoMap, FunctionTemplateInfoMap) \
+ V(HeapNumberMap, HeapNumberMap) \
+ V(MinusZeroValue, MinusZero) \
+ V(NanValue, Nan) \
+ V(NullValue, Null) \
+ V(SymbolMap, SymbolMap) \
+ V(TheHoleValue, TheHole) \
+ V(TrueValue, True) \
+ V(Tuple2Map, Tuple2Map) \
+ V(Tuple3Map, Tuple3Map) \
V(UndefinedValue, Undefined)
// Provides JavaScript-specific "macro-assembler" functionality on top of the
@@ -44,29 +52,20 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
// from a compiler directory OWNER).
class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
public:
- // Create with CallStub linkage.
- // |result_size| specifies the number of results returned by the stub.
- // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
- CodeStubAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size = 1);
-
- // Create with JSCall linkage.
- CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
+ typedef compiler::Node Node;
+
+ CodeStubAssembler(compiler::CodeAssemblerState* state);
enum AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
- kPretenured = 1 << 1
+ kPretenured = 1 << 1,
+ kAllowLargeObjectAllocation = 1 << 2,
};
typedef base::Flags<AllocationFlag> AllocationFlags;
- // TODO(ishell): Fix all loads/stores from arrays by int32 offsets/indices
- // and eventually remove INTEGER_PARAMETERS in favour of INTPTR_PARAMETERS.
- enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS, INTPTR_PARAMETERS };
+ enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
// On 32-bit platforms, there is a slight performance advantage to doing all
// of the array offset/index arithmetic with SMIs, since it's possible
@@ -78,437 +77,505 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return Is64() ? INTPTR_PARAMETERS : SMI_PARAMETERS;
}
- compiler::Node* UntagParameter(compiler::Node* value, ParameterMode mode) {
- if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+ MachineRepresentation OptimalParameterRepresentation() const {
+ return OptimalParameterMode() == INTPTR_PARAMETERS
+ ? MachineType::PointerRepresentation()
+ : MachineRepresentation::kTaggedSigned;
+ }
+
+ Node* ParameterToWord(Node* value, ParameterMode mode) {
+ if (mode == SMI_PARAMETERS) value = SmiUntag(value);
+ return value;
+ }
+
+ Node* WordToParameter(Node* value, ParameterMode mode) {
+ if (mode == SMI_PARAMETERS) value = SmiTag(value);
return value;
}
- compiler::Node* TagParameter(compiler::Node* value, ParameterMode mode) {
+ Node* ParameterToTagged(Node* value, ParameterMode mode) {
if (mode != SMI_PARAMETERS) value = SmiTag(value);
return value;
}
- compiler::Node* NoContextConstant();
-#define HEAP_CONSTANT_ACCESSOR(rootName, name) compiler::Node* name##Constant();
+ Node* TaggedToParameter(Node* value, ParameterMode mode) {
+ if (mode != SMI_PARAMETERS) value = SmiUntag(value);
+ return value;
+ }
+
+#define PARAMETER_BINOP(OpName, IntPtrOpName, SmiOpName) \
+ Node* OpName(Node* a, Node* b, ParameterMode mode) { \
+ if (mode == SMI_PARAMETERS) { \
+ return SmiOpName(a, b); \
+ } else { \
+ DCHECK_EQ(INTPTR_PARAMETERS, mode); \
+ return IntPtrOpName(a, b); \
+ } \
+ }
+ PARAMETER_BINOP(IntPtrOrSmiAdd, IntPtrAdd, SmiAdd)
+ PARAMETER_BINOP(IntPtrOrSmiLessThan, IntPtrLessThan, SmiLessThan)
+ PARAMETER_BINOP(IntPtrOrSmiGreaterThan, IntPtrGreaterThan, SmiGreaterThan)
+ PARAMETER_BINOP(IntPtrOrSmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual,
+ SmiGreaterThanOrEqual)
+ PARAMETER_BINOP(UintPtrOrSmiLessThan, UintPtrLessThan, SmiBelow)
+ PARAMETER_BINOP(UintPtrOrSmiGreaterThanOrEqual, UintPtrGreaterThanOrEqual,
+ SmiAboveOrEqual)
+#undef PARAMETER_BINOP
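Each PARAMETER_BINOP line stamps out one mode-dispatching helper; expanding the first entry mechanically gives:

  Node* IntPtrOrSmiAdd(Node* a, Node* b, ParameterMode mode) {
    if (mode == SMI_PARAMETERS) {
      return SmiAdd(a, b);       // operands are tagged Smis
    } else {
      DCHECK_EQ(INTPTR_PARAMETERS, mode);
      return IntPtrAdd(a, b);    // operands are raw word values
    }
  }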
+
+ Node* NoContextConstant();
+#define HEAP_CONSTANT_ACCESSOR(rootName, name) Node* name##Constant();
HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR)
#undef HEAP_CONSTANT_ACCESSOR
-#define HEAP_CONSTANT_TEST(rootName, name) \
- compiler::Node* Is##name(compiler::Node* value);
+#define HEAP_CONSTANT_TEST(rootName, name) Node* Is##name(Node* value);
HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST)
#undef HEAP_CONSTANT_TEST
- compiler::Node* HashSeed();
- compiler::Node* StaleRegisterConstant();
+ Node* HashSeed();
+ Node* StaleRegisterConstant();
- compiler::Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
+ Node* IntPtrOrSmiConstant(int value, ParameterMode mode);
- compiler::Node* IntPtrAddFoldConstants(compiler::Node* left,
- compiler::Node* right);
- compiler::Node* IntPtrSubFoldConstants(compiler::Node* left,
- compiler::Node* right);
  // Round the 32-bit payload of the provided word up to the next power of two.
- compiler::Node* IntPtrRoundUpToPowerOfTwo32(compiler::Node* value);
- compiler::Node* IntPtrMax(compiler::Node* left, compiler::Node* right);
+ Node* IntPtrRoundUpToPowerOfTwo32(Node* value);
+ // Select the maximum of the two provided IntPtr values.
+ Node* IntPtrMax(Node* left, Node* right);
+ // Select the minimum of the two provided IntPtr values.
+ Node* IntPtrMin(Node* left, Node* right);
// Float64 operations.
- compiler::Node* Float64Ceil(compiler::Node* x);
- compiler::Node* Float64Floor(compiler::Node* x);
- compiler::Node* Float64Round(compiler::Node* x);
- compiler::Node* Float64RoundToEven(compiler::Node* x);
- compiler::Node* Float64Trunc(compiler::Node* x);
+ Node* Float64Ceil(Node* x);
+ Node* Float64Floor(Node* x);
+ Node* Float64Round(Node* x);
+ Node* Float64RoundToEven(Node* x);
+ Node* Float64Trunc(Node* x);
// Tag a Word as a Smi value.
- compiler::Node* SmiTag(compiler::Node* value);
+ Node* SmiTag(Node* value);
// Untag a Smi value as a Word.
- compiler::Node* SmiUntag(compiler::Node* value);
+ Node* SmiUntag(Node* value);
// Smi conversions.
- compiler::Node* SmiToFloat64(compiler::Node* value);
- compiler::Node* SmiFromWord(compiler::Node* value) { return SmiTag(value); }
- compiler::Node* SmiFromWord32(compiler::Node* value);
- compiler::Node* SmiToWord(compiler::Node* value) { return SmiUntag(value); }
- compiler::Node* SmiToWord32(compiler::Node* value);
+ Node* SmiToFloat64(Node* value);
+ Node* SmiFromWord(Node* value) { return SmiTag(value); }
+ Node* SmiFromWord32(Node* value);
+ Node* SmiToWord(Node* value) { return SmiUntag(value); }
+ Node* SmiToWord32(Node* value);
// Smi operations.
- compiler::Node* SmiAdd(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiSub(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiEqual(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiAbove(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiAboveOrEqual(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiBelow(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiLessThan(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiLessThanOrEqual(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiMax(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiMin(compiler::Node* a, compiler::Node* b);
- // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
- compiler::Node* SmiMod(compiler::Node* a, compiler::Node* b);
- // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
- compiler::Node* SmiMul(compiler::Node* a, compiler::Node* b);
- compiler::Node* SmiOr(compiler::Node* a, compiler::Node* b) {
+#define SMI_ARITHMETIC_BINOP(SmiOpName, IntPtrOpName) \
+ Node* SmiOpName(Node* a, Node* b) { \
+ return BitcastWordToTaggedSigned( \
+ IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b))); \
+ }
+ SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd)
+ SMI_ARITHMETIC_BINOP(SmiSub, IntPtrSub)
+ SMI_ARITHMETIC_BINOP(SmiAnd, WordAnd)
+ SMI_ARITHMETIC_BINOP(SmiOr, WordOr)
+#undef SMI_ARITHMETIC_BINOP
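The bitcast trick is sound because a Smi's tag bits are zero: word-level add/sub/and/or on the tagged representation therefore yields a correctly tagged Smi (overflow remains the caller's concern). Expanding SMI_ARITHMETIC_BINOP(SmiAdd, IntPtrAdd) mechanically:

  Node* SmiAdd(Node* a, Node* b) {
    // Untag-free addition: since the tag bits are zero, adding the two
    // tagged words directly produces the tagged sum.
    return BitcastWordToTaggedSigned(
        IntPtrAdd(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
  }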
+
+ Node* SmiShl(Node* a, int shift) {
+ return BitcastWordToTaggedSigned(WordShl(BitcastTaggedToWord(a), shift));
+ }
+
+ Node* SmiShr(Node* a, int shift) {
return BitcastWordToTaggedSigned(
- WordOr(BitcastTaggedToWord(a), BitcastTaggedToWord(b)));
+ WordAnd(WordShr(BitcastTaggedToWord(a), shift),
+ BitcastTaggedToWord(SmiConstant(-1))));
+ }
+
+ Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) {
+ if (mode == SMI_PARAMETERS) {
+ return SmiShl(a, shift);
+ } else {
+ DCHECK_EQ(INTPTR_PARAMETERS, mode);
+ return WordShl(a, shift);
+ }
+ }
+
+ Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) {
+ if (mode == SMI_PARAMETERS) {
+ return SmiShr(a, shift);
+ } else {
+ DCHECK_EQ(INTPTR_PARAMETERS, mode);
+ return WordShr(a, shift);
+ }
}
+#define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName) \
+ Node* SmiOpName(Node* a, Node* b) { \
+ return IntPtrOpName(BitcastTaggedToWord(a), BitcastTaggedToWord(b)); \
+ }
+ SMI_COMPARISON_OP(SmiEqual, WordEqual)
+ SMI_COMPARISON_OP(SmiAbove, UintPtrGreaterThan)
+ SMI_COMPARISON_OP(SmiAboveOrEqual, UintPtrGreaterThanOrEqual)
+ SMI_COMPARISON_OP(SmiBelow, UintPtrLessThan)
+ SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan)
+ SMI_COMPARISON_OP(SmiLessThanOrEqual, IntPtrLessThanOrEqual)
+ SMI_COMPARISON_OP(SmiGreaterThan, IntPtrGreaterThan)
+ SMI_COMPARISON_OP(SmiGreaterThanOrEqual, IntPtrGreaterThanOrEqual)
+#undef SMI_COMPARISON_OP
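Comparisons need no re-tagging at all: Smi tagging is a left shift, which preserves both signed and unsigned order, so the tagged words can be compared directly. Expanding SMI_COMPARISON_OP(SmiLessThan, IntPtrLessThan):

  Node* SmiLessThan(Node* a, Node* b) {
    return IntPtrLessThan(BitcastTaggedToWord(a), BitcastTaggedToWord(b));
  }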
+ Node* SmiMax(Node* a, Node* b);
+ Node* SmiMin(Node* a, Node* b);
+ // Computes a % b for Smi inputs a and b; result is not necessarily a Smi.
+ Node* SmiMod(Node* a, Node* b);
+ // Computes a * b for Smi inputs a and b; result is not necessarily a Smi.
+ Node* SmiMul(Node* a, Node* b);
+
// Smi | HeapNumber operations.
- compiler::Node* NumberInc(compiler::Node* value);
+ Node* NumberInc(Node* value);
+ void GotoIfNotNumber(Node* value, Label* is_not_number);
+ void GotoIfNumber(Node* value, Label* is_number);
// Allocate an object of the given size.
- compiler::Node* Allocate(compiler::Node* size, AllocationFlags flags = kNone);
- compiler::Node* Allocate(int size, AllocationFlags flags = kNone);
- compiler::Node* InnerAllocate(compiler::Node* previous, int offset);
- compiler::Node* InnerAllocate(compiler::Node* previous,
- compiler::Node* offset);
- compiler::Node* IsRegularHeapObjectSize(compiler::Node* size);
-
- typedef std::function<compiler::Node*()> ConditionBody;
- void Assert(ConditionBody condition_body, const char* string = nullptr,
+ Node* Allocate(Node* size, AllocationFlags flags = kNone);
+ Node* Allocate(int size, AllocationFlags flags = kNone);
+ Node* InnerAllocate(Node* previous, int offset);
+ Node* InnerAllocate(Node* previous, Node* offset);
+ Node* IsRegularHeapObjectSize(Node* size);
+
+ typedef std::function<Node*()> NodeGenerator;
+
+ void Assert(const NodeGenerator& condition_body, const char* string = nullptr,
const char* file = nullptr, int line = 0);
+ Node* Select(Node* condition, const NodeGenerator& true_body,
+ const NodeGenerator& false_body, MachineRepresentation rep);
+
+ Node* SelectConstant(Node* condition, Node* true_value, Node* false_value,
+ MachineRepresentation rep);
+
+ Node* SelectInt32Constant(Node* condition, int true_value, int false_value);
+ Node* SelectIntPtrConstant(Node* condition, int true_value, int false_value);
+ Node* SelectBooleanConstant(Node* condition);
+ Node* SelectTaggedConstant(Node* condition, Node* true_value,
+ Node* false_value);
+ Node* SelectSmiConstant(Node* condition, Smi* true_value, Smi* false_value);
+ Node* SelectSmiConstant(Node* condition, int true_value, Smi* false_value) {
+ return SelectSmiConstant(condition, Smi::FromInt(true_value), false_value);
+ }
+ Node* SelectSmiConstant(Node* condition, Smi* true_value, int false_value) {
+ return SelectSmiConstant(condition, true_value, Smi::FromInt(false_value));
+ }
+ Node* SelectSmiConstant(Node* condition, int true_value, int false_value) {
+ return SelectSmiConstant(condition, Smi::FromInt(true_value),
+ Smi::FromInt(false_value));
+ }
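The SelectConstant family avoids spelling out a label diamond when both outcomes are constants. A sketch combining two of the new overloads with TaggedIsSmi (the {value} node is illustrative):

  // Smi 1 if {value} is a Smi, Smi 0 otherwise.
  Node* smi_flag = SelectSmiConstant(TaggedIsSmi(value), 1, 0);
  // The true/false oddballs materialized from the same condition.
  Node* is_smi = SelectBooleanConstant(TaggedIsSmi(value));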
+
+ Node* TruncateWordToWord32(Node* value);
+
// Check a value for smi-ness
- compiler::Node* TaggedIsSmi(compiler::Node* a);
+ Node* TaggedIsSmi(Node* a);
+ Node* TaggedIsNotSmi(Node* a);
// Check that the value is a non-negative smi.
- compiler::Node* WordIsPositiveSmi(compiler::Node* a);
+ Node* TaggedIsPositiveSmi(Node* a);
// Check that a word has a word-aligned address.
- compiler::Node* WordIsWordAligned(compiler::Node* word);
- compiler::Node* WordIsPowerOfTwo(compiler::Node* value);
+ Node* WordIsWordAligned(Node* word);
+ Node* WordIsPowerOfTwo(Node* value);
- void BranchIfSmiEqual(compiler::Node* a, compiler::Node* b, Label* if_true,
- Label* if_false) {
+ void BranchIfSmiEqual(Node* a, Node* b, Label* if_true, Label* if_false) {
Branch(SmiEqual(a, b), if_true, if_false);
}
- void BranchIfSmiLessThan(compiler::Node* a, compiler::Node* b, Label* if_true,
- Label* if_false) {
+ void BranchIfSmiLessThan(Node* a, Node* b, Label* if_true, Label* if_false) {
Branch(SmiLessThan(a, b), if_true, if_false);
}
- void BranchIfSmiLessThanOrEqual(compiler::Node* a, compiler::Node* b,
- Label* if_true, Label* if_false) {
+ void BranchIfSmiLessThanOrEqual(Node* a, Node* b, Label* if_true,
+ Label* if_false) {
Branch(SmiLessThanOrEqual(a, b), if_true, if_false);
}
- void BranchIfFloat64IsNaN(compiler::Node* value, Label* if_true,
- Label* if_false) {
+ void BranchIfFloat64IsNaN(Node* value, Label* if_true, Label* if_false) {
Branch(Float64Equal(value, value), if_false, if_true);
}
// Branches to {if_true} if ToBoolean applied to {value} yields true,
// otherwise goes to {if_false}.
- void BranchIfToBooleanIsTrue(compiler::Node* value, Label* if_true,
- Label* if_false);
+ void BranchIfToBooleanIsTrue(Node* value, Label* if_true, Label* if_false);
- void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* lhs_map,
- compiler::Node* rhs, compiler::Node* rhs_map,
+ void BranchIfSimd128Equal(Node* lhs, Node* lhs_map, Node* rhs, Node* rhs_map,
Label* if_equal, Label* if_notequal);
- void BranchIfSimd128Equal(compiler::Node* lhs, compiler::Node* rhs,
- Label* if_equal, Label* if_notequal) {
+ void BranchIfSimd128Equal(Node* lhs, Node* rhs, Label* if_equal,
+ Label* if_notequal) {
BranchIfSimd128Equal(lhs, LoadMap(lhs), rhs, LoadMap(rhs), if_equal,
if_notequal);
}
- void BranchIfJSReceiver(compiler::Node* object, Label* if_true,
- Label* if_false);
- void BranchIfJSObject(compiler::Node* object, Label* if_true,
- Label* if_false);
- void BranchIfFastJSArray(compiler::Node* object, compiler::Node* context,
- Label* if_true, Label* if_false);
+ void BranchIfJSReceiver(Node* object, Label* if_true, Label* if_false);
+ void BranchIfJSObject(Node* object, Label* if_true, Label* if_false);
+
+ enum class FastJSArrayAccessMode { INBOUNDS_READ, ANY_ACCESS };
+ void BranchIfFastJSArray(Node* object, Node* context,
+ FastJSArrayAccessMode mode, Label* if_true,
+ Label* if_false);
// Load value from current frame by given offset in bytes.
- compiler::Node* LoadFromFrame(int offset,
- MachineType rep = MachineType::AnyTagged());
+ Node* LoadFromFrame(int offset, MachineType rep = MachineType::AnyTagged());
// Load value from current parent frame by given offset in bytes.
- compiler::Node* LoadFromParentFrame(
- int offset, MachineType rep = MachineType::AnyTagged());
+ Node* LoadFromParentFrame(int offset,
+ MachineType rep = MachineType::AnyTagged());
// Load an object pointer from a buffer that isn't in the heap.
- compiler::Node* LoadBufferObject(compiler::Node* buffer, int offset,
- MachineType rep = MachineType::AnyTagged());
+ Node* LoadBufferObject(Node* buffer, int offset,
+ MachineType rep = MachineType::AnyTagged());
// Load a field from an object on the heap.
- compiler::Node* LoadObjectField(compiler::Node* object, int offset,
- MachineType rep = MachineType::AnyTagged());
- compiler::Node* LoadObjectField(compiler::Node* object,
- compiler::Node* offset,
- MachineType rep = MachineType::AnyTagged());
+ Node* LoadObjectField(Node* object, int offset,
+ MachineType rep = MachineType::AnyTagged());
+ Node* LoadObjectField(Node* object, Node* offset,
+ MachineType rep = MachineType::AnyTagged());
// Load a SMI field and untag it.
- compiler::Node* LoadAndUntagObjectField(compiler::Node* object, int offset);
+ Node* LoadAndUntagObjectField(Node* object, int offset);
// Load a SMI field, untag it, and convert to Word32.
- compiler::Node* LoadAndUntagToWord32ObjectField(compiler::Node* object,
- int offset);
+ Node* LoadAndUntagToWord32ObjectField(Node* object, int offset);
// Load a SMI and untag it.
- compiler::Node* LoadAndUntagSmi(compiler::Node* base, int index);
+ Node* LoadAndUntagSmi(Node* base, int index);
// Load a SMI root, untag it, and convert to Word32.
- compiler::Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
+ Node* LoadAndUntagToWord32Root(Heap::RootListIndex root_index);
// Load the floating point value of a HeapNumber.
- compiler::Node* LoadHeapNumberValue(compiler::Node* object);
+ Node* LoadHeapNumberValue(Node* object);
  // Load the Map of a HeapObject.
- compiler::Node* LoadMap(compiler::Node* object);
+ Node* LoadMap(Node* object);
  // Load the instance type of a HeapObject.
- compiler::Node* LoadInstanceType(compiler::Node* object);
+ Node* LoadInstanceType(Node* object);
  // Compare the instance type of the object against the provided one.
- compiler::Node* HasInstanceType(compiler::Node* object, InstanceType type);
+ Node* HasInstanceType(Node* object, InstanceType type);
+ Node* DoesntHaveInstanceType(Node* object, InstanceType type);
// Load the properties backing store of a JSObject.
- compiler::Node* LoadProperties(compiler::Node* object);
+ Node* LoadProperties(Node* object);
// Load the elements backing store of a JSObject.
- compiler::Node* LoadElements(compiler::Node* object);
+ Node* LoadElements(Node* object);
// Load the length of a JSArray instance.
- compiler::Node* LoadJSArrayLength(compiler::Node* array);
+ Node* LoadJSArrayLength(Node* array);
// Load the length of a fixed array base instance.
- compiler::Node* LoadFixedArrayBaseLength(compiler::Node* array);
+ Node* LoadFixedArrayBaseLength(Node* array);
  // Load the length of a fixed array base instance and untag it.
- compiler::Node* LoadAndUntagFixedArrayBaseLength(compiler::Node* array);
+ Node* LoadAndUntagFixedArrayBaseLength(Node* array);
// Load the bit field of a Map.
- compiler::Node* LoadMapBitField(compiler::Node* map);
+ Node* LoadMapBitField(Node* map);
// Load bit field 2 of a map.
- compiler::Node* LoadMapBitField2(compiler::Node* map);
+ Node* LoadMapBitField2(Node* map);
// Load bit field 3 of a map.
- compiler::Node* LoadMapBitField3(compiler::Node* map);
+ Node* LoadMapBitField3(Node* map);
// Load the instance type of a map.
- compiler::Node* LoadMapInstanceType(compiler::Node* map);
+ Node* LoadMapInstanceType(Node* map);
// Load the ElementsKind of a map.
- compiler::Node* LoadMapElementsKind(compiler::Node* map);
+ Node* LoadMapElementsKind(Node* map);
// Load the instance descriptors of a map.
- compiler::Node* LoadMapDescriptors(compiler::Node* map);
+ Node* LoadMapDescriptors(Node* map);
// Load the prototype of a map.
- compiler::Node* LoadMapPrototype(compiler::Node* map);
+ Node* LoadMapPrototype(Node* map);
  // Load the prototype info of a map. The caller must check whether the
  // result is actually a prototype info object.
- compiler::Node* LoadMapPrototypeInfo(compiler::Node* map,
- Label* if_has_no_proto_info);
+ Node* LoadMapPrototypeInfo(Node* map, Label* if_has_no_proto_info);
// Load the instance size of a Map.
- compiler::Node* LoadMapInstanceSize(compiler::Node* map);
+ Node* LoadMapInstanceSize(Node* map);
// Load the inobject properties count of a Map (valid only for JSObjects).
- compiler::Node* LoadMapInobjectProperties(compiler::Node* map);
+ Node* LoadMapInobjectProperties(Node* map);
// Load the constructor function index of a Map (only for primitive maps).
- compiler::Node* LoadMapConstructorFunctionIndex(compiler::Node* map);
+ Node* LoadMapConstructorFunctionIndex(Node* map);
// Load the constructor of a Map (equivalent to Map::GetConstructor()).
- compiler::Node* LoadMapConstructor(compiler::Node* map);
+ Node* LoadMapConstructor(Node* map);
  // Check whether the map is configured for slow (dictionary) properties.
- compiler::Node* IsDictionaryMap(compiler::Node* map);
+ Node* IsDictionaryMap(Node* map);
  // Load the hash field of a name as a uint32 value.
- compiler::Node* LoadNameHashField(compiler::Node* name);
+ Node* LoadNameHashField(Node* name);
  // Load the hash value of a name as a uint32 value.
  // If the {if_hash_not_computed} label is specified then it also checks
  // whether the hash was actually computed.
- compiler::Node* LoadNameHash(compiler::Node* name,
- Label* if_hash_not_computed = nullptr);
+ Node* LoadNameHash(Node* name, Label* if_hash_not_computed = nullptr);
// Load length field of a String object.
- compiler::Node* LoadStringLength(compiler::Node* object);
+ Node* LoadStringLength(Node* object);
// Load value field of a JSValue object.
- compiler::Node* LoadJSValueValue(compiler::Node* object);
+ Node* LoadJSValueValue(Node* object);
// Load value field of a WeakCell object.
- compiler::Node* LoadWeakCellValueUnchecked(compiler::Node* weak_cell);
- compiler::Node* LoadWeakCellValue(compiler::Node* weak_cell,
- Label* if_cleared = nullptr);
+ Node* LoadWeakCellValueUnchecked(Node* weak_cell);
+ Node* LoadWeakCellValue(Node* weak_cell, Label* if_cleared = nullptr);
// Load an array element from a FixedArray.
- compiler::Node* LoadFixedArrayElement(
- compiler::Node* object, compiler::Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ Node* LoadFixedArrayElement(Node* object, Node* index,
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+ Node* LoadFixedArrayElement(Node* object, int index,
+ int additional_offset = 0) {
+ return LoadFixedArrayElement(object, IntPtrConstant(index),
+ additional_offset);
+ }
// Load an array element from a FixedArray, untag it and return it as Word32.
- compiler::Node* LoadAndUntagToWord32FixedArrayElement(
- compiler::Node* object, compiler::Node* index, int additional_offset = 0,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ Node* LoadAndUntagToWord32FixedArrayElement(
+ Node* object, Node* index, int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Load an array element from a FixedDoubleArray.
- compiler::Node* LoadFixedDoubleArrayElement(
- compiler::Node* object, compiler::Node* index, MachineType machine_type,
+ Node* LoadFixedDoubleArrayElement(
+ Node* object, Node* index, MachineType machine_type,
int additional_offset = 0,
- ParameterMode parameter_mode = INTEGER_PARAMETERS,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS,
Label* if_hole = nullptr);
// Load Float64 value by |base| + |offset| address. If the value is a double
// hole then jump to |if_hole|. If |machine_type| is None then only the hole
// check is generated.
- compiler::Node* LoadDoubleWithHoleCheck(
- compiler::Node* base, compiler::Node* offset, Label* if_hole,
+ Node* LoadDoubleWithHoleCheck(
+ Node* base, Node* offset, Label* if_hole,
MachineType machine_type = MachineType::Float64());
- compiler::Node* LoadFixedTypedArrayElement(
- compiler::Node* data_pointer, compiler::Node* index_node,
- ElementsKind elements_kind,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ Node* LoadFixedTypedArrayElement(
+ Node* data_pointer, Node* index_node, ElementsKind elements_kind,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
// Context manipulation
- compiler::Node* LoadContextElement(compiler::Node* context, int slot_index);
- compiler::Node* LoadContextElement(compiler::Node* context,
- compiler::Node* slot_index);
- compiler::Node* StoreContextElement(compiler::Node* context, int slot_index,
- compiler::Node* value);
- compiler::Node* StoreContextElement(compiler::Node* context,
- compiler::Node* slot_index,
- compiler::Node* value);
- compiler::Node* LoadNativeContext(compiler::Node* context);
-
- compiler::Node* LoadJSArrayElementsMap(ElementsKind kind,
- compiler::Node* native_context);
+ Node* LoadContextElement(Node* context, int slot_index);
+ Node* LoadContextElement(Node* context, Node* slot_index);
+ Node* StoreContextElement(Node* context, int slot_index, Node* value);
+ Node* StoreContextElement(Node* context, Node* slot_index, Node* value);
+ Node* StoreContextElementNoWriteBarrier(Node* context, int slot_index,
+ Node* value);
+ Node* LoadNativeContext(Node* context);
+
+ Node* LoadJSArrayElementsMap(ElementsKind kind, Node* native_context);
// Store the floating point value of a HeapNumber.
- compiler::Node* StoreHeapNumberValue(compiler::Node* object,
- compiler::Node* value);
+ Node* StoreHeapNumberValue(Node* object, Node* value);
// Store a field to an object on the heap.
- compiler::Node* StoreObjectField(
- compiler::Node* object, int offset, compiler::Node* value);
- compiler::Node* StoreObjectField(compiler::Node* object,
- compiler::Node* offset,
- compiler::Node* value);
- compiler::Node* StoreObjectFieldNoWriteBarrier(
- compiler::Node* object, int offset, compiler::Node* value,
+ Node* StoreObjectField(Node* object, int offset, Node* value);
+ Node* StoreObjectField(Node* object, Node* offset, Node* value);
+ Node* StoreObjectFieldNoWriteBarrier(
+ Node* object, int offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
- compiler::Node* StoreObjectFieldNoWriteBarrier(
- compiler::Node* object, compiler::Node* offset, compiler::Node* value,
+ Node* StoreObjectFieldNoWriteBarrier(
+ Node* object, Node* offset, Node* value,
MachineRepresentation rep = MachineRepresentation::kTagged);
  // Store the Map of a HeapObject.
- compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
- compiler::Node* map);
- compiler::Node* StoreObjectFieldRoot(compiler::Node* object, int offset,
- Heap::RootListIndex root);
+ Node* StoreMap(Node* object, Node* map);
+ Node* StoreMapNoWriteBarrier(Node* object,
+ Heap::RootListIndex map_root_index);
+ Node* StoreMapNoWriteBarrier(Node* object, Node* map);
+ Node* StoreObjectFieldRoot(Node* object, int offset,
+ Heap::RootListIndex root);
// Store an array element to a FixedArray.
- compiler::Node* StoreFixedArrayElement(
- compiler::Node* object, int index, compiler::Node* value,
- WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode parameter_mode = INTEGER_PARAMETERS) {
- return StoreFixedArrayElement(object, Int32Constant(index), value,
- barrier_mode, parameter_mode);
+ Node* StoreFixedArrayElement(
+ Node* object, int index, Node* value,
+ WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) {
+ return StoreFixedArrayElement(object, IntPtrConstant(index), value,
+ barrier_mode);
}
- compiler::Node* StoreFixedArrayElement(
- compiler::Node* object, compiler::Node* index, compiler::Node* value,
+ Node* StoreFixedArrayElement(
+ Node* object, Node* index, Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ int additional_offset = 0,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
+
+ Node* StoreFixedDoubleArrayElement(
+ Node* object, Node* index, Node* value,
+ ParameterMode parameter_mode = INTPTR_PARAMETERS);
- compiler::Node* StoreFixedDoubleArrayElement(
- compiler::Node* object, compiler::Node* index, compiler::Node* value,
- ParameterMode parameter_mode = INTEGER_PARAMETERS);
+ Node* BuildAppendJSArray(ElementsKind kind, Node* context, Node* array,
+ CodeStubArguments& args, Variable& arg_index,
+ Label* bailout);
- void StoreFieldsNoWriteBarrier(compiler::Node* start_address,
- compiler::Node* end_address,
- compiler::Node* value);
+ void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
+ Node* value);
// Allocate a HeapNumber without initializing its value.
- compiler::Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
+ Node* AllocateHeapNumber(MutableMode mode = IMMUTABLE);
// Allocate a HeapNumber with a specific value.
- compiler::Node* AllocateHeapNumberWithValue(compiler::Node* value,
- MutableMode mode = IMMUTABLE);
+ Node* AllocateHeapNumberWithValue(Node* value, MutableMode mode = IMMUTABLE);
// Allocate a SeqOneByteString with the given length.
- compiler::Node* AllocateSeqOneByteString(int length,
- AllocationFlags flags = kNone);
- compiler::Node* AllocateSeqOneByteString(
- compiler::Node* context, compiler::Node* length,
- ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
+ Node* AllocateSeqOneByteString(int length, AllocationFlags flags = kNone);
+ Node* AllocateSeqOneByteString(Node* context, Node* length,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ AllocationFlags flags = kNone);
// Allocate a SeqTwoByteString with the given length.
- compiler::Node* AllocateSeqTwoByteString(int length,
- AllocationFlags flags = kNone);
- compiler::Node* AllocateSeqTwoByteString(
- compiler::Node* context, compiler::Node* length,
- ParameterMode mode = INTPTR_PARAMETERS, AllocationFlags flags = kNone);
+ Node* AllocateSeqTwoByteString(int length, AllocationFlags flags = kNone);
+ Node* AllocateSeqTwoByteString(Node* context, Node* length,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ AllocationFlags flags = kNone);
// Allocate a SlicedOneByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- compiler::Node* AllocateSlicedOneByteString(compiler::Node* length,
- compiler::Node* parent,
- compiler::Node* offset);
+ Node* AllocateSlicedOneByteString(Node* length, Node* parent, Node* offset);
// Allocate a SlicedTwoByteString with the given length, parent and offset.
// |length| and |offset| are expected to be tagged.
- compiler::Node* AllocateSlicedTwoByteString(compiler::Node* length,
- compiler::Node* parent,
- compiler::Node* offset);
+ Node* AllocateSlicedTwoByteString(Node* length, Node* parent, Node* offset);
// Allocate a one-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be one-byte strings.
- compiler::Node* AllocateOneByteConsString(compiler::Node* length,
- compiler::Node* first,
- compiler::Node* second,
- AllocationFlags flags = kNone);
+ Node* AllocateOneByteConsString(Node* length, Node* first, Node* second,
+ AllocationFlags flags = kNone);
// Allocate a two-byte ConsString with the given length, first and second
// parts. |length| is expected to be tagged, and |first| and |second| are
// expected to be two-byte strings.
- compiler::Node* AllocateTwoByteConsString(compiler::Node* length,
- compiler::Node* first,
- compiler::Node* second,
- AllocationFlags flags = kNone);
+ Node* AllocateTwoByteConsString(Node* length, Node* first, Node* second,
+ AllocationFlags flags = kNone);
// Allocate an appropriate one- or two-byte ConsString with the first and
// second parts specified by |first| and |second|.
- compiler::Node* NewConsString(compiler::Node* context, compiler::Node* length,
- compiler::Node* left, compiler::Node* right,
- AllocationFlags flags = kNone);
+ Node* NewConsString(Node* context, Node* length, Node* left, Node* right,
+ AllocationFlags flags = kNone);
// Allocate a RegExpResult with the given length (the number of captures,
// including the match itself), index (the index where the match starts),
// and input string. |length| and |index| are expected to be tagged, and
// |input| must be a string.
- compiler::Node* AllocateRegExpResult(compiler::Node* context,
- compiler::Node* length,
- compiler::Node* index,
- compiler::Node* input);
+ Node* AllocateRegExpResult(Node* context, Node* length, Node* index,
+ Node* input);
- compiler::Node* AllocateNameDictionary(int capacity);
- compiler::Node* AllocateNameDictionary(compiler::Node* capacity);
+ Node* AllocateNameDictionary(int capacity);
+ Node* AllocateNameDictionary(Node* capacity);
- compiler::Node* AllocateJSObjectFromMap(compiler::Node* map,
- compiler::Node* properties = nullptr,
- compiler::Node* elements = nullptr);
+ Node* AllocateJSObjectFromMap(Node* map, Node* properties = nullptr,
+ Node* elements = nullptr,
+ AllocationFlags flags = kNone);
- void InitializeJSObjectFromMap(compiler::Node* object, compiler::Node* map,
- compiler::Node* size,
- compiler::Node* properties = nullptr,
- compiler::Node* elements = nullptr);
+ void InitializeJSObjectFromMap(Node* object, Node* map, Node* size,
+ Node* properties = nullptr,
+ Node* elements = nullptr);
- void InitializeJSObjectBody(compiler::Node* object, compiler::Node* map,
- compiler::Node* size,
+ void InitializeJSObjectBody(Node* object, Node* map, Node* size,
int start_offset = JSObject::kHeaderSize);
// Allocate a JSArray without elements and initialize the header fields.
- compiler::Node* AllocateUninitializedJSArrayWithoutElements(
- ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
- compiler::Node* allocation_site);
+ Node* AllocateUninitializedJSArrayWithoutElements(ElementsKind kind,
+ Node* array_map,
+ Node* length,
+ Node* allocation_site);
// Allocate and return a JSArray with initialized header fields and its
// uninitialized elements.
// The ParameterMode argument is only used for the capacity parameter.
- std::pair<compiler::Node*, compiler::Node*>
- AllocateUninitializedJSArrayWithElements(
- ElementsKind kind, compiler::Node* array_map, compiler::Node* length,
- compiler::Node* allocation_site, compiler::Node* capacity,
- ParameterMode capacity_mode = INTEGER_PARAMETERS);
+ std::pair<Node*, Node*> AllocateUninitializedJSArrayWithElements(
+ ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
+ Node* capacity, ParameterMode capacity_mode = INTPTR_PARAMETERS);
// Allocate a JSArray and fill elements with the hole.
// The ParameterMode argument is only used for the capacity parameter.
- compiler::Node* AllocateJSArray(
- ElementsKind kind, compiler::Node* array_map, compiler::Node* capacity,
- compiler::Node* length, compiler::Node* allocation_site = nullptr,
- ParameterMode capacity_mode = INTEGER_PARAMETERS);
+ Node* AllocateJSArray(ElementsKind kind, Node* array_map, Node* capacity,
+ Node* length, Node* allocation_site = nullptr,
+ ParameterMode capacity_mode = INTPTR_PARAMETERS);
- compiler::Node* AllocateFixedArray(ElementsKind kind,
- compiler::Node* capacity,
- ParameterMode mode = INTEGER_PARAMETERS,
- AllocationFlags flags = kNone);
+ Node* AllocateFixedArray(ElementsKind kind, Node* capacity,
+ ParameterMode mode = INTPTR_PARAMETERS,
+ AllocationFlags flags = kNone);
// Perform CreateArrayIterator (ES6 #sec-createarrayiterator).
- compiler::Node* CreateArrayIterator(compiler::Node* array,
- compiler::Node* array_map,
- compiler::Node* array_type,
- compiler::Node* context,
- IterationKind mode);
-
- compiler::Node* AllocateJSArrayIterator(compiler::Node* array,
- compiler::Node* array_map,
- compiler::Node* map);
-
- void FillFixedArrayWithValue(ElementsKind kind, compiler::Node* array,
- compiler::Node* from_index,
- compiler::Node* to_index,
+ Node* CreateArrayIterator(Node* array, Node* array_map, Node* array_type,
+ Node* context, IterationKind mode);
+
+ Node* AllocateJSArrayIterator(Node* array, Node* array_map, Node* map);
+
+ void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
+ Node* to_index,
Heap::RootListIndex value_root_index,
- ParameterMode mode = INTEGER_PARAMETERS);
+ ParameterMode mode = INTPTR_PARAMETERS);
// Copies all elements from |from_array| of |length| size to
// |to_array| of the same size respecting the elements kind.
void CopyFixedArrayElements(
- ElementsKind kind, compiler::Node* from_array, compiler::Node* to_array,
- compiler::Node* length,
+ ElementsKind kind, Node* from_array, Node* to_array, Node* length,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTEGER_PARAMETERS) {
+ ParameterMode mode = INTPTR_PARAMETERS) {
CopyFixedArrayElements(kind, from_array, kind, to_array, length, length,
barrier_mode, mode);
}
@@ -516,11 +583,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Copies |element_count| elements from |from_array| to |to_array| of
// |capacity| size respecting both array's elements kinds.
void CopyFixedArrayElements(
- ElementsKind from_kind, compiler::Node* from_array, ElementsKind to_kind,
- compiler::Node* to_array, compiler::Node* element_count,
- compiler::Node* capacity,
+ ElementsKind from_kind, Node* from_array, ElementsKind to_kind,
+ Node* to_array, Node* element_count, Node* capacity,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
- ParameterMode mode = INTEGER_PARAMETERS);
+ ParameterMode mode = INTPTR_PARAMETERS);
// Copies |character_count| elements from |from_string| to |to_string|
// starting at the |from_index|'th character. |from_string| and |to_string|
@@ -530,11 +596,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// intptr_ts depending on |mode| s.t. 0 <= |from_index| <= |from_index| +
// |character_count| <= from_string.length and 0 <= |to_index| <= |to_index| +
// |character_count| <= to_string.length.
- void CopyStringCharacters(compiler::Node* from_string,
- compiler::Node* to_string,
- compiler::Node* from_index,
- compiler::Node* to_index,
- compiler::Node* character_count,
+ void CopyStringCharacters(Node* from_string, Node* to_string,
+ Node* from_index, Node* to_index,
+ Node* character_count,
String::Encoding from_encoding,
String::Encoding to_encoding, ParameterMode mode);
@@ -542,154 +606,143 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// (NOTE: not index!), does a hole check if |if_hole| is provided and
// converts the value so that it becomes ready for storing to array of
// |to_kind| elements.
- compiler::Node* LoadElementAndPrepareForStore(compiler::Node* array,
- compiler::Node* offset,
- ElementsKind from_kind,
- ElementsKind to_kind,
- Label* if_hole);
+ Node* LoadElementAndPrepareForStore(Node* array, Node* offset,
+ ElementsKind from_kind,
+ ElementsKind to_kind, Label* if_hole);
- compiler::Node* CalculateNewElementsCapacity(
- compiler::Node* old_capacity, ParameterMode mode = INTEGER_PARAMETERS);
+ Node* CalculateNewElementsCapacity(Node* old_capacity,
+ ParameterMode mode = INTPTR_PARAMETERS);
// Tries to grow the |elements| array of given |object| to store the |key|
// or bails out if the growing gap is too big. Returns new elements.
- compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
- compiler::Node* elements,
- ElementsKind kind,
- compiler::Node* key, Label* bailout);
+ Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
+ Node* key, Label* bailout);
// Tries to grow the |capacity|-length |elements| array of given |object|
// to store the |key| or bails out if the growing gap is too big. Returns
// new elements.
- compiler::Node* TryGrowElementsCapacity(compiler::Node* object,
- compiler::Node* elements,
- ElementsKind kind,
- compiler::Node* key,
- compiler::Node* capacity,
- ParameterMode mode, Label* bailout);
+ Node* TryGrowElementsCapacity(Node* object, Node* elements, ElementsKind kind,
+ Node* key, Node* capacity, ParameterMode mode,
+ Label* bailout);
// Grows elements capacity of given object. Returns new elements.
- compiler::Node* GrowElementsCapacity(
- compiler::Node* object, compiler::Node* elements, ElementsKind from_kind,
- ElementsKind to_kind, compiler::Node* capacity,
- compiler::Node* new_capacity, ParameterMode mode, Label* bailout);
+ Node* GrowElementsCapacity(Node* object, Node* elements,
+ ElementsKind from_kind, ElementsKind to_kind,
+ Node* capacity, Node* new_capacity,
+ ParameterMode mode, Label* bailout);
// Allocation site manipulation
- void InitializeAllocationMemento(compiler::Node* base_allocation,
+ void InitializeAllocationMemento(Node* base_allocation,
int base_allocation_size,
- compiler::Node* allocation_site);
-
- compiler::Node* TryTaggedToFloat64(compiler::Node* value,
- Label* if_valueisnotnumber);
- compiler::Node* TruncateTaggedToFloat64(compiler::Node* context,
- compiler::Node* value);
- compiler::Node* TruncateTaggedToWord32(compiler::Node* context,
- compiler::Node* value);
+ Node* allocation_site);
+
+ Node* TryTaggedToFloat64(Node* value, Label* if_valueisnotnumber);
+ Node* TruncateTaggedToFloat64(Node* context, Node* value);
+ Node* TruncateTaggedToWord32(Node* context, Node* value);
// Truncate the floating point value of a HeapNumber to an Int32.
- compiler::Node* TruncateHeapNumberValueToWord32(compiler::Node* object);
+ Node* TruncateHeapNumberValueToWord32(Node* object);
// Conversions.
- compiler::Node* ChangeFloat64ToTagged(compiler::Node* value);
- compiler::Node* ChangeInt32ToTagged(compiler::Node* value);
- compiler::Node* ChangeUint32ToTagged(compiler::Node* value);
+ Node* ChangeFloat64ToTagged(Node* value);
+ Node* ChangeInt32ToTagged(Node* value);
+ Node* ChangeUint32ToTagged(Node* value);
+ Node* ChangeNumberToFloat64(Node* value);
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
- compiler::Node* ToThisString(compiler::Node* context, compiler::Node* value,
- char const* method_name);
+ Node* ToThisString(Node* context, Node* value, char const* method_name);
// Throws a TypeError for {method_name} if {value} is neither of the given
// {primitive_type} nor a JSValue wrapping a value of {primitive_type}, or
// returns the {value} (or wrapped value) otherwise.
- compiler::Node* ToThisValue(compiler::Node* context, compiler::Node* value,
- PrimitiveType primitive_type,
- char const* method_name);
+ Node* ToThisValue(Node* context, Node* value, PrimitiveType primitive_type,
+ char const* method_name);
// Throws a TypeError for {method_name} if {value} is not of the given
// instance type. Returns {value}'s map.
- compiler::Node* ThrowIfNotInstanceType(compiler::Node* context,
- compiler::Node* value,
- InstanceType instance_type,
- char const* method_name);
+ Node* ThrowIfNotInstanceType(Node* context, Node* value,
+ InstanceType instance_type,
+ char const* method_name);
// Type checks.
// Check whether the map is for an object with special properties, such as a
// JSProxy or an object with interceptors.
- compiler::Node* IsSpecialReceiverMap(compiler::Node* map);
- compiler::Node* IsSpecialReceiverInstanceType(compiler::Node* instance_type);
- compiler::Node* IsStringInstanceType(compiler::Node* instance_type);
- compiler::Node* IsString(compiler::Node* object);
- compiler::Node* IsJSObject(compiler::Node* object);
- compiler::Node* IsJSGlobalProxy(compiler::Node* object);
- compiler::Node* IsJSReceiverInstanceType(compiler::Node* instance_type);
- compiler::Node* IsJSReceiver(compiler::Node* object);
- compiler::Node* IsMap(compiler::Node* object);
- compiler::Node* IsCallableMap(compiler::Node* map);
- compiler::Node* IsName(compiler::Node* object);
- compiler::Node* IsJSValue(compiler::Node* object);
- compiler::Node* IsJSArray(compiler::Node* object);
- compiler::Node* IsNativeContext(compiler::Node* object);
- compiler::Node* IsWeakCell(compiler::Node* object);
- compiler::Node* IsFixedDoubleArray(compiler::Node* object);
- compiler::Node* IsHashTable(compiler::Node* object);
- compiler::Node* IsDictionary(compiler::Node* object);
- compiler::Node* IsUnseededNumberDictionary(compiler::Node* object);
+ Node* InstanceTypeEqual(Node* instance_type, int type);
+ Node* IsSpecialReceiverMap(Node* map);
+ Node* IsSpecialReceiverInstanceType(Node* instance_type);
+ Node* IsStringInstanceType(Node* instance_type);
+ Node* IsString(Node* object);
+ Node* IsJSObject(Node* object);
+ Node* IsJSGlobalProxy(Node* object);
+ Node* IsJSReceiverInstanceType(Node* instance_type);
+ Node* IsJSReceiver(Node* object);
+ Node* IsMap(Node* object);
+ Node* IsCallableMap(Node* map);
+ Node* IsName(Node* object);
+ Node* IsSymbol(Node* object);
+ Node* IsPrivateSymbol(Node* object);
+ Node* IsJSValue(Node* object);
+ Node* IsJSArray(Node* object);
+ Node* IsNativeContext(Node* object);
+ Node* IsWeakCell(Node* object);
+ Node* IsFixedDoubleArray(Node* object);
+ Node* IsHashTable(Node* object);
+ Node* IsDictionary(Node* object);
+ Node* IsUnseededNumberDictionary(Node* object);
+ Node* IsConstructorMap(Node* map);
+ Node* IsJSFunction(Node* object);
// ElementsKind helpers:
- compiler::Node* IsFastElementsKind(compiler::Node* elements_kind);
- compiler::Node* IsHoleyFastElementsKind(compiler::Node* elements_kind);
+ Node* IsFastElementsKind(Node* elements_kind);
+ Node* IsHoleyFastElementsKind(Node* elements_kind);
// String helpers.
// Load a character from a String (might flatten a ConsString).
- compiler::Node* StringCharCodeAt(compiler::Node* string,
- compiler::Node* smi_index);
+ Node* StringCharCodeAt(Node* string, Node* index,
+ ParameterMode parameter_mode = SMI_PARAMETERS);
// Return the single character string with only {code}.
- compiler::Node* StringFromCharCode(compiler::Node* code);
+ Node* StringFromCharCode(Node* code);
// Return a new string object which holds a substring containing the range
// [from,to[ of string. |from| and |to| are expected to be tagged.
- compiler::Node* SubString(compiler::Node* context, compiler::Node* string,
- compiler::Node* from, compiler::Node* to);
+ Node* SubString(Node* context, Node* string, Node* from, Node* to);
// Return a new string object produced by concatenating |first| with |second|.
- compiler::Node* StringAdd(compiler::Node* context, compiler::Node* first,
- compiler::Node* second,
- AllocationFlags flags = kNone);
+ Node* StringAdd(Node* context, Node* first, Node* second,
+ AllocationFlags flags = kNone);
// Return the first index >= {from} at which {needle_char} was found in
// {string}, or -1 if such an index does not exist. The returned value is
// a Smi, {string} is expected to be a String, {needle_char} is an intptr,
// and {from} is expected to be tagged.
- compiler::Node* StringIndexOfChar(compiler::Node* context,
- compiler::Node* string,
- compiler::Node* needle_char,
- compiler::Node* from);
+ Node* StringIndexOfChar(Node* context, Node* string, Node* needle_char,
+ Node* from);
- compiler::Node* StringFromCodePoint(compiler::Node* codepoint,
- UnicodeEncoding encoding);
+ Node* StringFromCodePoint(Node* codepoint, UnicodeEncoding encoding);
// Type conversion helpers.
// Convert a String to a Number.
- compiler::Node* StringToNumber(compiler::Node* context,
- compiler::Node* input);
- compiler::Node* NumberToString(compiler::Node* context,
- compiler::Node* input);
+ Node* StringToNumber(Node* context, Node* input);
+ Node* NumberToString(Node* context, Node* input);
// Convert an object to a name.
- compiler::Node* ToName(compiler::Node* context, compiler::Node* input);
+ Node* ToName(Node* context, Node* input);
// Convert a Non-Number object to a Number.
- compiler::Node* NonNumberToNumber(compiler::Node* context,
- compiler::Node* input);
+ Node* NonNumberToNumber(Node* context, Node* input);
// Convert any object to a Number.
- compiler::Node* ToNumber(compiler::Node* context, compiler::Node* input);
+ Node* ToNumber(Node* context, Node* input);
+
+ // Converts |input| to one of 2^32 integer values in the range 0 through
+ // 2^32-1, inclusive.
+ // ES#sec-touint32
+ Node* ToUint32(Node* context, Node* input);
// Convert any object to a String.
- compiler::Node* ToString(compiler::Node* context, compiler::Node* input);
+ Node* ToString(Node* context, Node* input);
// Convert any object to a Primitive.
- compiler::Node* JSReceiverToPrimitive(compiler::Node* context,
- compiler::Node* input);
+ Node* JSReceiverToPrimitive(Node* context, Node* input);
// Convert a String to a flat String.
- compiler::Node* FlattenString(compiler::Node* string);
+ Node* FlattenString(Node* string);
enum ToIntegerTruncationMode {
kNoTruncation,
@@ -697,127 +750,195 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
};
// Convert any object to an Integer.
- compiler::Node* ToInteger(compiler::Node* context, compiler::Node* input,
- ToIntegerTruncationMode mode = kNoTruncation);
+ Node* ToInteger(Node* context, Node* input,
+ ToIntegerTruncationMode mode = kNoTruncation);
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |T| in |word32|. Returns result as a uint32 node.
template <typename T>
- compiler::Node* DecodeWord32(compiler::Node* word32) {
+ Node* DecodeWord32(Node* word32) {
return DecodeWord32(word32, T::kShift, T::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |T| in |word|. Returns result as a word-size node.
template <typename T>
- compiler::Node* DecodeWord(compiler::Node* word) {
+ Node* DecodeWord(Node* word) {
return DecodeWord(word, T::kShift, T::kMask);
}
// Returns a node that contains a decoded (unsigned!) value of a bit
// field |T| in |word32|. Returns result as a word-size node.
template <typename T>
- compiler::Node* DecodeWordFromWord32(compiler::Node* word32) {
+ Node* DecodeWordFromWord32(Node* word32) {
return DecodeWord<T>(ChangeUint32ToWord(word32));
}
+ // Returns a node that contains a decoded (unsigned!) value of a bit
+ // field |T| in |word|. Returns result as a uint32 node.
+ template <typename T>
+ Node* DecodeWord32FromWord(Node* word) {
+ return TruncateWordToWord32(DecodeWord<T>(word));
+ }
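// Editorial sketch, not part of this commit: with a bit field declared
// via V8's BitField template, e.g. a hypothetical
//   class FooBits : public BitField<int, /* shift */ 2, /* size */ 3> {};
// the templated helpers above reduce to the untemplated overloads below:
//   DecodeWord32<FooBits>(w32)  // == DecodeWord32(w32, FooBits::kShift,
//                               //                  FooBits::kMask)
// which conceptually computes (w32 & kMask) >> kShift as a uint32 node.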
+
// Decodes an unsigned (!) value from |word32| to a uint32 node.
- compiler::Node* DecodeWord32(compiler::Node* word32, uint32_t shift,
- uint32_t mask);
+ Node* DecodeWord32(Node* word32, uint32_t shift, uint32_t mask);
// Decodes an unsigned (!) value from |word| to a word-size node.
- compiler::Node* DecodeWord(compiler::Node* word, uint32_t shift,
- uint32_t mask);
+ Node* DecodeWord(Node* word, uint32_t shift, uint32_t mask);
// Returns true if any of the |T|'s bits in given |word32| are set.
template <typename T>
- compiler::Node* IsSetWord32(compiler::Node* word32) {
+ Node* IsSetWord32(Node* word32) {
return IsSetWord32(word32, T::kMask);
}
// Returns true if any of the mask's bits in given |word32| are set.
- compiler::Node* IsSetWord32(compiler::Node* word32, uint32_t mask) {
+ Node* IsSetWord32(Node* word32, uint32_t mask) {
return Word32NotEqual(Word32And(word32, Int32Constant(mask)),
Int32Constant(0));
}
// Returns true if any of the |T|'s bits in given |word| are set.
template <typename T>
- compiler::Node* IsSetWord(compiler::Node* word) {
- return WordNotEqual(WordAnd(word, IntPtrConstant(T::kMask)),
- IntPtrConstant(0));
+ Node* IsSetWord(Node* word) {
+ return IsSetWord(word, T::kMask);
+ }
+
+ // Returns true if any of the mask's bits in given |word| are set.
+ Node* IsSetWord(Node* word, uint32_t mask) {
+ return WordNotEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
+ }
+
+ // Returns true if any of the mask's bits are set in the given Smi.
+ // Smi-encoding of the mask is performed implicitly!
+ Node* IsSetSmi(Node* smi, int untagged_mask) {
+ intptr_t mask_word = bit_cast<intptr_t>(Smi::FromInt(untagged_mask));
+ return WordNotEqual(
+ WordAnd(BitcastTaggedToWord(smi), IntPtrConstant(mask_word)),
+ IntPtrConstant(0));
+ }
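// Editorial note on the implicit Smi-encoding above: a Smi keeps its
// integer payload shifted left by the tag (and, on 64-bit, shift) bits,
// so bit_cast<intptr_t>(Smi::FromInt(mask)) is the mask moved into
// payload position. Masking the tagged word with it therefore tests the
// same bits as
//   (untagged_value & untagged_mask) != 0
// without untagging, at the cost of one WordAnd plus one comparison.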
+
+ // Returns true if all of the |T|'s bits in given |word32| are clear.
+ template <typename T>
+ Node* IsClearWord32(Node* word32) {
+ return IsClearWord32(word32, T::kMask);
+ }
+
+ // Returns true if all of the mask's bits in given |word32| are clear.
+ Node* IsClearWord32(Node* word32, uint32_t mask) {
+ return Word32Equal(Word32And(word32, Int32Constant(mask)),
+ Int32Constant(0));
+ }
+
+ // Returns true if all of the |T|'s bits in given |word| are clear.
+ template <typename T>
+ Node* IsClearWord(Node* word) {
+ return IsClearWord(word, T::kMask);
+ }
+
+ // Returns true if all of the mask's bits in given |word| are clear.
+ Node* IsClearWord(Node* word, uint32_t mask) {
+ return WordEqual(WordAnd(word, IntPtrConstant(mask)), IntPtrConstant(0));
}
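// Editorial sketch: IsClearWord32 is the complement of IsSetWord32 over
// the same mask, e.g. for a hypothetical flag bit
//   Node* any_set   = IsSetWord32(bit_field, 1 << 3);
//   Node* all_clear = IsClearWord32(bit_field, 1 << 3);
// exactly one of the two produces a true value for any given input.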
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int delta);
void DecrementCounter(StatsCounter* counter, int delta);
+ void Increment(Variable& variable, int value = 1,
+ ParameterMode mode = INTPTR_PARAMETERS);
+
// Generates "if (false) goto label" code. Useful for marking a label as
// "live" to avoid assertion failures during graph building. In the resulting
// code this check will be eliminated.
void Use(Label* label);
// Various building blocks for stubs doing property lookups.
- void TryToName(compiler::Node* key, Label* if_keyisindex, Variable* var_index,
+ void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
Label* if_keyisunique, Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
// See Dictionary::EntryToIndex().
template <typename Dictionary>
- compiler::Node* EntryToIndex(compiler::Node* entry, int field_index);
+ Node* EntryToIndex(Node* entry, int field_index);
template <typename Dictionary>
- compiler::Node* EntryToIndex(compiler::Node* entry) {
+ Node* EntryToIndex(Node* entry) {
return EntryToIndex<Dictionary>(entry, Dictionary::kEntryKeyIndex);
}
// Calculate a valid size for a hash table.
- compiler::Node* HashTableComputeCapacity(compiler::Node* at_least_space_for);
+ Node* HashTableComputeCapacity(Node* at_least_space_for);
+
+ template <class Dictionary>
+ Node* GetNumberOfElements(Node* dictionary);
+
+ template <class Dictionary>
+ void SetNumberOfElements(Node* dictionary, Node* num_elements_smi);
+
+ template <class Dictionary>
+ Node* GetNumberOfDeletedElements(Node* dictionary);
+
+ template <class Dictionary>
+ Node* GetCapacity(Node* dictionary);
+
+ template <class Dictionary>
+ Node* GetNextEnumerationIndex(Node* dictionary);
+
+ template <class Dictionary>
+ void SetNextEnumerationIndex(Node* dictionary, Node* next_enum_index_smi);
// Looks up an entry in a NameDictionaryBase successor. If the entry is found
// control goes to {if_found} and {var_name_index} contains an index of the
// key field of the entry found. If the key is not found control goes to
// {if_not_found}.
static const int kInlinedDictionaryProbes = 4;
+ enum LookupMode { kFindExisting, kFindInsertionIndex };
template <typename Dictionary>
- void NameDictionaryLookup(compiler::Node* dictionary,
- compiler::Node* unique_name, Label* if_found,
- Variable* var_name_index, Label* if_not_found,
- int inlined_probes = kInlinedDictionaryProbes);
+ void NameDictionaryLookup(Node* dictionary, Node* unique_name,
+ Label* if_found, Variable* var_name_index,
+ Label* if_not_found,
+ int inlined_probes = kInlinedDictionaryProbes,
+ LookupMode mode = kFindExisting);
- compiler::Node* ComputeIntegerHash(compiler::Node* key, compiler::Node* seed);
+ Node* ComputeIntegerHash(Node* key, Node* seed);
template <typename Dictionary>
- void NumberDictionaryLookup(compiler::Node* dictionary,
- compiler::Node* intptr_index, Label* if_found,
- Variable* var_entry, Label* if_not_found);
+ void NumberDictionaryLookup(Node* dictionary, Node* intptr_index,
+ Label* if_found, Variable* var_entry,
+ Label* if_not_found);
+
+ template <class Dictionary>
+ void FindInsertionEntry(Node* dictionary, Node* key, Variable* var_key_index);
+
+ template <class Dictionary>
+ void InsertEntry(Node* dictionary, Node* key, Node* value, Node* index,
+ Node* enum_index);
+
+ template <class Dictionary>
+ void Add(Node* dictionary, Node* key, Node* value, Label* bailout);
// Tries to check if {object} has own {unique_name} property.
- void TryHasOwnProperty(compiler::Node* object, compiler::Node* map,
- compiler::Node* instance_type,
- compiler::Node* unique_name, Label* if_found,
+ void TryHasOwnProperty(Node* object, Node* map, Node* instance_type,
+ Node* unique_name, Label* if_found,
Label* if_not_found, Label* if_bailout);
// Tries to get {object}'s own {unique_name} property value. If the property
// is an accessor then it also calls a getter. If the property is a double
// field it re-wraps value in an immutable heap number.
- void TryGetOwnProperty(compiler::Node* context, compiler::Node* receiver,
- compiler::Node* object, compiler::Node* map,
- compiler::Node* instance_type,
- compiler::Node* unique_name, Label* if_found,
- Variable* var_value, Label* if_not_found,
- Label* if_bailout);
-
- void LoadPropertyFromFastObject(compiler::Node* object, compiler::Node* map,
- compiler::Node* descriptors,
- compiler::Node* name_index,
- Variable* var_details, Variable* var_value);
-
- void LoadPropertyFromNameDictionary(compiler::Node* dictionary,
- compiler::Node* entry,
+ void TryGetOwnProperty(Node* context, Node* receiver, Node* object, Node* map,
+ Node* instance_type, Node* unique_name,
+ Label* if_found, Variable* var_value,
+ Label* if_not_found, Label* if_bailout);
+
+ void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
+ Node* name_index, Variable* var_details,
+ Variable* var_value);
+
+ void LoadPropertyFromNameDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value);
- void LoadPropertyFromGlobalDictionary(compiler::Node* dictionary,
- compiler::Node* entry,
+ void LoadPropertyFromGlobalDictionary(Node* dictionary, Node* entry,
Variable* var_details,
Variable* var_value, Label* if_deleted);
@@ -833,24 +954,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
//
// Note: this code does not check if the global dictionary points to deleted
// entry! This has to be done by the caller.
- void TryLookupProperty(compiler::Node* object, compiler::Node* map,
- compiler::Node* instance_type,
- compiler::Node* unique_name, Label* if_found_fast,
+ void TryLookupProperty(Node* object, Node* map, Node* instance_type,
+ Node* unique_name, Label* if_found_fast,
Label* if_found_dict, Label* if_found_global,
Variable* var_meta_storage, Variable* var_name_index,
Label* if_not_found, Label* if_bailout);
- void TryLookupElement(compiler::Node* object, compiler::Node* map,
- compiler::Node* instance_type,
- compiler::Node* intptr_index, Label* if_found,
+ void TryLookupElement(Node* object, Node* map, Node* instance_type,
+ Node* intptr_index, Label* if_found,
Label* if_not_found, Label* if_bailout);
// This is a type of a lookup in holder generator function. In case of a
// property lookup the {key} is guaranteed to be a unique name and in case of
// element lookup the key is an Int32 index.
- typedef std::function<void(compiler::Node* receiver, compiler::Node* holder,
- compiler::Node* map, compiler::Node* instance_type,
- compiler::Node* key, Label* next_holder,
+ typedef std::function<void(Node* receiver, Node* holder, Node* map,
+ Node* instance_type, Node* key, Label* next_holder,
Label* if_bailout)>
LookupInHolder;
@@ -860,231 +978,125 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Upon reaching the end of prototype chain the control goes to {if_end}.
// If it can't handle the case {receiver}/{key} case then the control goes
// to {if_bailout}.
- void TryPrototypeChainLookup(compiler::Node* receiver, compiler::Node* key,
- LookupInHolder& lookup_property_in_holder,
- LookupInHolder& lookup_element_in_holder,
+ void TryPrototypeChainLookup(Node* receiver, Node* key,
+ const LookupInHolder& lookup_property_in_holder,
+ const LookupInHolder& lookup_element_in_holder,
Label* if_end, Label* if_bailout);
// Instanceof helpers.
// ES6 section 7.3.19 OrdinaryHasInstance (C, O)
- compiler::Node* OrdinaryHasInstance(compiler::Node* context,
- compiler::Node* callable,
- compiler::Node* object);
-
- // Load/StoreIC helpers.
- struct LoadICParameters {
- LoadICParameters(compiler::Node* context, compiler::Node* receiver,
- compiler::Node* name, compiler::Node* slot,
- compiler::Node* vector)
- : context(context),
- receiver(receiver),
- name(name),
- slot(slot),
- vector(vector) {}
-
- compiler::Node* context;
- compiler::Node* receiver;
- compiler::Node* name;
- compiler::Node* slot;
- compiler::Node* vector;
- };
-
- struct StoreICParameters : public LoadICParameters {
- StoreICParameters(compiler::Node* context, compiler::Node* receiver,
- compiler::Node* name, compiler::Node* value,
- compiler::Node* slot, compiler::Node* vector)
- : LoadICParameters(context, receiver, name, slot, vector),
- value(value) {}
- compiler::Node* value;
- };
+ Node* OrdinaryHasInstance(Node* context, Node* callable, Node* object);
// Load type feedback vector from the stub caller's frame.
- compiler::Node* LoadTypeFeedbackVectorForStub();
+ Node* LoadTypeFeedbackVectorForStub();
// Update the type feedback vector.
- void UpdateFeedback(compiler::Node* feedback,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id);
-
- compiler::Node* LoadReceiverMap(compiler::Node* receiver);
-
- // Checks monomorphic case. Returns {feedback} entry of the vector.
- compiler::Node* TryMonomorphicCase(compiler::Node* slot,
- compiler::Node* vector,
- compiler::Node* receiver_map,
- Label* if_handler, Variable* var_handler,
- Label* if_miss);
- void HandlePolymorphicCase(compiler::Node* receiver_map,
- compiler::Node* feedback, Label* if_handler,
- Variable* var_handler, Label* if_miss,
- int unroll_count);
- void HandleKeyedStorePolymorphicCase(compiler::Node* receiver_map,
- compiler::Node* feedback,
- Label* if_handler, Variable* var_handler,
- Label* if_transition_handler,
- Variable* var_transition_map_cell,
- Label* if_miss);
-
- compiler::Node* StubCachePrimaryOffset(compiler::Node* name,
- compiler::Node* map);
-
- compiler::Node* StubCacheSecondaryOffset(compiler::Node* name,
- compiler::Node* seed);
-
- // This enum is used here as a replacement for StubCache::Table to avoid
- // including stub cache header.
- enum StubCacheTable : int;
-
- void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
- compiler::Node* entry_offset,
- compiler::Node* name, compiler::Node* map,
- Label* if_handler, Variable* var_handler,
- Label* if_miss);
-
- void TryProbeStubCache(StubCache* stub_cache, compiler::Node* receiver,
- compiler::Node* name, Label* if_handler,
- Variable* var_handler, Label* if_miss);
-
- // Extends properties backing store by JSObject::kFieldsAdded elements.
- void ExtendPropertiesBackingStore(compiler::Node* object);
-
- compiler::Node* PrepareValueForWrite(compiler::Node* value,
- Representation representation,
- Label* bailout);
-
- void StoreNamedField(compiler::Node* object, FieldIndex index,
- Representation representation, compiler::Node* value,
- bool transition_to_field);
-
- void StoreNamedField(compiler::Node* object, compiler::Node* offset,
- bool is_inobject, Representation representation,
- compiler::Node* value, bool transition_to_field);
+ void UpdateFeedback(Node* feedback, Node* type_feedback_vector,
+ Node* slot_id);
+
+ Node* LoadReceiverMap(Node* receiver);
// Emits a keyed sloppy arguments load and returns the loaded value.
- compiler::Node* LoadKeyedSloppyArguments(compiler::Node* receiver,
- compiler::Node* key,
- Label* bailout) {
+ Node* LoadKeyedSloppyArguments(Node* receiver, Node* key, Label* bailout) {
return EmitKeyedSloppyArguments(receiver, key, nullptr, bailout);
}
// Emits keyed sloppy arguments store.
- void StoreKeyedSloppyArguments(compiler::Node* receiver, compiler::Node* key,
- compiler::Node* value, Label* bailout) {
+ void StoreKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
+ Label* bailout) {
DCHECK_NOT_NULL(value);
EmitKeyedSloppyArguments(receiver, key, value, bailout);
}
// Loads script context from the script context table.
- compiler::Node* LoadScriptContext(compiler::Node* context, int context_index);
+ Node* LoadScriptContext(Node* context, int context_index);
- compiler::Node* Int32ToUint8Clamped(compiler::Node* int32_value);
- compiler::Node* Float64ToUint8Clamped(compiler::Node* float64_value);
+ Node* Int32ToUint8Clamped(Node* int32_value);
+ Node* Float64ToUint8Clamped(Node* float64_value);
- compiler::Node* PrepareValueForWriteToTypedArray(compiler::Node* key,
- ElementsKind elements_kind,
- Label* bailout);
+ Node* PrepareValueForWriteToTypedArray(Node* key, ElementsKind elements_kind,
+ Label* bailout);
// Store value to an elements array with given elements kind.
- void StoreElement(compiler::Node* elements, ElementsKind kind,
- compiler::Node* index, compiler::Node* value,
+ void StoreElement(Node* elements, ElementsKind kind, Node* index, Node* value,
ParameterMode mode);
- void EmitElementStore(compiler::Node* object, compiler::Node* key,
- compiler::Node* value, bool is_jsarray,
+ void EmitElementStore(Node* object, Node* key, Node* value, bool is_jsarray,
ElementsKind elements_kind,
KeyedAccessStoreMode store_mode, Label* bailout);
- compiler::Node* CheckForCapacityGrow(compiler::Node* object,
- compiler::Node* elements,
- ElementsKind kind,
- compiler::Node* length,
- compiler::Node* key, ParameterMode mode,
- bool is_js_array, Label* bailout);
-
- compiler::Node* CopyElementsOnWrite(compiler::Node* object,
- compiler::Node* elements,
- ElementsKind kind, compiler::Node* length,
- ParameterMode mode, Label* bailout);
+ Node* CheckForCapacityGrow(Node* object, Node* elements, ElementsKind kind,
+ Node* length, Node* key, ParameterMode mode,
+ bool is_js_array, Label* bailout);
- void LoadIC(const LoadICParameters* p);
- void LoadICProtoArray(const LoadICParameters* p, compiler::Node* handler);
- void LoadGlobalIC(const LoadICParameters* p);
- void KeyedLoadIC(const LoadICParameters* p);
- void KeyedLoadICGeneric(const LoadICParameters* p);
- void StoreIC(const StoreICParameters* p);
- void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+ Node* CopyElementsOnWrite(Node* object, Node* elements, ElementsKind kind,
+ Node* length, ParameterMode mode, Label* bailout);
- void TransitionElementsKind(compiler::Node* object, compiler::Node* map,
- ElementsKind from_kind, ElementsKind to_kind,
- bool is_jsarray, Label* bailout);
+ void TransitionElementsKind(Node* object, Node* map, ElementsKind from_kind,
+ ElementsKind to_kind, bool is_jsarray,
+ Label* bailout);
- void TrapAllocationMemento(compiler::Node* object, Label* memento_found);
+ void TrapAllocationMemento(Node* object, Label* memento_found);
- compiler::Node* PageFromAddress(compiler::Node* address);
+ Node* PageFromAddress(Node* address);
// Get the enumerable length from |map| and return the result as a Smi.
- compiler::Node* EnumLength(compiler::Node* map);
+ Node* EnumLength(Node* map);
// Check the cache validity for |receiver|. Branch to |use_cache| if
// the cache is valid, otherwise branch to |use_runtime|.
- void CheckEnumCache(compiler::Node* receiver,
- CodeStubAssembler::Label* use_cache,
+ void CheckEnumCache(Node* receiver, CodeStubAssembler::Label* use_cache,
CodeStubAssembler::Label* use_runtime);
// Create a new weak cell with a specified value and install it into a
// feedback vector.
- compiler::Node* CreateWeakCellInFeedbackVector(
- compiler::Node* feedback_vector, compiler::Node* slot,
- compiler::Node* value);
+ Node* CreateWeakCellInFeedbackVector(Node* feedback_vector, Node* slot,
+ Node* value);
// Create a new AllocationSite and install it into a feedback vector.
- compiler::Node* CreateAllocationSiteInFeedbackVector(
- compiler::Node* feedback_vector, compiler::Node* slot);
+ Node* CreateAllocationSiteInFeedbackVector(Node* feedback_vector, Node* slot);
enum class IndexAdvanceMode { kPre, kPost };
- void BuildFastLoop(
- const VariableList& var_list, MachineRepresentation index_rep,
- compiler::Node* start_index, compiler::Node* end_index,
- std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
- body,
- int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre);
-
- void BuildFastLoop(
- MachineRepresentation index_rep, compiler::Node* start_index,
- compiler::Node* end_index,
- std::function<void(CodeStubAssembler* assembler, compiler::Node* index)>
- body,
- int increment, IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
+ typedef std::function<void(Node* index)> FastLoopBody;
+
+ void BuildFastLoop(const VariableList& var_list,
+ MachineRepresentation index_rep, Node* start_index,
+ Node* end_index, const FastLoopBody& body, int increment,
+ IndexAdvanceMode mode = IndexAdvanceMode::kPre);
+
+ void BuildFastLoop(MachineRepresentation index_rep, Node* start_index,
+ Node* end_index, const FastLoopBody& body, int increment,
+ IndexAdvanceMode mode = IndexAdvanceMode::kPre) {
BuildFastLoop(VariableList(0, zone()), index_rep, start_index, end_index,
body, increment, mode);
}
enum class ForEachDirection { kForward, kReverse };
+ typedef std::function<void(Node* fixed_array, Node* offset)>
+ FastFixedArrayForEachBody;
+
void BuildFastFixedArrayForEach(
- compiler::Node* fixed_array, ElementsKind kind,
- compiler::Node* first_element_inclusive,
- compiler::Node* last_element_exclusive,
- std::function<void(CodeStubAssembler* assembler,
- compiler::Node* fixed_array, compiler::Node* offset)>
- body,
+ Node* fixed_array, ElementsKind kind, Node* first_element_inclusive,
+ Node* last_element_exclusive, const FastFixedArrayForEachBody& body,
ParameterMode mode = INTPTR_PARAMETERS,
ForEachDirection direction = ForEachDirection::kReverse);
- compiler::Node* GetArrayAllocationSize(compiler::Node* element_count,
- ElementsKind kind, ParameterMode mode,
- int header_size) {
+ Node* GetArrayAllocationSize(Node* element_count, ElementsKind kind,
+ ParameterMode mode, int header_size) {
return ElementOffsetFromIndex(element_count, kind, mode, header_size);
}
- compiler::Node* GetFixedArrayAllocationSize(compiler::Node* element_count,
- ElementsKind kind,
- ParameterMode mode) {
+ Node* GetFixedArrayAllocationSize(Node* element_count, ElementsKind kind,
+ ParameterMode mode) {
return GetArrayAllocationSize(element_count, kind, mode,
FixedArray::kHeaderSize);
}
+ void InitializeFieldsWithRoot(Node* object, Node* start_offset,
+ Node* end_offset, Heap::RootListIndex root);
+
enum RelationalComparisonMode {
kLessThan,
kLessThanOrEqual,
@@ -1092,222 +1104,169 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
kGreaterThanOrEqual
};
- compiler::Node* RelationalComparison(RelationalComparisonMode mode,
- compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context);
+ Node* RelationalComparison(RelationalComparisonMode mode, Node* lhs,
+ Node* rhs, Node* context);
void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
- compiler::Node* lhs,
- compiler::Node* rhs, Label* if_true,
+ Node* lhs, Node* rhs, Label* if_true,
Label* if_false);
- void GotoUnlessNumberLessThan(compiler::Node* lhs, compiler::Node* rhs,
- Label* if_false);
+ void GotoUnlessNumberLessThan(Node* lhs, Node* rhs, Label* if_false);
enum ResultMode { kDontNegateResult, kNegateResult };
- compiler::Node* Equal(ResultMode mode, compiler::Node* lhs,
- compiler::Node* rhs, compiler::Node* context);
+ Node* Equal(ResultMode mode, Node* lhs, Node* rhs, Node* context);
- compiler::Node* StrictEqual(ResultMode mode, compiler::Node* lhs,
- compiler::Node* rhs, compiler::Node* context);
+ Node* StrictEqual(ResultMode mode, Node* lhs, Node* rhs, Node* context);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
// differs from positive zero.
// Unlike Equal and StrictEqual, returns a value suitable for use in Branch
// instructions, e.g. Branch(SameValue(...), &label).
- compiler::Node* SameValue(compiler::Node* lhs, compiler::Node* rhs,
- compiler::Node* context);
+ Node* SameValue(Node* lhs, Node* rhs, Node* context);
- compiler::Node* HasProperty(
- compiler::Node* object, compiler::Node* key, compiler::Node* context,
+ Node* HasProperty(
+ Node* object, Node* key, Node* context,
Runtime::FunctionId fallback_runtime_function_id = Runtime::kHasProperty);
- compiler::Node* ForInFilter(compiler::Node* key, compiler::Node* object,
- compiler::Node* context);
+ Node* ForInFilter(Node* key, Node* object, Node* context);
+
+ Node* Typeof(Node* value, Node* context);
+
+ Node* GetSuperConstructor(Node* value, Node* context);
- compiler::Node* Typeof(compiler::Node* value, compiler::Node* context);
+ Node* InstanceOf(Node* object, Node* callable, Node* context);
- compiler::Node* InstanceOf(compiler::Node* object, compiler::Node* callable,
- compiler::Node* context);
+ // Debug helpers
+ Node* IsDebugActive();
// TypedArray/ArrayBuffer helpers
- compiler::Node* IsDetachedBuffer(compiler::Node* buffer);
+ Node* IsDetachedBuffer(Node* buffer);
- compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
- ElementsKind kind, ParameterMode mode,
- int base_size = 0);
+ Node* ElementOffsetFromIndex(Node* index, ElementsKind kind,
+ ParameterMode mode, int base_size = 0);
- protected:
- void HandleStoreICHandlerCase(const StoreICParameters* p,
- compiler::Node* handler, Label* miss);
+ Node* AllocateFunctionWithMapAndContext(Node* map, Node* shared_info,
+ Node* context);
- private:
- friend class CodeStubArguments;
+ // Promise helpers
+ Node* IsPromiseHookEnabled();
- enum ElementSupport { kOnlyProperties, kSupportElements };
+ Node* AllocatePromiseReactionJobInfo(Node* value, Node* tasks,
+ Node* deferred_promise,
+ Node* deferred_on_resolve,
+ Node* deferred_on_reject, Node* context);
- void DescriptorLookupLinear(compiler::Node* unique_name,
- compiler::Node* descriptors, compiler::Node* nof,
+ protected:
+ void DescriptorLookupLinear(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
- compiler::Node* CallGetterIfAccessor(compiler::Node* value,
- compiler::Node* details,
- compiler::Node* context,
- compiler::Node* receiver,
- Label* if_bailout);
-
- void HandleLoadICHandlerCase(
- const LoadICParameters* p, compiler::Node* handler, Label* miss,
- ElementSupport support_elements = kOnlyProperties);
-
- void HandleLoadICSmiHandlerCase(const LoadICParameters* p,
- compiler::Node* holder,
- compiler::Node* smi_handler, Label* miss,
- ElementSupport support_elements);
-
- void HandleLoadICProtoHandler(const LoadICParameters* p,
- compiler::Node* handler, Variable* var_holder,
- Variable* var_smi_handler,
- Label* if_smi_handler, Label* miss);
-
- compiler::Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p,
- compiler::Node* handler,
- compiler::Node* handler_length,
- compiler::Node* handler_flags,
- Label* miss);
-
- void CheckPrototype(compiler::Node* prototype_cell, compiler::Node* name,
- Label* miss);
-
- void NameDictionaryNegativeLookup(compiler::Node* object,
- compiler::Node* name, Label* miss);
-
- // If |transition| is nullptr then the normal field store is generated or
- // transitioning store otherwise.
- void HandleStoreFieldAndReturn(compiler::Node* handler_word,
- compiler::Node* holder,
- Representation representation,
- compiler::Node* value,
- compiler::Node* transition, Label* miss);
-
- // If |transition| is nullptr then the normal field store is generated or
- // transitioning store otherwise.
- void HandleStoreICSmiHandlerCase(compiler::Node* handler_word,
- compiler::Node* holder,
- compiler::Node* value,
- compiler::Node* transition, Label* miss);
-
- void HandleStoreICProtoHandler(const StoreICParameters* p,
- compiler::Node* handler, Label* miss);
-
- compiler::Node* TryToIntptr(compiler::Node* key, Label* miss);
- void EmitFastElementsBoundsCheck(compiler::Node* object,
- compiler::Node* elements,
- compiler::Node* intptr_index,
- compiler::Node* is_jsarray_condition,
- Label* miss);
- void EmitElementLoad(compiler::Node* object, compiler::Node* elements,
- compiler::Node* elements_kind, compiler::Node* key,
- compiler::Node* is_jsarray_condition, Label* if_hole,
- Label* rebox_double, Variable* var_double_value,
- Label* unimplemented_elements_kind, Label* out_of_bounds,
- Label* miss);
- void BranchIfPrototypesHaveNoElements(compiler::Node* receiver_map,
+
+ Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
+ Node* receiver, Label* if_bailout);
+
+ Node* TryToIntptr(Node* key, Label* miss);
+
+ void BranchIfPrototypesHaveNoElements(Node* receiver_map,
Label* definitely_no_elements,
Label* possibly_elements);
- compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
- AllocationFlags flags,
- compiler::Node* top_address,
- compiler::Node* limit_address);
- compiler::Node* AllocateRawUnaligned(compiler::Node* size_in_bytes,
- AllocationFlags flags,
- compiler::Node* top_adddress,
- compiler::Node* limit_address);
+ private:
+ friend class CodeStubArguments;
+
+ void HandleBreakOnNode();
+
+ Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
+ Node* top_address, Node* limit_address);
+ Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
+ Node* top_address, Node* limit_address);
// Allocate and return a JSArray of given total size in bytes with header
// fields initialized.
- compiler::Node* AllocateUninitializedJSArray(ElementsKind kind,
- compiler::Node* array_map,
- compiler::Node* length,
- compiler::Node* allocation_site,
- compiler::Node* size_in_bytes);
+ Node* AllocateUninitializedJSArray(ElementsKind kind, Node* array_map,
+ Node* length, Node* allocation_site,
+ Node* size_in_bytes);
- compiler::Node* SmiShiftBitsConstant();
+ Node* SmiShiftBitsConstant();
// Emits a keyed sloppy arguments load if |value| is nullptr, or a store
// otherwise. Returns either the loaded value or |value|.
- compiler::Node* EmitKeyedSloppyArguments(compiler::Node* receiver,
- compiler::Node* key,
- compiler::Node* value,
- Label* bailout);
-
- compiler::Node* AllocateSlicedString(Heap::RootListIndex map_root_index,
- compiler::Node* length,
- compiler::Node* parent,
- compiler::Node* offset);
-
- compiler::Node* AllocateConsString(Heap::RootListIndex map_root_index,
- compiler::Node* length,
- compiler::Node* first,
- compiler::Node* second,
- AllocationFlags flags);
+ Node* EmitKeyedSloppyArguments(Node* receiver, Node* key, Node* value,
+ Label* bailout);
+
+ Node* AllocateSlicedString(Heap::RootListIndex map_root_index, Node* length,
+ Node* parent, Node* offset);
+
+ Node* AllocateConsString(Heap::RootListIndex map_root_index, Node* length,
+ Node* first, Node* second, AllocationFlags flags);
static const int kElementLoopUnrollThreshold = 8;
};
class CodeStubArguments {
public:
- // |argc| specifies the number of arguments passed to the builtin excluding
- // the receiver.
- CodeStubArguments(CodeStubAssembler* assembler, compiler::Node* argc,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ typedef compiler::Node Node;
- compiler::Node* GetReceiver();
+ // |argc| is a uint32 value which specifies the number of arguments passed
+ // to the builtin excluding the receiver.
+ CodeStubArguments(CodeStubAssembler* assembler, Node* argc);
+
+ Node* GetReceiver() const;
// |index| is zero-based and does not include the receiver
- compiler::Node* AtIndex(compiler::Node* index,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ Node* AtIndex(Node* index, CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) const;
+
+ Node* AtIndex(int index) const;
- compiler::Node* AtIndex(int index);
+ Node* GetLength() const { return argc_; }
- typedef std::function<void(CodeStubAssembler* assembler, compiler::Node* arg)>
- ForEachBodyFunction;
+ typedef std::function<void(Node* arg)> ForEachBodyFunction;
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
- void ForEach(ForEachBodyFunction body, compiler::Node* first = nullptr,
- compiler::Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS) {
+ void ForEach(const ForEachBodyFunction& body, Node* first = nullptr,
+ Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS) {
CodeStubAssembler::VariableList list(0, assembler_->zone());
ForEach(list, body, first, last, mode);
}
// Iteration doesn't include the receiver. |first| and |last| are zero-based.
void ForEach(const CodeStubAssembler::VariableList& vars,
- ForEachBodyFunction body, compiler::Node* first = nullptr,
- compiler::Node* last = nullptr,
- CodeStubAssembler::ParameterMode mode =
- CodeStubAssembler::INTPTR_PARAMETERS);
+ const ForEachBodyFunction& body, Node* first = nullptr,
+ Node* last = nullptr, CodeStubAssembler::ParameterMode mode =
+ CodeStubAssembler::INTPTR_PARAMETERS);
- void PopAndReturn(compiler::Node* value);
+ void PopAndReturn(Node* value);
private:
- compiler::Node* GetArguments();
+ Node* GetArguments();
CodeStubAssembler* assembler_;
- compiler::Node* argc_;
- compiler::Node* arguments_;
- compiler::Node* fp_;
+ Node* argc_;
+ Node* arguments_;
+ Node* fp_;
};
#ifdef DEBUG
#define CSA_ASSERT(csa, x) \
(csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&] { \
+ const CodeAssemblerState* state = (csa)->state(); \
+ /* See Linkage::GetJSCallDescriptor(). */ \
+ int argc_index = state->parameter_count() - 2; \
+ compiler::Node* const argc = (csa)->Parameter(argc_index); \
+ return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__)
+
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
+ CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
+
#else
#define CSA_ASSERT(csa, x) ((void)0)
+#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
#endif
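// Editorial usage sketch, assuming a CodeStubAssembler* named csa and a
// Node* named receiver: a builtin generator can guard its invariants with
//   CSA_ASSERT(csa, csa->IsString(receiver));
//   CSA_ASSERT_JS_ARGC_EQ(csa, 2);
// Both expand to ((void)0) outside DEBUG builds, so release code pays
// nothing for the checks.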
#ifdef ENABLE_SLOW_DCHECKS
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 790f687925..4fd9309b0e 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -8,6 +8,7 @@
#include "src/bailout-reason.h"
#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/crankshaft/hydrogen.h"
#include "src/crankshaft/lithium.h"
#include "src/field-index.h"
@@ -82,9 +83,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
Representation representation,
bool transition_to_field);
- HValue* BuildPushElement(HValue* object, HValue* argc,
- HValue* argument_elements, ElementsKind kind);
-
HValue* BuildToString(HValue* input, bool convert);
HValue* BuildToPrimitive(HValue* input, HValue* input_map);
@@ -251,8 +249,9 @@ Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor interface_descriptor(GetCallInterfaceDescriptor());
- CodeStubAssembler assembler(isolate(), &zone, interface_descriptor,
- GetCodeFlags(), name);
+ compiler::CodeAssemblerState state(isolate(), &zone, interface_descriptor,
+ GetCodeFlags(), name);
+ CodeStubAssembler assembler(&state);
int total_params = interface_descriptor.GetStackParameterCount() +
interface_descriptor.GetRegisterParameterCount();
switch (total_params) {
@@ -284,7 +283,7 @@ Handle<Code> HydrogenCodeStub::GenerateRuntimeTailCall(
UNIMPLEMENTED();
break;
}
- return assembler.GenerateCode();
+ return compiler::CodeAssembler::GenerateCode(&state);
}
template <class Stub>
@@ -328,408 +327,6 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
}
-HValue* CodeStubGraphBuilderBase::BuildPushElement(HValue* object, HValue* argc,
- HValue* argument_elements,
- ElementsKind kind) {
- // Precheck whether all elements fit into the array.
- if (!IsFastObjectElementsKind(kind)) {
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
- HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, argc, Token::LT);
- {
- HInstruction* argument =
- Add<HAccessArgumentsAt>(argument_elements, argc, key);
- IfBuilder can_store(this);
- can_store.IfNot<HIsSmiAndBranch>(argument);
- if (IsFastDoubleElementsKind(kind)) {
- can_store.And();
- can_store.IfNot<HCompareMap>(argument,
- isolate()->factory()->heap_number_map());
- }
- can_store.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- can_store.End();
- }
- builder.EndBody();
- }
-
- HValue* length = Add<HLoadNamedField>(object, nullptr,
- HObjectAccess::ForArrayLength(kind));
- HValue* new_length = AddUncasted<HAdd>(length, argc);
- HValue* max_key = AddUncasted<HSub>(new_length, graph()->GetConstant1());
-
- HValue* elements = Add<HLoadNamedField>(object, nullptr,
- HObjectAccess::ForElementsPointer());
- elements = BuildCheckForCapacityGrow(object, elements, kind, length, max_key,
- true, STORE);
-
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
- HValue* start = graph()->GetConstant0();
- HValue* key = builder.BeginBody(start, argc, Token::LT);
- {
- HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
- HValue* index = AddUncasted<HAdd>(key, length);
- AddElementAccess(elements, index, argument, object, nullptr, kind, STORE);
- }
- builder.EndBody();
- return new_length;
-}
-
-template <>
-HValue* CodeStubGraphBuilder<FastArrayPushStub>::BuildCodeStub() {
- // TODO(verwaest): Fix deoptimizer messages.
- HValue* argc = GetArgumentsLength();
-
- HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
- HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
- graph()->GetConstantMinus1());
- BuildCheckHeapObject(object);
- HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
- Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_ARRAY);
-
- // Disallow pushing onto prototypes. It might be the JSArray prototype.
- // Disallow pushing onto non-extensible objects.
- {
- HValue* bit_field2 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
- HValue* mask =
- Add<HConstant>(static_cast<int>(Map::IsPrototypeMapBits::kMask) |
- (1 << Map::kIsExtensible));
- HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field2, mask);
- IfBuilder check(this);
- check.If<HCompareNumericAndBranch>(
- bits, Add<HConstant>(1 << Map::kIsExtensible), Token::NE);
- check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- check.End();
- }
-
- // Disallow pushing onto arrays in dictionary named property mode. We need to
- // figure out whether the length property is still writable.
- {
- HValue* bit_field3 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
- HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
- HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
- IfBuilder check(this);
- check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- check.End();
- }
-
- // Check whether the length property is writable. The length property is the
- // only default named property on arrays. It's nonconfigurable, hence is
- // guaranteed to stay the first property.
- {
- HValue* descriptors =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
- HValue* details = Add<HLoadKeyed>(
- descriptors, Add<HConstant>(DescriptorArray::ToDetailsIndex(0)),
- nullptr, nullptr, FAST_SMI_ELEMENTS);
- HValue* mask =
- Add<HConstant>(READ_ONLY << PropertyDetails::AttributesField::kShift);
- HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, details, mask);
- IfBuilder readonly(this);
- readonly.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- readonly.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- readonly.End();
- }
-
- HValue* null = Add<HLoadRoot>(Heap::kNullValueRootIndex);
- HValue* empty = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
- environment()->Push(map);
- LoopBuilder check_prototypes(this);
- check_prototypes.BeginBody(1);
- {
- HValue* parent_map = environment()->Pop();
- HValue* prototype = Add<HLoadNamedField>(parent_map, nullptr,
- HObjectAccess::ForPrototype());
-
- IfBuilder is_null(this);
- is_null.If<HCompareObjectEqAndBranch>(prototype, null);
- is_null.Then();
- check_prototypes.Break();
- is_null.End();
-
- HValue* prototype_map =
- Add<HLoadNamedField>(prototype, nullptr, HObjectAccess::ForMap());
- HValue* instance_type = Add<HLoadNamedField>(
- prototype_map, nullptr, HObjectAccess::ForMapInstanceType());
- IfBuilder check_instance_type(this);
- check_instance_type.If<HCompareNumericAndBranch>(
- instance_type, Add<HConstant>(LAST_CUSTOM_ELEMENTS_RECEIVER),
- Token::LTE);
- check_instance_type.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- check_instance_type.End();
-
- HValue* elements = Add<HLoadNamedField>(
- prototype, nullptr, HObjectAccess::ForElementsPointer());
- IfBuilder no_elements(this);
- no_elements.IfNot<HCompareObjectEqAndBranch>(elements, empty);
- no_elements.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- no_elements.End();
-
- environment()->Push(prototype_map);
- }
- check_prototypes.EndBody();
-
- HValue* bit_field2 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
- HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);
-
- // Below we only check the upper bound of the relevant ranges to include both
- // holey and non-holey versions. We check them in the order smi, object,
- // double, since smi < object < double.
- STATIC_ASSERT(FAST_SMI_ELEMENTS < FAST_HOLEY_SMI_ELEMENTS);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS < FAST_HOLEY_ELEMENTS);
- STATIC_ASSERT(FAST_ELEMENTS < FAST_HOLEY_ELEMENTS);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
- STATIC_ASSERT(FAST_DOUBLE_ELEMENTS < FAST_HOLEY_DOUBLE_ELEMENTS);
- IfBuilder has_smi_elements(this);
- has_smi_elements.If<HCompareNumericAndBranch>(
- kind, Add<HConstant>(FAST_HOLEY_SMI_ELEMENTS), Token::LTE);
- has_smi_elements.Then();
- {
- HValue* new_length = BuildPushElement(object, argc, argument_elements,
- FAST_HOLEY_SMI_ELEMENTS);
- environment()->Push(new_length);
- }
- has_smi_elements.Else();
- {
- IfBuilder has_object_elements(this);
- has_object_elements.If<HCompareNumericAndBranch>(
- kind, Add<HConstant>(FAST_HOLEY_ELEMENTS), Token::LTE);
- has_object_elements.Then();
- {
- HValue* new_length = BuildPushElement(object, argc, argument_elements,
- FAST_HOLEY_ELEMENTS);
- environment()->Push(new_length);
- }
- has_object_elements.Else();
- {
- IfBuilder has_double_elements(this);
- has_double_elements.If<HCompareNumericAndBranch>(
- kind, Add<HConstant>(FAST_HOLEY_DOUBLE_ELEMENTS), Token::LTE);
- has_double_elements.Then();
- {
- HValue* new_length = BuildPushElement(object, argc, argument_elements,
- FAST_HOLEY_DOUBLE_ELEMENTS);
- environment()->Push(new_length);
- }
- has_double_elements.ElseDeopt(DeoptimizeReason::kFastPathFailed);
- has_double_elements.End();
- }
- has_object_elements.End();
- }
- has_smi_elements.End();
-
- return environment()->Pop();
-}
-
-Handle<Code> FastArrayPushStub::GenerateCode() { return DoGenerateCode(this); }
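
 The STATIC_ASSERTs above license dispatching on upper bounds alone; a minimal
 sketch of that classification, assuming only the ElementsKind ordering they
 assert:

    // Checking only the holey upper bound of each range covers the packed
    // variant too, because smi < object < double in the enum.
    const char* ClassifyPushKind(ElementsKind kind) {
      if (kind <= FAST_HOLEY_SMI_ELEMENTS) return "smi elements";
      if (kind <= FAST_HOLEY_ELEMENTS) return "object elements";
      if (kind <= FAST_HOLEY_DOUBLE_ELEMENTS) return "double elements";
      return "deopt: kFastPathFailed";
    }
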
-
-template <>
-HValue* CodeStubGraphBuilder<FastFunctionBindStub>::BuildCodeStub() {
- // TODO(verwaest): Fix deoptimizer messages.
- HValue* argc = GetArgumentsLength();
- HInstruction* argument_elements = Add<HArgumentsElements>(false, false);
- HInstruction* object = Add<HAccessArgumentsAt>(argument_elements, argc,
- graph()->GetConstantMinus1());
- BuildCheckHeapObject(object);
- HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
- Add<HCheckInstanceType>(object, HCheckInstanceType::IS_JS_FUNCTION);
-
- // Disallow binding of slow-mode functions. We need to figure out whether the
- // length and name properties are in the original state.
- {
- HValue* bit_field3 =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField3());
- HValue* mask = Add<HConstant>(static_cast<int>(Map::DictionaryMap::kMask));
- HValue* bit = AddUncasted<HBitwise>(Token::BIT_AND, bit_field3, mask);
- IfBuilder check(this);
- check.If<HCompareNumericAndBranch>(bit, mask, Token::EQ);
- check.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- check.End();
- }
-
- // Check whether the length and name properties are still present as
- // AccessorInfo objects. In that case, their value can be recomputed even if
- // the actual value on the object changes.
- {
- HValue* descriptors =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapDescriptors());
-
- HValue* descriptors_length = Add<HLoadNamedField>(
- descriptors, nullptr, HObjectAccess::ForFixedArrayLength());
- IfBuilder range(this);
- range.If<HCompareNumericAndBranch>(descriptors_length,
- graph()->GetConstant1(), Token::LTE);
- range.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- range.End();
-
- // Verify .length.
- const int length_index = JSFunction::kLengthDescriptorIndex;
- HValue* maybe_length = Add<HLoadKeyed>(
- descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(length_index)),
- nullptr, nullptr, FAST_ELEMENTS);
- Unique<Name> length_string = Unique<Name>::CreateUninitialized(
- isolate()->factory()->length_string());
- Add<HCheckValue>(maybe_length, length_string, false);
-
- HValue* maybe_length_accessor = Add<HLoadKeyed>(
- descriptors,
- Add<HConstant>(DescriptorArray::ToValueIndex(length_index)), nullptr,
- nullptr, FAST_ELEMENTS);
- BuildCheckHeapObject(maybe_length_accessor);
- Add<HCheckMaps>(maybe_length_accessor,
- isolate()->factory()->accessor_info_map());
-
- // Verify .name.
- const int name_index = JSFunction::kNameDescriptorIndex;
- HValue* maybe_name = Add<HLoadKeyed>(
- descriptors, Add<HConstant>(DescriptorArray::ToKeyIndex(name_index)),
- nullptr, nullptr, FAST_ELEMENTS);
- Unique<Name> name_string =
- Unique<Name>::CreateUninitialized(isolate()->factory()->name_string());
- Add<HCheckValue>(maybe_name, name_string, false);
-
- HValue* maybe_name_accessor = Add<HLoadKeyed>(
- descriptors, Add<HConstant>(DescriptorArray::ToValueIndex(name_index)),
- nullptr, nullptr, FAST_ELEMENTS);
- BuildCheckHeapObject(maybe_name_accessor);
- Add<HCheckMaps>(maybe_name_accessor,
- isolate()->factory()->accessor_info_map());
- }
-
- // Choose the right bound function map based on whether the target is
- // constructable.
- {
- HValue* bit_field =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
- HValue* mask = Add<HConstant>(static_cast<int>(1 << Map::kIsConstructor));
- HValue* bits = AddUncasted<HBitwise>(Token::BIT_AND, bit_field, mask);
-
- HValue* native_context = BuildGetNativeContext();
- IfBuilder is_constructor(this);
- is_constructor.If<HCompareNumericAndBranch>(bits, mask, Token::EQ);
- is_constructor.Then();
- {
- HValue* map = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(
- Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX));
- environment()->Push(map);
- }
- is_constructor.Else();
- {
- HValue* map = Add<HLoadNamedField>(
- native_context, nullptr,
- HObjectAccess::ForContextSlot(
- Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX));
- environment()->Push(map);
- }
- is_constructor.End();
- }
- HValue* bound_function_map = environment()->Pop();
-
- // Verify that __proto__ matches that of the target bound function.
- {
- HValue* prototype =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
- HValue* expected_prototype = Add<HLoadNamedField>(
- bound_function_map, nullptr, HObjectAccess::ForPrototype());
- IfBuilder equal_prototype(this);
- equal_prototype.IfNot<HCompareObjectEqAndBranch>(prototype,
- expected_prototype);
- equal_prototype.ThenDeopt(DeoptimizeReason::kFastPathFailed);
- equal_prototype.End();
- }
-
- // Allocate the arguments array.
- IfBuilder empty_args(this);
- empty_args.If<HCompareNumericAndBranch>(argc, graph()->GetConstant1(),
- Token::LTE);
- empty_args.Then();
- { environment()->Push(Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex)); }
- empty_args.Else();
- {
- HValue* elements_length = AddUncasted<HSub>(argc, graph()->GetConstant1());
- HValue* elements =
- BuildAllocateAndInitializeArray(FAST_ELEMENTS, elements_length);
-
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
- HValue* start = graph()->GetConstant1();
- HValue* key = builder.BeginBody(start, argc, Token::LT);
- {
- HValue* argument = Add<HAccessArgumentsAt>(argument_elements, argc, key);
- HValue* index = AddUncasted<HSub>(key, graph()->GetConstant1());
- AddElementAccess(elements, index, argument, elements, nullptr,
- FAST_ELEMENTS, STORE);
- }
- builder.EndBody();
- environment()->Push(elements);
- }
- empty_args.End();
- HValue* elements = environment()->Pop();
-
- // Find the 'this' to bind.
- IfBuilder no_receiver(this);
- no_receiver.If<HCompareNumericAndBranch>(argc, graph()->GetConstant0(),
- Token::EQ);
- no_receiver.Then();
- { environment()->Push(Add<HLoadRoot>(Heap::kUndefinedValueRootIndex)); }
- no_receiver.Else();
- {
- environment()->Push(Add<HAccessArgumentsAt>(argument_elements, argc,
- graph()->GetConstant0()));
- }
- no_receiver.End();
- HValue* receiver = environment()->Pop();
-
- // Allocate the resulting bound function.
- HValue* size = Add<HConstant>(JSBoundFunction::kSize);
- HValue* bound_function =
- Add<HAllocate>(size, HType::JSObject(), NOT_TENURED,
- JS_BOUND_FUNCTION_TYPE, graph()->GetConstant0());
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForMap(),
- bound_function_map);
- HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForPropertiesPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForElementsPointer(),
- empty_fixed_array);
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundTargetFunction(),
- object);
-
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundThis(),
- receiver);
- Add<HStoreNamedField>(bound_function, HObjectAccess::ForBoundArguments(),
- elements);
-
- return bound_function;
-}
-
-Handle<Code> FastFunctionBindStub::GenerateCode() {
- return DoGenerateCode(this);
-}
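
 The argument-copy loop above skips slot 0, which holds the receiver, and
 stores args[key] at index key - 1. The same indexing as a standalone sketch
 (CopyBoundArguments is illustrative, not a V8 helper):

    #include <vector>
    // args[0] is the bound 'this'; args[1..argc-1] become bound arguments.
    std::vector<Object*> CopyBoundArguments(Object* const* args, int argc) {
      std::vector<Object*> elements;
      for (int key = 1; key < argc; ++key) {
        elements.push_back(args[key]);  // lands at index key - 1
      }
      return elements;
    }
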
-
-template <>
-HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
- LoadKeyedHoleMode hole_mode = casted_stub()->convert_hole_to_undefined()
- ? CONVERT_HOLE_TO_UNDEFINED
- : NEVER_RETURN_HOLE;
-
- HInstruction* load = BuildUncheckedMonomorphicElementAccess(
- GetParameter(Descriptor::kReceiver), GetParameter(Descriptor::kName),
- NULL, casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
- hole_mode, STANDARD_STORE);
- return load;
-}
-
-
-Handle<Code> LoadFastElementStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
HValue* object, FieldIndex index) {
Representation representation = index.is_double()
@@ -750,34 +347,6 @@ HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
return Add<HLoadNamedField>(object, nullptr, access);
}
-
-template<>
-HValue* CodeStubGraphBuilder<LoadFieldStub>::BuildCodeStub() {
- return BuildLoadNamedField(GetParameter(Descriptor::kReceiver),
- casted_stub()->index());
-}
-
-
-Handle<Code> LoadFieldStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
- HValue* map = AddLoadMap(GetParameter(Descriptor::kReceiver), NULL);
- HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
- Map::kDescriptorsOffset, Representation::Tagged());
- HValue* descriptors = Add<HLoadNamedField>(map, nullptr, descriptors_access);
- HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
- DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
- return Add<HLoadNamedField>(descriptors, nullptr, value_access);
-}
-
-
-Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
-
-
void CodeStubGraphBuilderBase::BuildStoreNamedField(
HValue* object, HValue* value, FieldIndex index,
Representation representation, bool transition_to_field) {
@@ -1134,24 +703,5 @@ HValue* CodeStubGraphBuilder<ToBooleanICStub>::BuildCodeInitializedStub() {
Handle<Code> ToBooleanICStub::GenerateCode() { return DoGenerateCode(this); }
-template <>
-HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(Descriptor::kReceiver);
- HValue* key = GetParameter(Descriptor::kName);
-
- Add<HCheckSmi>(key);
-
- HValue* elements = AddLoadElements(receiver);
-
- HValue* hash = BuildElementIndexHash(key);
-
- return BuildUncheckedDictionaryElementLoad(receiver, elements, key, hash);
-}
-
-
-Handle<Code> LoadDictionaryElementStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 2ee5ece8da..032fdb30b3 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -12,13 +12,17 @@
#include "src/code-stub-assembler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
+#include "src/ic/accessor-assembler.h"
#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-stats.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
+using compiler::CodeAssemblerState;
RUNTIME_FUNCTION(UnexpectedStubMiss) {
FATAL("Unexpected deopt of a stub");
@@ -110,6 +114,12 @@ Handle<Code> CodeStub::GetCodeCopy(const Code::FindAndReplacePattern& pattern) {
return ic;
}
+void CodeStub::DeleteStubFromCacheForTesting() {
+ Heap* heap = isolate_->heap();
+ Handle<UnseededNumberDictionary> dict(heap->code_stubs());
+ dict = UnseededNumberDictionary::DeleteKey(dict, GetKey());
+ heap->SetRootCodeStubs(*dict);
+}
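
 A hypothetical test-side use of the new helper, assuming a concrete CodeStub
 instance `stub` on a live isolate:

    Handle<Code> code = stub.GetCode();    // inserts the stub into heap->code_stubs()
    stub.DeleteStubFromCacheForTesting();  // evicts it so regeneration can be observed
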
Handle<Code> PlatformCodeStub::GenerateCode() {
Factory* factory = isolate()->factory();
@@ -183,8 +193,7 @@ Handle<Code> CodeStub::GetCode() {
}
Activate(code);
- DCHECK(!NeedsImmovableCode() ||
- heap->lo_space()->Contains(code) ||
+ DCHECK(!NeedsImmovableCode() || Heap::IsImmovable(code) ||
heap->code_space()->FirstPage()->Contains(code->address()));
return Handle<Code>(code, isolate());
}
@@ -322,36 +331,38 @@ void StringAddStub::PrintBaseName(std::ostream& os) const { // NOLINT
os << "StringAddStub_" << flags() << "_" << pretenure_flag();
}
-void StringAddStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StringAddStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* left = assembler->Parameter(Descriptor::kLeft);
- Node* right = assembler->Parameter(Descriptor::kRight);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ CodeStubAssembler assembler(state);
+ Node* left = assembler.Parameter(Descriptor::kLeft);
+ Node* right = assembler.Parameter(Descriptor::kRight);
+ Node* context = assembler.Parameter(Descriptor::kContext);
if ((flags() & STRING_ADD_CHECK_LEFT) != 0) {
DCHECK((flags() & STRING_ADD_CONVERT) != 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- left = assembler->ToString(context,
- assembler->JSReceiverToPrimitive(context, left));
+ left = assembler.ToString(context,
+ assembler.JSReceiverToPrimitive(context, left));
}
if ((flags() & STRING_ADD_CHECK_RIGHT) != 0) {
DCHECK((flags() & STRING_ADD_CONVERT) != 0);
// TODO(danno): The ToString and JSReceiverToPrimitive below could be
// combined to avoid duplicate smi and instance type checks.
- right = assembler->ToString(
- context, assembler->JSReceiverToPrimitive(context, right));
+ right = assembler.ToString(context,
+ assembler.JSReceiverToPrimitive(context, right));
}
if ((flags() & STRING_ADD_CHECK_BOTH) == 0) {
CodeStubAssembler::AllocationFlag flags =
(pretenure_flag() == TENURED) ? CodeStubAssembler::kPretenured
: CodeStubAssembler::kNone;
- assembler->Return(assembler->StringAdd(context, left, right, flags));
+ assembler.Return(assembler.StringAdd(context, left, right, flags));
} else {
Callable callable = CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
pretenure_flag());
- assembler->TailCallStub(callable, context, left, right);
+ assembler.TailCallStub(callable, context, left, right);
}
}
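
 A pseudo-C++ sketch of the flag dispatch in this function (the helper names
 are illustrative): conversions run only for the CHECK flags that are set, and
 the inline fast path is taken only when no checks are requested.

    if ((flags & STRING_ADD_CHECK_LEFT) != 0)
      left = ToString(JSReceiverToPrimitive(left));
    if ((flags & STRING_ADD_CHECK_RIGHT) != 0)
      right = ToString(JSReceiverToPrimitive(right));
    if ((flags & STRING_ADD_CHECK_BOTH) == 0)
      return StringAddInline(left, right);         // honors the pretenure flag
    return TailCallGenericStringAdd(left, right);  // re-enters with CHECK_NONE
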
@@ -422,299 +433,108 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
const char* name = CodeStub::MajorName(MajorKey());
Zone zone(isolate()->allocator(), ZONE_NAME);
CallInterfaceDescriptor descriptor(GetCallInterfaceDescriptor());
- CodeStubAssembler assembler(isolate(), &zone, descriptor, GetCodeFlags(),
- name);
- GenerateAssembly(&assembler);
- return assembler.GenerateCode();
-}
-
-void LoadICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadIC(&p);
-}
-
-void LoadICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadIC(&p);
-}
-
-void LoadICProtoArrayStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* handler = assembler->Parameter(Descriptor::kHandler);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->LoadICProtoArray(&p, handler);
-}
-
-void LoadGlobalICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
- vector);
- assembler->LoadGlobalIC(&p);
-}
-
-void LoadGlobalICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, nullptr, nullptr, slot,
- vector);
- assembler->LoadGlobalIC(&p);
-}
-
-void KeyedLoadICTrampolineTFStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadIC(&p);
-}
-
-void KeyedLoadICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::LoadICParameters p(context, receiver, name, slot, vector);
- assembler->KeyedLoadIC(&p);
-}
-
-void StoreICTrampolineStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->StoreIC(&p);
+ compiler::CodeAssemblerState state(isolate(), &zone, descriptor,
+ GetCodeFlags(), name);
+ GenerateAssembly(&state);
+ return compiler::CodeAssembler::GenerateCode(&state);
}
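
 This hunk carries the central refactor of the file: GenerateAssembly now
 receives a compiler::CodeAssemblerState, and each stub constructs a local
 CodeStubAssembler over it, so call sites change from assembler->Foo() to
 assembler.Foo(). A minimal before/after sketch (SomeStub is hypothetical):

    // Before: void SomeStub::GenerateAssembly(CodeStubAssembler* assembler) const;
    void SomeStub::GenerateAssembly(compiler::CodeAssemblerState* state) const {
      CodeStubAssembler assembler(state);  // local view over the shared state
      assembler.Return(assembler.Parameter(0));
    }
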
-void StoreICStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->StoreIC(&p);
-}
-
-void KeyedStoreICTrampolineTFStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* vector = assembler->LoadTypeFeedbackVectorForStub();
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void KeyedStoreICTFStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- CodeStubAssembler::StoreICParameters p(context, receiver, name, value, slot,
- vector);
- assembler->KeyedStoreIC(&p, StoreICState::GetLanguageMode(GetExtraICState()));
-}
-
-void StoreMapStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* value = assembler->Parameter(Descriptor::kValue);
-
- assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
- assembler->Return(value);
-}
-
-void StoreTransitionStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* offset =
- assembler->SmiUntag(assembler->Parameter(Descriptor::kFieldOffset));
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label miss(assembler);
-
- Representation representation = this->representation();
- assembler->Comment("StoreTransitionStub: is_inobject: %d: representation: %s",
- is_inobject(), representation.Mnemonic());
-
- Node* prepared_value =
- assembler->PrepareValueForWrite(value, representation, &miss);
-
- if (store_mode() == StoreTransitionStub::ExtendStorageAndStoreMapAndValue) {
- assembler->Comment("Extend storage");
- assembler->ExtendPropertiesBackingStore(receiver);
- } else {
- DCHECK(store_mode() == StoreTransitionStub::StoreMapAndValue);
- }
-
- // Store the new value into the "extended" object.
- assembler->Comment("Store value");
- assembler->StoreNamedField(receiver, offset, is_inobject(), representation,
- prepared_value, true);
-
- // And finally update the map.
- assembler->Comment("Store map");
- assembler->StoreObjectField(receiver, JSObject::kMapOffset, map);
- assembler->Return(value);
-
- // Only a store to a tagged field never bails out.
- if (!representation.IsTagged()) {
- assembler->Bind(&miss);
- {
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
- }
- }
+void LoadICProtoArrayStub::GenerateAssembly(CodeAssemblerState* state) const {
+ AccessorAssembler::GenerateLoadICProtoArray(
+ state, throw_reference_error_if_nonexistent());
}
void ElementsTransitionAndStoreStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* map = assembler->Parameter(Descriptor::kMap);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* map = assembler.Parameter(Descriptor::kMap);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- assembler->Comment(
+ assembler.Comment(
"ElementsTransitionAndStoreStub: from_kind=%s, to_kind=%s,"
" is_jsarray=%d, store_mode=%d",
ElementsKindToString(from_kind()), ElementsKindToString(to_kind()),
is_jsarray(), store_mode());
- Label miss(assembler);
+ Label miss(&assembler);
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
- assembler->Goto(&miss);
+ assembler.Goto(&miss);
} else {
- assembler->TransitionElementsKind(receiver, map, from_kind(), to_kind(),
- is_jsarray(), &miss);
- assembler->EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
- store_mode(), &miss);
- assembler->Return(value);
+ assembler.TransitionElementsKind(receiver, map, from_kind(), to_kind(),
+ is_jsarray(), &miss);
+ assembler.EmitElementStore(receiver, key, value, is_jsarray(), to_kind(),
+ store_mode(), &miss);
+ assembler.Return(value);
}
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
- context, receiver, key, value, map, slot,
- vector);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kElementsTransitionAndStoreIC_Miss,
+ context, receiver, key, value, map, slot, vector);
}
}
void AllocateHeapNumberStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* result = assembler->AllocateHeapNumber();
- assembler->Return(result);
+ Node* result = assembler.AllocateHeapNumber();
+ assembler.Return(result);
}
-#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
- void Allocate##Type##Stub::GenerateAssembly(CodeStubAssembler* assembler) \
- const { \
- compiler::Node* result = \
- assembler->Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
- compiler::Node* map = assembler->LoadMap(result); \
- assembler->StoreNoWriteBarrier( \
- MachineRepresentation::kTagged, map, \
- assembler->HeapConstant(isolate()->factory()->type##_map())); \
- assembler->Return(result); \
+#define SIMD128_GEN_ASM(TYPE, Type, type, lane_count, lane_type) \
+ void Allocate##Type##Stub::GenerateAssembly( \
+ compiler::CodeAssemblerState* state) const { \
+ CodeStubAssembler assembler(state); \
+ compiler::Node* result = \
+ assembler.Allocate(Simd128Value::kSize, CodeStubAssembler::kNone); \
+ compiler::Node* map = assembler.LoadMap(result); \
+ assembler.StoreNoWriteBarrier( \
+ MachineRepresentation::kTagged, map, \
+ assembler.HeapConstant(isolate()->factory()->type##_map())); \
+ assembler.Return(result); \
}
SIMD128_TYPES(SIMD128_GEN_ASM)
#undef SIMD128_GEN_ASM
-void StringLengthStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- compiler::Node* value = assembler->Parameter(0);
- compiler::Node* string = assembler->LoadJSValueValue(value);
- compiler::Node* result = assembler->LoadStringLength(string);
- assembler->Return(result);
-}
+void StringLengthStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ compiler::Node* value = assembler.Parameter(0);
+ compiler::Node* string = assembler.LoadJSValueValue(value);
+ compiler::Node* result = assembler.LoadStringLength(string);
+ assembler.Return(result);
+}
+
+#define BINARY_OP_STUB(Name) \
+ void Name::GenerateAssembly(compiler::CodeAssemblerState* state) const { \
+ typedef BinaryOpWithVectorDescriptor Descriptor; \
+ CodeStubAssembler assembler(state); \
+ assembler.Return(Generate( \
+ &assembler, assembler.Parameter(Descriptor::kLeft), \
+ assembler.Parameter(Descriptor::kRight), \
+ assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)), \
+ assembler.Parameter(Descriptor::kVector), \
+ assembler.Parameter(Descriptor::kContext))); \
+ }
+BINARY_OP_STUB(AddWithFeedbackStub)
+BINARY_OP_STUB(SubtractWithFeedbackStub)
+BINARY_OP_STUB(MultiplyWithFeedbackStub)
+BINARY_OP_STUB(DivideWithFeedbackStub)
+BINARY_OP_STUB(ModulusWithFeedbackStub)
+#undef BINARY_OP_STUB
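
 For reference, BINARY_OP_STUB(AddWithFeedbackStub) expands to roughly the
 following (whitespace normalized):

    void AddWithFeedbackStub::GenerateAssembly(
        compiler::CodeAssemblerState* state) const {
      typedef BinaryOpWithVectorDescriptor Descriptor;
      CodeStubAssembler assembler(state);
      assembler.Return(Generate(
          &assembler, assembler.Parameter(Descriptor::kLeft),
          assembler.Parameter(Descriptor::kRight),
          assembler.ChangeUint32ToWord(assembler.Parameter(Descriptor::kSlot)),
          assembler.Parameter(Descriptor::kVector),
          assembler.Parameter(Descriptor::kContext)));
    }
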
// static
compiler::Node* AddWithFeedbackStub::Generate(
@@ -732,7 +552,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
call_add_stub(assembler), end(assembler);
Variable var_fadd_lhs(assembler, MachineRepresentation::kFloat64),
var_fadd_rhs(assembler, MachineRepresentation::kFloat64),
- var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
var_result(assembler, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
@@ -768,7 +588,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&if_notoverflow);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
var_result.Bind(assembler->BitcastWordToTaggedSigned(
assembler->Projection(0, pair)));
assembler->Goto(&end);
@@ -829,7 +649,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&do_fadd);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value =
assembler->Float64Add(var_fadd_lhs.value(), var_fadd_rhs.value());
Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -876,7 +696,7 @@ compiler::Node* AddWithFeedbackStub::Generate(
&call_with_any_feedback);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kString));
+ assembler->SmiConstant(BinaryOperationFeedback::kString));
Callable callable = CodeFactory::StringAdd(
assembler->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
var_result.Bind(assembler->CallStub(callable, context, lhs, rhs));
@@ -899,14 +719,14 @@ compiler::Node* AddWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_add_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_add_stub);
}
@@ -938,7 +758,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
call_with_any_feedback(assembler);
Variable var_fsub_lhs(assembler, MachineRepresentation::kFloat64),
var_fsub_rhs(assembler, MachineRepresentation::kFloat64),
- var_type_feedback(assembler, MachineRepresentation::kWord32),
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned),
var_result(assembler, MachineRepresentation::kTagged);
// Check if the {lhs} is a Smi or a HeapObject.
@@ -976,7 +796,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&if_notoverflow);
// lhs, rhs, and result are Smis; combined feedback is kSignedSmall.
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
var_result.Bind(
assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
assembler->Goto(&end);
@@ -1039,7 +859,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&do_fsub);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* lhs_value = var_fsub_lhs.value();
Node* rhs_value = var_fsub_rhs.value();
Node* value = assembler->Float64Sub(lhs_value, rhs_value);
@@ -1063,7 +883,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->Bind(&if_rhsissmi);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
@@ -1077,7 +897,7 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
&check_rhsisoddball);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
}
@@ -1092,14 +912,14 @@ compiler::Node* SubtractWithFeedbackStub::Generate(
assembler->GotoUnless(rhs_is_oddball, &call_with_any_feedback);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_subtract_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_subtract_stub);
}
@@ -1134,9 +954,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Variable var_lhs_float64(assembler, MachineRepresentation::kFloat64),
var_rhs_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label lhs_is_smi(assembler), lhs_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
@@ -1152,11 +970,10 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
// Both {lhs} and {rhs} are Smis. The result is not necessarily a smi,
// in case of overflow.
var_result.Bind(assembler->SmiMul(lhs, rhs));
- var_type_feedback.Bind(assembler->Select(
+ var_type_feedback.Bind(assembler->SelectSmiConstant(
assembler->TaggedIsSmi(var_result.value()),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber),
- MachineRepresentation::kWord32));
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
assembler->Goto(&end);
}
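
 SelectSmiConstant replaces the four-argument Select pattern deleted above,
 matching the feedback variable's move from kWord32 to kTaggedSigned. Read
 that way, it is equivalent to this sketch:

    // Assumed expansion of SelectSmiConstant(cond, t, f) in terms of the
    // Select overload used in the deleted lines.
    Node* feedback = assembler->Select(
        cond, assembler->SmiConstant(t), assembler->SmiConstant(f),
        MachineRepresentation::kTaggedSigned);
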
@@ -1165,7 +982,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
&check_rhsisoddball);
// Convert {lhs} to a double and multiply it with the value of {rhs}.
@@ -1180,7 +997,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* lhs_map = assembler->LoadMap(lhs);
// Check if {lhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(lhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(lhs_map),
&if_lhsisnotnumber);
// Check if {rhs} is a Smi.
@@ -1201,7 +1018,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
Node* rhs_map = assembler->LoadMap(rhs);
// Check if {rhs} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(rhs_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(rhs_map),
&check_rhsisoddball);
// Both {lhs} and {rhs} are HeapNumbers. Load their values and
@@ -1215,7 +1032,7 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
assembler->Bind(&do_fmul);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value =
assembler->Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
Node* result = assembler->AllocateHeapNumberWithValue(value);
@@ -1256,14 +1073,14 @@ compiler::Node* MultiplyWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_multiply_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_multiply_stub);
}
@@ -1298,9 +1115,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1318,27 +1133,26 @@ compiler::Node* DivideWithFeedbackStub::Generate(
// Do floating point division if {divisor} is zero.
assembler->GotoIf(
- assembler->WordEqual(divisor, assembler->IntPtrConstant(0)),
- &bailout);
+ assembler->WordEqual(divisor, assembler->SmiConstant(0)), &bailout);
// Do floating point division if {dividend} is zero and {divisor} is
// negative.
Label dividend_is_zero(assembler), dividend_is_not_zero(assembler);
assembler->Branch(
- assembler->WordEqual(dividend, assembler->IntPtrConstant(0)),
+ assembler->WordEqual(dividend, assembler->SmiConstant(0)),
&dividend_is_zero, &dividend_is_not_zero);
assembler->Bind(&dividend_is_zero);
{
assembler->GotoIf(
- assembler->IntPtrLessThan(divisor, assembler->IntPtrConstant(0)),
+ assembler->SmiLessThan(divisor, assembler->SmiConstant(0)),
&bailout);
assembler->Goto(&dividend_is_not_zero);
}
assembler->Bind(&dividend_is_not_zero);
- Node* untagged_divisor = assembler->SmiUntag(divisor);
- Node* untagged_dividend = assembler->SmiUntag(dividend);
+ Node* untagged_divisor = assembler->SmiToWord32(divisor);
+ Node* untagged_dividend = assembler->SmiToWord32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
@@ -1367,8 +1181,8 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->GotoIf(assembler->Word32NotEqual(untagged_dividend, truncated),
&bailout);
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall));
- var_result.Bind(assembler->SmiTag(untagged_result));
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall));
+ var_result.Bind(assembler->SmiFromWord32(untagged_result));
assembler->Goto(&end);
// Bailout: convert {dividend} and {divisor} to double and do double
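
 The guards above are the classic pitfalls of fast integer division; the same
 bailout conditions as a plain C++ sketch (TrySmiDivide is illustrative):

    #include <cstdint>
    bool TrySmiDivide(int32_t dividend, int32_t divisor, int32_t* result) {
      if (divisor == 0) return false;                  // needs Infinity/NaN
      if (dividend == 0 && divisor < 0) return false;  // would produce -0
      if (dividend == INT32_MIN && divisor == -1) return false;  // overflows
      int32_t q = dividend / divisor;
      if (q * divisor != dividend) return false;       // non-integral result
      *result = q;
      return true;
    }
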
@@ -1386,7 +1200,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
@@ -1401,7 +1215,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
&dividend_is_not_number);
// Check if {divisor} is a Smi.
@@ -1423,7 +1237,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
@@ -1438,7 +1252,7 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->Bind(&do_fdiv);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Div(var_dividend_float64.value(),
var_divisor_float64.value());
var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1479,14 +1293,14 @@ compiler::Node* DivideWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_divide_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_divide_stub);
}
@@ -1520,9 +1334,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Variable var_dividend_float64(assembler, MachineRepresentation::kFloat64),
var_divisor_float64(assembler, MachineRepresentation::kFloat64),
var_result(assembler, MachineRepresentation::kTagged),
- var_type_feedback(assembler, MachineRepresentation::kWord32);
-
- Node* number_map = assembler->HeapNumberMapConstant();
+ var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Label dividend_is_smi(assembler), dividend_is_not_smi(assembler);
assembler->Branch(assembler->TaggedIsSmi(dividend), &dividend_is_smi,
@@ -1537,10 +1349,10 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&divisor_is_smi);
{
var_result.Bind(assembler->SmiMod(dividend, divisor));
- var_type_feedback.Bind(assembler->Select(
+ var_type_feedback.Bind(assembler->SelectSmiConstant(
assembler->TaggedIsSmi(var_result.value()),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
+ BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber));
assembler->Goto(&end);
}
@@ -1549,7 +1361,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Convert {dividend} to a double and divide it with the value of
@@ -1565,7 +1377,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* dividend_map = assembler->LoadMap(dividend);
// Check if {dividend} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(dividend_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(dividend_map),
&dividend_is_not_number);
// Check if {divisor} is a Smi.
@@ -1587,7 +1399,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
Node* divisor_map = assembler->LoadMap(divisor);
// Check if {divisor} is a HeapNumber.
- assembler->GotoUnless(assembler->WordEqual(divisor_map, number_map),
+ assembler->GotoUnless(assembler->IsHeapNumberMap(divisor_map),
&check_divisor_for_oddball);
// Both {dividend} and {divisor} are HeapNumbers. Load their values
@@ -1601,7 +1413,7 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&do_fmod);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumber));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber));
Node* value = assembler->Float64Mod(var_dividend_float64.value(),
var_divisor_float64.value());
var_result.Bind(assembler->AllocateHeapNumberWithValue(value));
@@ -1642,14 +1454,14 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
assembler->Bind(&call_with_oddball_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ assembler->SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
assembler->Goto(&call_modulus_stub);
}
assembler->Bind(&call_with_any_feedback);
{
var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
assembler->Goto(&call_modulus_stub);
}
@@ -1666,277 +1478,13 @@ compiler::Node* ModulusWithFeedbackStub::Generate(
return var_result.value();
}
-// static
-compiler::Node* IncStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::Node* context,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point increment.
- Label do_finc(assembler), end(assembler);
- Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(assembler, MachineRepresentation::kTagged);
- Variable result_var(assembler, MachineRepresentation::kTagged);
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(assembler, 2, loop_vars);
- value_var.Bind(value);
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNone));
- assembler->Goto(&start);
- assembler->Bind(&start);
- {
- value = value_var.value();
-
- Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- assembler->Bind(&if_issmi);
- {
- // Try fast Smi addition first.
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->IntPtrAddWithOverflow(
- assembler->BitcastTaggedToWord(value),
- assembler->BitcastTaggedToWord(one));
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi addition overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(
- assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
- assembler->Goto(&end);
-
- assembler->Bind(&if_overflow);
- {
- var_finc_value.Bind(assembler->SmiToFloat64(value));
- assembler->Goto(&do_finc);
- }
- }
-
- assembler->Bind(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(assembler),
- if_valuenotnumber(assembler, Label::kDeferred);
- Node* value_map = assembler->LoadMap(value);
- assembler->Branch(assembler->IsHeapNumberMap(value_map),
- &if_valueisnumber, &if_valuenotnumber);
-
- assembler->Bind(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
- assembler->Goto(&do_finc);
- }
-
- assembler->Bind(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(assembler,
- assembler->Word32Equal(var_type_feedback.value(),
- assembler->Int32Constant(
- BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
- Node* instance_type = assembler->LoadMapInstanceType(value_map);
- Node* is_oddball = assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- assembler->Bind(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(
- assembler->LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(assembler->Int32Constant(
- BinaryOperationFeedback::kNumberOrOddball));
- assembler->Goto(&start);
- }
-
- assembler->Bind(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
- }
- }
- }
- }
-
- assembler->Bind(&do_finc);
- {
- Node* finc_value = var_finc_value.value();
- Node* one = assembler->Float64Constant(1.0);
- Node* finc_result = assembler->Float64Add(finc_value, one);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return result_var.value();
-}
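
 The removed fast path reduces to an overflow-checked add of one, with a
 float64 fallback; a standalone sketch of that step (TrySmiIncrement is
 illustrative and ignores the narrower 31-bit Smi range on 32-bit targets):

    #include <cstdint>
    bool TrySmiIncrement(int32_t value, int32_t* result) {
      if (value == INT32_MAX) return false;  // overflow: take the float64 path
      *result = value + 1;
      return true;
    }
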
-
-void NumberToStringStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void NumberToStringStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* argument = assembler->Parameter(Descriptor::kArgument);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->Return(assembler->NumberToString(context, argument));
-}
-
-// static
-compiler::Node* DecStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* value,
- compiler::Node* context,
- compiler::Node* type_feedback_vector,
- compiler::Node* slot_id) {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Variable Variable;
-
- // Shared entry for floating point decrement.
- Label do_fdec(assembler), end(assembler);
- Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
-
- // We might need to try again due to ToNumber conversion.
- Variable value_var(assembler, MachineRepresentation::kTagged);
- Variable result_var(assembler, MachineRepresentation::kTagged);
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Variable* loop_vars[] = {&value_var, &var_type_feedback};
- Label start(assembler, 2, loop_vars);
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kNone));
- value_var.Bind(value);
- assembler->Goto(&start);
- assembler->Bind(&start);
- {
- value = value_var.value();
-
- Label if_issmi(assembler), if_isnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
-
- assembler->Bind(&if_issmi);
- {
- // Try fast Smi subtraction first.
- Node* one = assembler->SmiConstant(Smi::FromInt(1));
- Node* pair = assembler->IntPtrSubWithOverflow(
- assembler->BitcastTaggedToWord(value),
- assembler->BitcastTaggedToWord(one));
- Node* overflow = assembler->Projection(1, pair);
-
- // Check if the Smi subtraction overflowed.
- Label if_overflow(assembler), if_notoverflow(assembler);
- assembler->Branch(overflow, &if_overflow, &if_notoverflow);
-
- assembler->Bind(&if_notoverflow);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kSignedSmall)));
- result_var.Bind(
- assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
- assembler->Goto(&end);
-
- assembler->Bind(&if_overflow);
- {
- var_fdec_value.Bind(assembler->SmiToFloat64(value));
- assembler->Goto(&do_fdec);
- }
- }
-
- assembler->Bind(&if_isnotsmi);
- {
- // Check if the value is a HeapNumber.
- Label if_valueisnumber(assembler),
- if_valuenotnumber(assembler, Label::kDeferred);
- Node* value_map = assembler->LoadMap(value);
- assembler->Branch(assembler->IsHeapNumberMap(value_map),
- &if_valueisnumber, &if_valuenotnumber);
-
- assembler->Bind(&if_valueisnumber);
- {
- // Load the HeapNumber value.
- var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
- assembler->Goto(&do_fdec);
- }
-
- assembler->Bind(&if_valuenotnumber);
- {
- // We do not require an Or with earlier feedback here because once we
- // convert the value to a number, we cannot reach this path. We can
- // only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(assembler,
- assembler->Word32Equal(var_type_feedback.value(),
- assembler->Int32Constant(
- BinaryOperationFeedback::kNone)));
-
- Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
- Node* instance_type = assembler->LoadMapInstanceType(value_map);
- Node* is_oddball = assembler->Word32Equal(
- instance_type, assembler->Int32Constant(ODDBALL_TYPE));
- assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
-
- assembler->Bind(&if_valueisoddball);
- {
- // Convert Oddball to Number and check again.
- value_var.Bind(
- assembler->LoadObjectField(value, Oddball::kToNumberOffset));
- var_type_feedback.Bind(assembler->Int32Constant(
- BinaryOperationFeedback::kNumberOrOddball));
- assembler->Goto(&start);
- }
-
- assembler->Bind(&if_valuenotoddball);
- {
- // Convert to a Number first and try again.
- Callable callable =
- CodeFactory::NonNumberToNumber(assembler->isolate());
- var_type_feedback.Bind(
- assembler->Int32Constant(BinaryOperationFeedback::kAny));
- value_var.Bind(assembler->CallStub(callable, context, value));
- assembler->Goto(&start);
- }
- }
- }
- }
-
- assembler->Bind(&do_fdec);
- {
- Node* fdec_value = var_fdec_value.value();
- Node* one = assembler->Float64Constant(1.0);
- Node* fdec_result = assembler->Float64Sub(fdec_value, one);
- var_type_feedback.Bind(assembler->Word32Or(
- var_type_feedback.value(),
- assembler->Int32Constant(BinaryOperationFeedback::kNumber)));
- result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_id);
- return result_var.value();
+ CodeStubAssembler assembler(state);
+ Node* argument = assembler.Parameter(Descriptor::kArgument);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ assembler.Return(assembler.NumberToString(context, argument));
}
// ES6 section 21.1.3.19 String.prototype.substring ( start, end )
@@ -1948,110 +1496,85 @@ compiler::Node* SubStringStub::Generate(CodeStubAssembler* assembler,
return assembler->SubString(context, string, from, to);
}
-void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void SubStringStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(Generate(&assembler,
+ assembler.Parameter(Descriptor::kString),
+ assembler.Parameter(Descriptor::kFrom),
+ assembler.Parameter(Descriptor::kTo),
+ assembler.Parameter(Descriptor::kContext)));
+}
+
+void LoadApiGetterStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* context = assembler->Parameter(Descriptor::kContext);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
+ CodeStubAssembler assembler(state);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
// For now we only support receiver_is_holder.
DCHECK(receiver_is_holder());
Node* holder = receiver;
- Node* map = assembler->LoadMap(receiver);
- Node* descriptors = assembler->LoadMapDescriptors(map);
- Node* value_index =
- assembler->IntPtrConstant(DescriptorArray::ToValueIndex(index()));
- Node* callback = assembler->LoadFixedArrayElement(
- descriptors, value_index, 0, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
- holder, callback);
+ Node* map = assembler.LoadMap(receiver);
+ Node* descriptors = assembler.LoadMapDescriptors(map);
+ Node* callback = assembler.LoadFixedArrayElement(
+ descriptors, DescriptorArray::ToValueIndex(index()));
+ assembler.TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
+ holder, callback);
}
-void StoreFieldStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void StoreGlobalStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- FieldIndex index = this->index();
- Representation representation = this->representation();
-
- assembler->Comment("StoreFieldStub: inobject=%d, offset=%d, rep=%s",
- index.is_inobject(), index.offset(),
- representation.Mnemonic());
-
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label miss(assembler);
-
- Node* prepared_value =
- assembler->PrepareValueForWrite(value, representation, &miss);
- assembler->StoreNamedField(receiver, index, representation, prepared_value,
- false);
- assembler->Return(value);
-
- // Only stores to tagged field can't bailout.
- if (!representation.IsTagged()) {
- assembler->Bind(&miss);
- {
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
- }
- }
-}
-
-void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
-
- assembler->Comment(
+ assembler.Comment(
"StoreGlobalStub: cell_type=%d, constant_type=%d, check_global=%d",
cell_type(), PropertyCellType::kConstantType == cell_type()
? static_cast<int>(constant_type())
: -1,
check_global());
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
if (check_global()) {
// Check that the map of the global has not changed: use a placeholder map
// that will be replaced later with the global object's map.
- Node* proxy_map = assembler->LoadMap(receiver);
- Node* global = assembler->LoadObjectField(proxy_map, Map::kPrototypeOffset);
- Node* map_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ Node* proxy_map = assembler.LoadMap(receiver);
+ Node* global = assembler.LoadObjectField(proxy_map, Map::kPrototypeOffset);
+ Node* map_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::global_map_placeholder(isolate())));
- Node* expected_map = assembler->LoadWeakCellValueUnchecked(map_cell);
- Node* map = assembler->LoadMap(global);
- assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+ Node* expected_map = assembler.LoadWeakCellValueUnchecked(map_cell);
+ Node* map = assembler.LoadMap(global);
+ assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
}
- Node* weak_cell = assembler->HeapConstant(isolate()->factory()->NewWeakCell(
+ Node* weak_cell = assembler.HeapConstant(isolate()->factory()->NewWeakCell(
StoreGlobalStub::property_cell_placeholder(isolate())));
- Node* cell = assembler->LoadWeakCellValue(weak_cell);
- assembler->GotoIf(assembler->TaggedIsSmi(cell), &miss);
+ Node* cell = assembler.LoadWeakCellValue(weak_cell);
+ assembler.GotoIf(assembler.TaggedIsSmi(cell), &miss);
// Load the payload of the global parameter cell. A hole indicates that the
// cell has been invalidated and that the store must be handled by the
// runtime.
Node* cell_contents =
- assembler->LoadObjectField(cell, PropertyCell::kValueOffset);
+ assembler.LoadObjectField(cell, PropertyCell::kValueOffset);
PropertyCellType cell_type = this->cell_type();
if (cell_type == PropertyCellType::kConstant ||
cell_type == PropertyCellType::kUndefined) {
// This is always valid for all states a cell can be in.
- assembler->GotoIf(assembler->WordNotEqual(cell_contents, value), &miss);
+ assembler.GotoIf(assembler.WordNotEqual(cell_contents, value), &miss);
} else {
- assembler->GotoIf(assembler->IsTheHole(cell_contents), &miss);
+ assembler.GotoIf(assembler.IsTheHole(cell_contents), &miss);
// When dealing with constant types, the type may be allowed to change, as
// long as optimized code remains valid.
@@ -2059,7 +1582,7 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
if (cell_type == PropertyCellType::kConstantType) {
switch (constant_type()) {
case PropertyCellConstantType::kSmi:
- assembler->GotoUnless(assembler->TaggedIsSmi(value), &miss);
+ assembler.GotoUnless(assembler.TaggedIsSmi(value), &miss);
value_is_smi = true;
break;
case PropertyCellConstantType::kStableMap: {
@@ -2068,273 +1591,156 @@ void StoreGlobalStub::GenerateAssembly(CodeStubAssembler* assembler) const {
// are the maps that were originally in the cell or not. If optimized
        // code will deopt when a cell has an unstable map and if it has a
// dependency on a stable map, it will deopt if the map destabilizes.
- assembler->GotoIf(assembler->TaggedIsSmi(value), &miss);
- assembler->GotoIf(assembler->TaggedIsSmi(cell_contents), &miss);
- Node* expected_map = assembler->LoadMap(cell_contents);
- Node* map = assembler->LoadMap(value);
- assembler->GotoIf(assembler->WordNotEqual(expected_map, map), &miss);
+ assembler.GotoIf(assembler.TaggedIsSmi(value), &miss);
+ assembler.GotoIf(assembler.TaggedIsSmi(cell_contents), &miss);
+ Node* expected_map = assembler.LoadMap(cell_contents);
+ Node* map = assembler.LoadMap(value);
+ assembler.GotoIf(assembler.WordNotEqual(expected_map, map), &miss);
break;
}
}
}
if (value_is_smi) {
- assembler->StoreObjectFieldNoWriteBarrier(
- cell, PropertyCell::kValueOffset, value);
+ assembler.StoreObjectFieldNoWriteBarrier(cell, PropertyCell::kValueOffset,
+ value);
} else {
- assembler->StoreObjectField(cell, PropertyCell::kValueOffset, value);
+ assembler.StoreObjectField(cell, PropertyCell::kValueOffset, value);
}
}
- assembler->Return(value);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
- vector, receiver, name);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
+ vector, receiver, name);
}
}
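
StoreGlobalStub above follows the miss-label idiom shared by most handler stubs in this file: each guard jumps to a single deferred label that tail-calls the runtime. A condensed sketch of that control flow, with names as in the stub above:

    CodeStubAssembler::Label miss(&assembler);
    assembler.GotoIf(assembler.TaggedIsSmi(cell), &miss);  // guard(s)
    assembler.Return(value);                               // fast path
    assembler.Bind(&miss);
    assembler.TailCallRuntime(Runtime::kStoreIC_Miss, context, value, slot,
                              vector, receiver, name);
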
+void LoadFieldStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ AccessorAssembler::GenerateLoadField(state);
+}
+
void KeyedLoadSloppyArgumentsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- Node* result = assembler->LoadKeyedSloppyArguments(receiver, key, &miss);
- assembler->Return(result);
+ Node* result = assembler.LoadKeyedSloppyArguments(receiver, key, &miss);
+ assembler.Return(result);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
- key, slot, vector);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver,
+ key, slot, vector);
}
}
void KeyedStoreSloppyArgumentsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- assembler->StoreKeyedSloppyArguments(receiver, key, value, &miss);
- assembler->Return(value);
+ assembler.StoreKeyedSloppyArguments(receiver, key, value, &miss);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
- slot, vector, receiver, key);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+ vector, receiver, key);
}
}
void LoadScriptContextFieldStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
- context_index(), slot_index());
+ assembler.Comment("LoadScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* script_context = assembler->LoadScriptContext(context, context_index());
- Node* result = assembler->LoadFixedArrayElement(
- script_context, assembler->IntPtrConstant(slot_index()), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Return(result);
+ Node* script_context = assembler.LoadScriptContext(context, context_index());
+ Node* result = assembler.LoadFixedArrayElement(script_context, slot_index());
+ assembler.Return(result);
}
void StoreScriptContextFieldStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
- context_index(), slot_index());
+ assembler.Comment("StoreScriptContextFieldStub: context_index=%d, slot=%d",
+ context_index(), slot_index());
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* script_context = assembler->LoadScriptContext(context, context_index());
- assembler->StoreFixedArrayElement(
- script_context, assembler->IntPtrConstant(slot_index()), value,
- UPDATE_WRITE_BARRIER, CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->Return(value);
+ Node* script_context = assembler.LoadScriptContext(context, context_index());
+ assembler.StoreFixedArrayElement(
+ script_context, assembler.IntPtrConstant(slot_index()), value);
+ assembler.Return(value);
}
void StoreInterceptorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* name = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* context = assembler->Parameter(Descriptor::kContext);
- assembler->TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
- receiver, name, value);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* name = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* context = assembler.Parameter(Descriptor::kContext);
+ assembler.TailCallRuntime(Runtime::kStorePropertyWithInterceptor, context,
+ receiver, name, value);
}
void LoadIndexedInterceptorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
+ CodeStubAssembler assembler(state);
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- Label if_keyispositivesmi(assembler), if_keyisinvalid(assembler);
- assembler->Branch(assembler->WordIsPositiveSmi(key), &if_keyispositivesmi,
- &if_keyisinvalid);
- assembler->Bind(&if_keyispositivesmi);
- assembler->TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
- receiver, key);
-
- assembler->Bind(&if_keyisinvalid);
- assembler->TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
- slot, vector);
-}
-
-// static
-bool FastCloneShallowObjectStub::IsSupported(ObjectLiteral* expr) {
- // FastCloneShallowObjectStub doesn't copy elements, and object literals don't
- // support copy-on-write (COW) elements for now.
- // TODO(mvstanton): make object literals support COW elements.
- return expr->fast_elements() && expr->has_shallow_properties() &&
- expr->properties_count() <= kMaximumClonedProperties;
-}
-
-// static
-int FastCloneShallowObjectStub::PropertiesCount(int literal_length) {
- // This heuristic of setting empty literals to have
- // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
- // runtime.
- // TODO(verwaest): Unify this with the heuristic in the runtime.
- return literal_length == 0
- ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
- : literal_length;
-}
-
-// static
-compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
- CodeStubAssembler* assembler, compiler::CodeAssembler::Label* call_runtime,
- compiler::Node* closure, compiler::Node* literals_index,
- compiler::Node* properties_count) {
- typedef compiler::Node Node;
- typedef compiler::CodeAssembler::Label Label;
- typedef compiler::CodeAssembler::Variable Variable;
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literals_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
-
- // Calculate the object and allocation size based on the properties count.
- Node* object_size = assembler->IntPtrAdd(
- assembler->WordShl(properties_count, kPointerSizeLog2),
- assembler->IntPtrConstant(JSObject::kHeaderSize));
- Node* allocation_size = object_size;
- if (FLAG_allocation_site_pretenuring) {
- allocation_size = assembler->IntPtrAdd(
- object_size, assembler->IntPtrConstant(AllocationMemento::kSize));
- }
- Node* boilerplate = assembler->LoadObjectField(
- allocation_site, AllocationSite::kTransitionInfoOffset);
- Node* boilerplate_map = assembler->LoadMap(boilerplate);
- Node* instance_size = assembler->LoadMapInstanceSize(boilerplate_map);
- Node* size_in_words = assembler->WordShr(object_size, kPointerSizeLog2);
- assembler->GotoUnless(assembler->Word32Equal(instance_size, size_in_words),
- call_runtime);
-
- Node* copy = assembler->Allocate(allocation_size);
-
- // Copy boilerplate elements.
- Variable offset(assembler, MachineType::PointerRepresentation());
- offset.Bind(assembler->IntPtrConstant(-kHeapObjectTag));
- Node* end_offset = assembler->IntPtrAdd(object_size, offset.value());
- Label loop_body(assembler, &offset), loop_check(assembler, &offset);
- // We should always have an object size greater than zero.
- assembler->Goto(&loop_body);
- assembler->Bind(&loop_body);
- {
- // The Allocate above guarantees that the copy lies in new space. This
- // allows us to skip write barriers. This is necessary since we may also be
- // copying unboxed doubles.
- Node* field =
- assembler->Load(MachineType::IntPtr(), boilerplate, offset.value());
- assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
- offset.value(), field);
- assembler->Goto(&loop_check);
- }
- assembler->Bind(&loop_check);
- {
- offset.Bind(assembler->IntPtrAdd(offset.value(),
- assembler->IntPtrConstant(kPointerSize)));
- assembler->GotoUnless(
- assembler->IntPtrGreaterThanOrEqual(offset.value(), end_offset),
- &loop_body);
- }
-
- if (FLAG_allocation_site_pretenuring) {
- Node* memento = assembler->InnerAllocate(copy, object_size);
- assembler->StoreObjectFieldNoWriteBarrier(
- memento, HeapObject::kMapOffset,
- assembler->LoadRoot(Heap::kAllocationMementoMapRootIndex));
- assembler->StoreObjectFieldNoWriteBarrier(
- memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
- Node* memento_create_count = assembler->LoadObjectField(
- allocation_site, AllocationSite::kPretenureCreateCountOffset);
- memento_create_count = assembler->SmiAdd(
- memento_create_count, assembler->SmiConstant(Smi::FromInt(1)));
- assembler->StoreObjectFieldNoWriteBarrier(
- allocation_site, AllocationSite::kPretenureCreateCountOffset,
- memento_create_count);
- }
-
- // TODO(verwaest): Allocate and fill in double boxes.
- return copy;
-}
-
-void FastCloneShallowObjectStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef CodeStubAssembler::Label Label;
- typedef compiler::Node Node;
- Label call_runtime(assembler);
- Node* closure = assembler->Parameter(0);
- Node* literals_index = assembler->Parameter(1);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Node* properties_count =
- assembler->IntPtrConstant(PropertiesCount(this->length()));
- Node* copy = GenerateFastPath(assembler, &call_runtime, closure,
- literals_index, properties_count);
- assembler->Return(copy);
+ Label if_keyispositivesmi(&assembler), if_keyisinvalid(&assembler);
+ assembler.Branch(assembler.TaggedIsPositiveSmi(key), &if_keyispositivesmi,
+ &if_keyisinvalid);
+ assembler.Bind(&if_keyispositivesmi);
+ assembler.TailCallRuntime(Runtime::kLoadElementWithInterceptor, context,
+ receiver, key);
- assembler->Bind(&call_runtime);
- Node* constant_properties = assembler->Parameter(2);
- Node* flags = assembler->Parameter(3);
- Node* context = assembler->Parameter(4);
- assembler->TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literals_index, constant_properties, flags);
+ assembler.Bind(&if_keyisinvalid);
+ assembler.TailCallRuntime(Runtime::kKeyedLoadIC_Miss, context, receiver, key,
+ slot, vector);
}
template<class StateType>
@@ -2342,7 +1748,19 @@ void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
// Note: Although a no-op transition is semantically OK, it is hinting at a
// bug somewhere in our state transition machinery.
DCHECK(from != to);
- if (!FLAG_trace_ic) return;
+ if (V8_LIKELY(!FLAG_ic_stats)) return;
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto ic_stats = ICStats::instance();
+ ic_stats->Begin();
+ ICInfo& ic_info = ic_stats->Current();
+ ic_info.type = MajorName(MajorKey());
+ ic_info.state = ToString(from);
+ ic_info.state += "=>";
+ ic_info.state += ToString(to);
+ ic_stats->End();
+ return;
+ }
OFStream os(stdout);
os << "[";
PrintBaseName(os);
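
TraceTransition's early-out above now keys off FLAG_ic_stats instead of FLAG_trace_ic, recording into ICStats when the tracing category enables it. A condensed sketch of the recording pattern added above (field values are illustrative):

    auto ic_stats = ICStats::instance();
    ic_stats->Begin();                 // open an entry
    ICInfo& ic_info = ic_stats->Current();
    ic_info.type = "ExampleStub";      // MajorName(MajorKey()) above
    ic_info.state = "A=>B";            // from/to joined with "=>"
    ic_stats->End();                   // commit the entry
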
@@ -2362,12 +1780,6 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void LoadDictionaryElementStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- FUNCTION_ADDR(Runtime_KeyedLoadIC_MissFromStubFailure));
-}
-
void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
DCHECK(kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC);
if (kind() == Code::KEYED_LOAD_IC) {
@@ -2427,528 +1839,67 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
FUNCTION_ADDR(Runtime_BinaryOpIC_MissWithAllocationSite));
}
-void GetPropertyStub::GenerateAssembly(CodeStubAssembler* assembler) const {
+void GetPropertyStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
+ CodeStubAssembler assembler(state);
- Label call_runtime(assembler, Label::kDeferred), return_undefined(assembler),
- end(assembler);
+ Label call_runtime(&assembler, Label::kDeferred),
+ return_undefined(&assembler), end(&assembler);
- Node* object = assembler->Parameter(0);
- Node* key = assembler->Parameter(1);
- Node* context = assembler->Parameter(2);
- Variable var_result(assembler, MachineRepresentation::kTagged);
+ Node* object = assembler.Parameter(0);
+ Node* key = assembler.Parameter(1);
+ Node* context = assembler.Parameter(2);
+ Variable var_result(&assembler, MachineRepresentation::kTagged);
CodeStubAssembler::LookupInHolder lookup_property_in_holder =
- [assembler, context, &var_result, &end](
+ [&assembler, context, &var_result, &end](
Node* receiver, Node* holder, Node* holder_map,
Node* holder_instance_type, Node* unique_name, Label* next_holder,
Label* if_bailout) {
- Variable var_value(assembler, MachineRepresentation::kTagged);
- Label if_found(assembler);
- assembler->TryGetOwnProperty(
+ Variable var_value(&assembler, MachineRepresentation::kTagged);
+ Label if_found(&assembler);
+ assembler.TryGetOwnProperty(
context, receiver, holder, holder_map, holder_instance_type,
unique_name, &if_found, &var_value, next_holder, if_bailout);
- assembler->Bind(&if_found);
+ assembler.Bind(&if_found);
{
var_result.Bind(var_value.value());
- assembler->Goto(&end);
+ assembler.Goto(&end);
}
};
CodeStubAssembler::LookupInHolder lookup_element_in_holder =
- [assembler, context, &var_result, &end](
+ [&assembler, context, &var_result, &end](
Node* receiver, Node* holder, Node* holder_map,
Node* holder_instance_type, Node* index, Label* next_holder,
Label* if_bailout) {
// Not supported yet.
- assembler->Use(next_holder);
- assembler->Goto(if_bailout);
+ assembler.Use(next_holder);
+ assembler.Goto(if_bailout);
};
- assembler->TryPrototypeChainLookup(object, key, lookup_property_in_holder,
- lookup_element_in_holder,
- &return_undefined, &call_runtime);
+ assembler.TryPrototypeChainLookup(object, key, lookup_property_in_holder,
+ lookup_element_in_holder, &return_undefined,
+ &call_runtime);
- assembler->Bind(&return_undefined);
+ assembler.Bind(&return_undefined);
{
- var_result.Bind(assembler->UndefinedConstant());
- assembler->Goto(&end);
+ var_result.Bind(assembler.UndefinedConstant());
+ assembler.Goto(&end);
}
- assembler->Bind(&call_runtime);
+ assembler.Bind(&call_runtime);
{
var_result.Bind(
- assembler->CallRuntime(Runtime::kGetProperty, context, object, key));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- assembler->Return(var_result.value());
-}
-
-// static
-compiler::Node* FastNewClosureStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* shared_info,
- compiler::Node* context) {
- typedef compiler::Node Node;
- typedef compiler::CodeAssembler::Label Label;
- typedef compiler::CodeAssembler::Variable Variable;
-
- Isolate* isolate = assembler->isolate();
- Factory* factory = assembler->isolate()->factory();
- assembler->IncrementCounter(isolate->counters()->fast_new_closure_total(), 1);
-
- // Create a new closure from the given function info in new space
- Node* result = assembler->Allocate(JSFunction::kSize);
-
- // Calculate the index of the map we should install on the function based on
- // the FunctionKind and LanguageMode of the function.
- // Note: Must be kept in sync with Context::FunctionMapIndex
- Node* compiler_hints = assembler->LoadObjectField(
- shared_info, SharedFunctionInfo::kCompilerHintsOffset,
- MachineType::Uint32());
- Node* is_strict = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(1 << SharedFunctionInfo::kStrictModeBit));
-
- Label if_normal(assembler), if_generator(assembler), if_async(assembler),
- if_class_constructor(assembler), if_function_without_prototype(assembler),
- load_map(assembler);
- Variable map_index(assembler, MachineType::PointerRepresentation());
-
- STATIC_ASSERT(FunctionKind::kNormalFunction == 0);
- Node* is_not_normal = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
- assembler->GotoUnless(is_not_normal, &if_normal);
-
- Node* is_generator = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kGeneratorFunction
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_generator, &if_generator);
-
- Node* is_async = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kAsyncFunction
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_async, &if_async);
-
- Node* is_class_constructor = assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(FunctionKind::kClassConstructor
- << SharedFunctionInfo::kFunctionKindShift));
- assembler->GotoIf(is_class_constructor, &if_class_constructor);
-
- if (FLAG_debug_code) {
- // Function must be a function without a prototype.
- CSA_ASSERT(assembler, assembler->Word32And(
- compiler_hints,
- assembler->Int32Constant(
- (FunctionKind::kAccessorFunction |
- FunctionKind::kArrowFunction |
- FunctionKind::kConciseMethod)
- << SharedFunctionInfo::kFunctionKindShift)));
- }
- assembler->Goto(&if_function_without_prototype);
-
- assembler->Bind(&if_normal);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(Context::SLOPPY_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_generator);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_GENERATOR_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(
- Context::SLOPPY_GENERATOR_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_async);
- {
- map_index.Bind(assembler->Select(
- is_strict,
- assembler->IntPtrConstant(Context::STRICT_ASYNC_FUNCTION_MAP_INDEX),
- assembler->IntPtrConstant(Context::SLOPPY_ASYNC_FUNCTION_MAP_INDEX)));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_class_constructor);
- {
- map_index.Bind(
- assembler->IntPtrConstant(Context::STRICT_FUNCTION_MAP_INDEX));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&if_function_without_prototype);
- {
- map_index.Bind(assembler->IntPtrConstant(
- Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX));
- assembler->Goto(&load_map);
- }
-
- assembler->Bind(&load_map);
-
- // Get the function map in the current native context and set that
- // as the map of the allocated object.
- Node* native_context = assembler->LoadNativeContext(context);
- Node* map_slot_value =
- assembler->LoadFixedArrayElement(native_context, map_index.value(), 0,
- CodeStubAssembler::INTPTR_PARAMETERS);
- assembler->StoreMapNoWriteBarrier(result, map_slot_value);
-
- // Initialize the rest of the function.
- Node* empty_fixed_array =
- assembler->HeapConstant(factory->empty_fixed_array());
- Node* empty_literals_array =
- assembler->HeapConstant(factory->empty_literals_array());
- assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
- empty_fixed_array);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
- empty_fixed_array);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
- empty_literals_array);
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kPrototypeOrInitialMapOffset,
- assembler->TheHoleConstant());
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kSharedFunctionInfoOffset, shared_info);
- assembler->StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset,
- context);
- Handle<Code> lazy_builtin_handle(
- assembler->isolate()->builtins()->builtin(Builtins::kCompileLazy));
- Node* lazy_builtin = assembler->HeapConstant(lazy_builtin_handle);
- Node* lazy_builtin_entry = assembler->IntPtrAdd(
- lazy_builtin,
- assembler->IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
- assembler->StoreObjectFieldNoWriteBarrier(
- result, JSFunction::kCodeEntryOffset, lazy_builtin_entry);
- assembler->StoreObjectFieldNoWriteBarrier(result,
- JSFunction::kNextFunctionLinkOffset,
- assembler->UndefinedConstant());
-
- return result;
-}
-
-void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- assembler->Return(
- Generate(assembler, assembler->Parameter(0), assembler->Parameter(1)));
-}
-
-// static
-compiler::Node* FastNewFunctionContextStub::Generate(
- CodeStubAssembler* assembler, compiler::Node* function,
- compiler::Node* slots, compiler::Node* context) {
- typedef compiler::Node Node;
-
- Node* min_context_slots =
- assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS);
- Node* length = assembler->Int32Add(slots, min_context_slots);
- Node* size = assembler->Int32Add(
- assembler->Word32Shl(length, assembler->Int32Constant(kPointerSizeLog2)),
- assembler->Int32Constant(FixedArray::kHeaderSize));
-
- // Create a new closure from the given function info in new space
- Node* function_context = assembler->Allocate(size);
-
- Isolate* isolate = assembler->isolate();
- assembler->StoreMapNoWriteBarrier(
- function_context,
- assembler->HeapConstant(isolate->factory()->function_context_map()));
- assembler->StoreObjectFieldNoWriteBarrier(function_context,
- Context::kLengthOffset,
- assembler->SmiFromWord32(length));
-
- // Set up the fixed slots.
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::CLOSURE_INDEX),
- function, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::PREVIOUS_INDEX),
- context, SKIP_WRITE_BARRIER);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::EXTENSION_INDEX),
- assembler->TheHoleConstant(), SKIP_WRITE_BARRIER);
-
- // Copy the native context from the previous context.
- Node* native_context = assembler->LoadNativeContext(context);
- assembler->StoreFixedArrayElement(
- function_context, assembler->Int32Constant(Context::NATIVE_CONTEXT_INDEX),
- native_context, SKIP_WRITE_BARRIER);
-
- // Initialize the rest of the slots to undefined.
- Node* undefined = assembler->UndefinedConstant();
- assembler->BuildFastFixedArrayForEach(
- function_context, FAST_ELEMENTS, min_context_slots, length,
- [undefined](CodeStubAssembler* assembler, Node* context, Node* offset) {
- assembler->StoreNoWriteBarrier(MachineType::PointerRepresentation(),
- context, offset, undefined);
- });
-
- return function_context;
-}
-
-void FastNewFunctionContextStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
- Node* slots = assembler->Parameter(FastNewFunctionContextDescriptor::kSlots);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- assembler->Return(Generate(assembler, function, slots, context));
-}
-
-// static
-compiler::Node* FastCloneRegExpStub::Generate(CodeStubAssembler* assembler,
- compiler::Node* closure,
- compiler::Node* literal_index,
- compiler::Node* pattern,
- compiler::Node* flags,
- compiler::Node* context) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- typedef compiler::Node Node;
-
- Label call_runtime(assembler, Label::kDeferred), end(assembler);
-
- Variable result(assembler, MachineRepresentation::kTagged);
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* boilerplate = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
- assembler->GotoIf(assembler->IsUndefined(boilerplate), &call_runtime);
-
- {
- int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
- Node* copy = assembler->Allocate(size);
- for (int offset = 0; offset < size; offset += kPointerSize) {
- Node* value = assembler->LoadObjectField(boilerplate, offset);
- assembler->StoreObjectFieldNoWriteBarrier(copy, offset, value);
- }
- result.Bind(copy);
- assembler->Goto(&end);
- }
-
- assembler->Bind(&call_runtime);
- {
- result.Bind(assembler->CallRuntime(Runtime::kCreateRegExpLiteral, context,
- closure, literal_index, pattern, flags));
- assembler->Goto(&end);
- }
-
- assembler->Bind(&end);
- return result.value();
-}
-
-void FastCloneRegExpStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- Node* closure = assembler->Parameter(Descriptor::kClosure);
- Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
- Node* pattern = assembler->Parameter(Descriptor::kPattern);
- Node* flags = assembler->Parameter(Descriptor::kFlags);
- Node* context = assembler->Parameter(Descriptor::kContext);
-
- assembler->Return(
- Generate(assembler, closure, literal_index, pattern, flags, context));
-}
-
-namespace {
-
-compiler::Node* NonEmptyShallowClone(CodeStubAssembler* assembler,
- compiler::Node* boilerplate,
- compiler::Node* boilerplate_map,
- compiler::Node* boilerplate_elements,
- compiler::Node* allocation_site,
- compiler::Node* capacity,
- ElementsKind kind) {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::ParameterMode ParameterMode;
-
- ParameterMode param_mode = CodeStubAssembler::SMI_PARAMETERS;
-
- Node* length = assembler->LoadJSArrayLength(boilerplate);
-
- if (assembler->Is64()) {
- capacity = assembler->SmiUntag(capacity);
- param_mode = CodeStubAssembler::INTEGER_PARAMETERS;
- }
-
- Node *array, *elements;
- std::tie(array, elements) =
- assembler->AllocateUninitializedJSArrayWithElements(
- kind, boilerplate_map, length, allocation_site, capacity, param_mode);
-
- assembler->Comment("copy elements header");
- for (int offset = 0; offset < FixedArrayBase::kHeaderSize;
- offset += kPointerSize) {
- Node* value = assembler->LoadObjectField(boilerplate_elements, offset);
- assembler->StoreObjectField(elements, offset, value);
- }
-
- if (assembler->Is64()) {
- length = assembler->SmiUntag(length);
- }
-
- assembler->Comment("copy boilerplate elements");
- assembler->CopyFixedArrayElements(kind, boilerplate_elements, elements,
- length, SKIP_WRITE_BARRIER, param_mode);
- assembler->IncrementCounter(
- assembler->isolate()->counters()->inlined_copied_elements(), 1);
-
- return array;
-}
-
-} // namespace
-
-// static
-compiler::Node* FastCloneShallowArrayStub::Generate(
- CodeStubAssembler* assembler, compiler::Node* closure,
- compiler::Node* literal_index, compiler::Node* context,
- CodeStubAssembler::Label* call_runtime,
- AllocationSiteMode allocation_site_mode) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
- typedef compiler::Node Node;
-
- Label zero_capacity(assembler), cow_elements(assembler),
- fast_elements(assembler), return_result(assembler);
- Variable result(assembler, MachineRepresentation::kTagged);
-
- Node* literals_array =
- assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
- Node* allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
-
- assembler->GotoIf(assembler->IsUndefined(allocation_site), call_runtime);
- allocation_site = assembler->LoadFixedArrayElement(
- literals_array, literal_index,
- LiteralsArray::kFirstLiteralIndex * kPointerSize,
- CodeStubAssembler::SMI_PARAMETERS);
-
- Node* boilerplate = assembler->LoadObjectField(
- allocation_site, AllocationSite::kTransitionInfoOffset);
- Node* boilerplate_map = assembler->LoadMap(boilerplate);
- Node* boilerplate_elements = assembler->LoadElements(boilerplate);
- Node* capacity = assembler->LoadFixedArrayBaseLength(boilerplate_elements);
- allocation_site =
- allocation_site_mode == TRACK_ALLOCATION_SITE ? allocation_site : nullptr;
-
- Node* zero = assembler->SmiConstant(Smi::kZero);
- assembler->GotoIf(assembler->SmiEqual(capacity, zero), &zero_capacity);
-
- Node* elements_map = assembler->LoadMap(boilerplate_elements);
- assembler->GotoIf(assembler->IsFixedCOWArrayMap(elements_map), &cow_elements);
-
- assembler->GotoIf(assembler->IsFixedArrayMap(elements_map), &fast_elements);
- {
- assembler->Comment("fast double elements path");
- if (FLAG_debug_code) {
- Label correct_elements_map(assembler), abort(assembler, Label::kDeferred);
- assembler->Branch(assembler->IsFixedDoubleArrayMap(elements_map),
- &correct_elements_map, &abort);
-
- assembler->Bind(&abort);
- {
- Node* abort_id = assembler->SmiConstant(
- Smi::FromInt(BailoutReason::kExpectedFixedDoubleArrayMap));
- assembler->CallRuntime(Runtime::kAbort, context, abort_id);
- result.Bind(assembler->UndefinedConstant());
- assembler->Goto(&return_result);
- }
- assembler->Bind(&correct_elements_map);
- }
-
- Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
- boilerplate_elements, allocation_site,
- capacity, FAST_DOUBLE_ELEMENTS);
- result.Bind(array);
- assembler->Goto(&return_result);
+ assembler.CallRuntime(Runtime::kGetProperty, context, object, key));
+ assembler.Goto(&end);
}
- assembler->Bind(&fast_elements);
- {
- assembler->Comment("fast elements path");
- Node* array = NonEmptyShallowClone(assembler, boilerplate, boilerplate_map,
- boilerplate_elements, allocation_site,
- capacity, FAST_ELEMENTS);
- result.Bind(array);
- assembler->Goto(&return_result);
- }
-
- Variable length(assembler, MachineRepresentation::kTagged),
- elements(assembler, MachineRepresentation::kTagged);
- Label allocate_without_elements(assembler);
-
- assembler->Bind(&cow_elements);
- {
- assembler->Comment("fixed cow path");
- length.Bind(assembler->LoadJSArrayLength(boilerplate));
- elements.Bind(boilerplate_elements);
-
- assembler->Goto(&allocate_without_elements);
- }
-
- assembler->Bind(&zero_capacity);
- {
- assembler->Comment("zero capacity path");
- length.Bind(zero);
- elements.Bind(assembler->LoadRoot(Heap::kEmptyFixedArrayRootIndex));
-
- assembler->Goto(&allocate_without_elements);
- }
-
- assembler->Bind(&allocate_without_elements);
- {
- Node* array = assembler->AllocateUninitializedJSArrayWithoutElements(
- FAST_ELEMENTS, boilerplate_map, length.value(), allocation_site);
- assembler->StoreObjectField(array, JSObject::kElementsOffset,
- elements.value());
- result.Bind(array);
- assembler->Goto(&return_result);
- }
-
- assembler->Bind(&return_result);
- return result.value();
-}
-
-void FastCloneShallowArrayStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- typedef compiler::Node Node;
- typedef CodeStubAssembler::Label Label;
- Node* closure = assembler->Parameter(Descriptor::kClosure);
- Node* literal_index = assembler->Parameter(Descriptor::kLiteralIndex);
- Node* constant_elements = assembler->Parameter(Descriptor::kConstantElements);
- Node* context = assembler->Parameter(Descriptor::kContext);
- Label call_runtime(assembler, Label::kDeferred);
- assembler->Return(Generate(assembler, closure, literal_index, context,
- &call_runtime, allocation_site_mode()));
-
- assembler->Bind(&call_runtime);
- {
- assembler->Comment("call runtime");
- Node* flags = assembler->SmiConstant(
- Smi::FromInt(ArrayLiteral::kShallowElements |
- (allocation_site_mode() == TRACK_ALLOCATION_SITE
- ? 0
- : ArrayLiteral::kDisableMementos)));
- assembler->Return(assembler->CallRuntime(Runtime::kCreateArrayLiteral,
- context, closure, literal_index,
- constant_elements, flags));
- }
+ assembler.Bind(&end);
+ assembler.Return(var_result.value());
}
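
Because the assembler is now a stack-local object rather than a pointer parameter, GetPropertyStub's LookupInHolder lambdas capture it by reference ([&assembler, ...] instead of the old [assembler, ...]). A minimal sketch of the pattern, with a hypothetical helper and illustrative signature:

    CodeStubAssembler assembler(state);
    auto bail = [&assembler](CodeStubAssembler::Label* if_bailout) {
      // Uses the enclosing stack-local assembler by reference.
      assembler.Goto(if_bailout);
    };
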
void CreateAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -2969,32 +1920,33 @@ void StoreElementStub::Generate(MacroAssembler* masm) {
}
void StoreFastElementStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef CodeStubAssembler::Label Label;
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
- assembler->Comment(
+ assembler.Comment(
"StoreFastElementStub: js_array=%d, elements_kind=%s, store_mode=%d",
is_js_array(), ElementsKindToString(elements_kind()), store_mode());
- Node* receiver = assembler->Parameter(Descriptor::kReceiver);
- Node* key = assembler->Parameter(Descriptor::kName);
- Node* value = assembler->Parameter(Descriptor::kValue);
- Node* slot = assembler->Parameter(Descriptor::kSlot);
- Node* vector = assembler->Parameter(Descriptor::kVector);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* receiver = assembler.Parameter(Descriptor::kReceiver);
+ Node* key = assembler.Parameter(Descriptor::kName);
+ Node* value = assembler.Parameter(Descriptor::kValue);
+ Node* slot = assembler.Parameter(Descriptor::kSlot);
+ Node* vector = assembler.Parameter(Descriptor::kVector);
+ Node* context = assembler.Parameter(Descriptor::kContext);
- Label miss(assembler);
+ Label miss(&assembler);
- assembler->EmitElementStore(receiver, key, value, is_js_array(),
- elements_kind(), store_mode(), &miss);
- assembler->Return(value);
+ assembler.EmitElementStore(receiver, key, value, is_js_array(),
+ elements_kind(), store_mode(), &miss);
+ assembler.Return(value);
- assembler->Bind(&miss);
+ assembler.Bind(&miss);
{
- assembler->Comment("Miss");
- assembler->TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value,
- slot, vector, receiver, key);
+ assembler.Comment("Miss");
+ assembler.TailCallRuntime(Runtime::kKeyedStoreIC_Miss, context, value, slot,
+ vector, receiver, key);
}
}
@@ -3078,50 +2030,55 @@ void ProfileEntryHookStub::EntryHookTrampoline(intptr_t function,
}
void CreateAllocationSiteStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
- assembler->Return(assembler->CreateAllocationSiteInFeedbackVector(
- assembler->Parameter(Descriptor::kVector),
- assembler->Parameter(Descriptor::kSlot)));
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.CreateAllocationSiteInFeedbackVector(
+ assembler.Parameter(Descriptor::kVector),
+ assembler.Parameter(Descriptor::kSlot)));
}
-void CreateWeakCellStub::GenerateAssembly(CodeStubAssembler* assembler) const {
- assembler->Return(assembler->CreateWeakCellInFeedbackVector(
- assembler->Parameter(Descriptor::kVector),
- assembler->Parameter(Descriptor::kSlot),
- assembler->Parameter(Descriptor::kValue)));
+void CreateWeakCellStub::GenerateAssembly(
+ compiler::CodeAssemblerState* state) const {
+ CodeStubAssembler assembler(state);
+ assembler.Return(assembler.CreateWeakCellInFeedbackVector(
+ assembler.Parameter(Descriptor::kVector),
+ assembler.Parameter(Descriptor::kSlot),
+ assembler.Parameter(Descriptor::kValue)));
}
void ArrayNoArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* native_context = assembler->LoadObjectField(
- assembler->Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
+ CodeStubAssembler assembler(state);
+ Node* native_context = assembler.LoadObjectField(
+ assembler.Parameter(Descriptor::kFunction), JSFunction::kContextOffset);
bool track_allocation_site =
AllocationSite::GetMode(elements_kind()) == TRACK_ALLOCATION_SITE &&
override_mode() != DISABLE_ALLOCATION_SITES;
- Node* allocation_site =
- track_allocation_site ? assembler->Parameter(Descriptor::kAllocationSite)
- : nullptr;
+ Node* allocation_site = track_allocation_site
+ ? assembler.Parameter(Descriptor::kAllocationSite)
+ : nullptr;
Node* array_map =
- assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
- Node* array = assembler->AllocateJSArray(
+ assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
+ Node* array = assembler.AllocateJSArray(
elements_kind(), array_map,
- assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::kZero), allocation_site);
- assembler->Return(array);
+ assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler.SmiConstant(Smi::kZero), allocation_site);
+ assembler.Return(array);
}
void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
+ CodeStubAssembler assembler(state);
Node* array_map =
- assembler->LoadObjectField(assembler->Parameter(Descriptor::kFunction),
- JSFunction::kPrototypeOrInitialMapOffset);
- Node* array = assembler->AllocateJSArray(
+ assembler.LoadObjectField(assembler.Parameter(Descriptor::kFunction),
+ JSFunction::kPrototypeOrInitialMapOffset);
+ Node* array = assembler.AllocateJSArray(
elements_kind(), array_map,
- assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
- assembler->SmiConstant(Smi::kZero), nullptr);
- assembler->Return(array);
+ assembler.IntPtrConstant(JSArray::kPreallocatedArrayElements),
+ assembler.SmiConstant(Smi::kZero));
+ assembler.Return(array);
}
namespace {
@@ -3191,49 +2148,52 @@ void SingleArgumentConstructorCommon(CodeStubAssembler* assembler,
} // namespace
void ArraySingleArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
+ CodeStubAssembler assembler(state);
+ Node* function = assembler.Parameter(Descriptor::kFunction);
Node* native_context =
- assembler->LoadObjectField(function, JSFunction::kContextOffset);
+ assembler.LoadObjectField(function, JSFunction::kContextOffset);
Node* array_map =
- assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
+ assembler.LoadJSArrayElementsMap(elements_kind(), native_context);
AllocationSiteMode mode = override_mode() == DISABLE_ALLOCATION_SITES
? DONT_TRACK_ALLOCATION_SITE
: AllocationSite::GetMode(elements_kind());
- Node* allocation_site = assembler->Parameter(Descriptor::kAllocationSite);
- SingleArgumentConstructorCommon<Descriptor>(assembler, elements_kind(),
+ Node* allocation_site = assembler.Parameter(Descriptor::kAllocationSite);
+ SingleArgumentConstructorCommon<Descriptor>(&assembler, elements_kind(),
array_map, allocation_site, mode);
}
void InternalArraySingleArgumentConstructorStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- Node* function = assembler->Parameter(Descriptor::kFunction);
- Node* array_map = assembler->LoadObjectField(
+ CodeStubAssembler assembler(state);
+ Node* function = assembler.Parameter(Descriptor::kFunction);
+ Node* array_map = assembler.LoadObjectField(
function, JSFunction::kPrototypeOrInitialMapOffset);
SingleArgumentConstructorCommon<Descriptor>(
- assembler, elements_kind(), array_map, assembler->UndefinedConstant(),
+ &assembler, elements_kind(), array_map, assembler.UndefinedConstant(),
DONT_TRACK_ALLOCATION_SITE);
}
void GrowArrayElementsStub::GenerateAssembly(
- CodeStubAssembler* assembler) const {
+ compiler::CodeAssemblerState* state) const {
typedef compiler::Node Node;
- CodeStubAssembler::Label runtime(assembler,
+ CodeStubAssembler assembler(state);
+ CodeStubAssembler::Label runtime(&assembler,
CodeStubAssembler::Label::kDeferred);
- Node* object = assembler->Parameter(Descriptor::kObject);
- Node* key = assembler->Parameter(Descriptor::kKey);
- Node* context = assembler->Parameter(Descriptor::kContext);
+ Node* object = assembler.Parameter(Descriptor::kObject);
+ Node* key = assembler.Parameter(Descriptor::kKey);
+ Node* context = assembler.Parameter(Descriptor::kContext);
ElementsKind kind = elements_kind();
- Node* elements = assembler->LoadElements(object);
+ Node* elements = assembler.LoadElements(object);
Node* new_elements =
- assembler->TryGrowElementsCapacity(object, elements, kind, key, &runtime);
- assembler->Return(new_elements);
+ assembler.TryGrowElementsCapacity(object, elements, kind, key, &runtime);
+ assembler.Return(new_elements);
- assembler->Bind(&runtime);
+ assembler.Bind(&runtime);
// TODO(danno): Make this a tail call when the stub is only used from TurboFan
  // code. This mustn't be a tail call for now, since the caller site in lithium
  // creates a safepoint. This safepoint mustn't have a different number of
@@ -3243,8 +2203,8 @@ void GrowArrayElementsStub::GenerateAssembly(
// tail call pushing the arguments on the stack for the runtime call). By not
// tail-calling, the runtime call case also has zero arguments on the stack
// for the stub frame.
- assembler->Return(assembler->CallRuntime(Runtime::kGrowArrayElements, context,
- object, key));
+ assembler.Return(
+ assembler.CallRuntime(Runtime::kGrowArrayElements, context, object, key));
}
ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
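
The header changes below retarget the stub-declaration macros to the same state-based interface. After this patch, a TurboFan stub declaration reduces to the pattern sketched here (hypothetical ExampleStub; compare StoreInterceptorStub further down):

    class ExampleStub : public TurboFanCodeStub {
     public:
      explicit ExampleStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
      // Expands to: void GenerateAssembly(compiler::CodeAssemblerState*)
      // const override; plus the MajorKey/base boilerplate.
      DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
      DEFINE_TURBOFAN_CODE_STUB(Example, TurboFanCodeStub);
    };
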
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 450d0c12c2..62203c37a7 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -7,7 +7,6 @@
#include "src/allocation.h"
#include "src/assembler.h"
-#include "src/code-stub-assembler.h"
#include "src/codegen.h"
#include "src/globals.h"
#include "src/ic/ic-state.h"
@@ -19,7 +18,13 @@
namespace v8 {
namespace internal {
-class ObjectLiteral;
+// Forward declarations.
+class CodeStubAssembler;
+namespace compiler {
+class CodeAssemblerLabel;
+class CodeAssemblerState;
+class Node;
+}
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
@@ -44,9 +49,6 @@ class ObjectLiteral;
V(StoreBufferOverflow) \
V(StoreElement) \
V(SubString) \
- V(KeyedStoreIC) \
- V(LoadGlobalIC) \
- V(FastNewObject) \
V(FastNewRestParameter) \
V(FastNewSloppyArguments) \
V(FastNewStrictArguments) \
@@ -59,20 +61,7 @@ class ObjectLiteral;
/* version of the corresponding stub is */ \
/* used universally */ \
V(CallICTrampoline) \
- V(KeyedStoreICTrampoline) \
/* --- HydrogenCodeStubs --- */ \
- /* These builtins w/ JS linkage are */ \
- /* just fast-cases of C++ builtins. They */ \
- /* require varg support from TF */ \
- V(FastArrayPush) \
- V(FastFunctionBind) \
- /* These will be ported/eliminated */ \
- /* as part of the new IC system, ask */ \
- /* ishell before doing anything */ \
- V(LoadConstant) \
- V(LoadDictionaryElement) \
- V(LoadFastElement) \
- V(LoadField) \
/* These should never be ported to TF */ \
/* because they are either used only by */ \
/* FCG/Crankshaft or are deprecated */ \
@@ -103,16 +92,9 @@ class ObjectLiteral;
V(MultiplyWithFeedback) \
V(DivideWithFeedback) \
V(ModulusWithFeedback) \
- V(Inc) \
V(InternalArrayNoArgumentConstructor) \
V(InternalArraySingleArgumentConstructor) \
- V(Dec) \
V(ElementsTransitionAndStore) \
- V(FastCloneRegExp) \
- V(FastCloneShallowArray) \
- V(FastCloneShallowObject) \
- V(FastNewClosure) \
- V(FastNewFunctionContext) \
V(KeyedLoadSloppyArguments) \
V(KeyedStoreSloppyArguments) \
V(LoadScriptContextField) \
@@ -120,28 +102,14 @@ class ObjectLiteral;
V(NumberToString) \
V(StringAdd) \
V(GetProperty) \
- V(LoadIC) \
V(LoadICProtoArray) \
- V(KeyedLoadICTF) \
V(StoreFastElement) \
- V(StoreField) \
V(StoreGlobal) \
- V(StoreIC) \
- V(KeyedStoreICTF) \
V(StoreInterceptor) \
- V(StoreMap) \
- V(StoreTransition) \
V(LoadApiGetter) \
V(LoadIndexedInterceptor) \
- V(GrowArrayElements) \
- /* These are only called from FGC and */ \
- /* can be removed when we use ignition */ \
- /* only */ \
- V(LoadICTrampoline) \
- V(LoadGlobalICTrampoline) \
- V(KeyedLoadICTrampolineTF) \
- V(StoreICTrampoline) \
- V(KeyedStoreICTrampolineTF)
+ V(LoadField) \
+ V(GrowArrayElements)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -291,6 +259,8 @@ class CodeStub BASE_EMBEDDED {
Isolate* isolate() const { return isolate_; }
+ void DeleteStubFromCacheForTesting();
+
protected:
CodeStub(uint32_t key, Isolate* isolate)
: minor_key_(MinorKeyFromKey(key)), isolate_(isolate) {}
@@ -370,7 +340,6 @@ class CodeStub BASE_EMBEDDED {
public: \
inline Major MajorKey() const override { return NAME; }; \
\
- protected: \
DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
@@ -386,59 +355,27 @@ class CodeStub BASE_EMBEDDED {
Handle<Code> GenerateCode() override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
- public: \
- void GenerateAssembly(CodeStubAssembler* assembler) const override; \
+#define DEFINE_TURBOFAN_CODE_STUB(NAME, SUPER) \
+ public: \
+ void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB(NAME, SUPER) \
- public: \
- static compiler::Node* Generate(CodeStubAssembler* assembler, \
- compiler::Node* left, compiler::Node* right, \
- compiler::Node* context); \
- void GenerateAssembly(CodeStubAssembler* assembler) const override { \
- assembler->Return(Generate(assembler, assembler->Parameter(0), \
- assembler->Parameter(1), \
- assembler->Parameter(2))); \
- } \
+#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate( \
+ CodeStubAssembler* assembler, compiler::Node* left, \
+ compiler::Node* right, compiler::Node* slot_id, \
+ compiler::Node* type_feedback_vector, compiler::Node* context); \
+ void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
DEFINE_CODE_STUB(NAME, SUPER)
-#define DEFINE_TURBOFAN_BINARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
- public: \
- static compiler::Node* Generate( \
- CodeStubAssembler* assembler, compiler::Node* left, \
- compiler::Node* right, compiler::Node* slot_id, \
- compiler::Node* type_feedback_vector, compiler::Node* context); \
- void GenerateAssembly(CodeStubAssembler* assembler) const override { \
- assembler->Return( \
- Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
- assembler->Parameter(2), assembler->Parameter(3), \
- assembler->Parameter(4))); \
- } \
- DEFINE_CODE_STUB(NAME, SUPER)
-
-#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB(NAME, SUPER) \
- public: \
- static compiler::Node* Generate(CodeStubAssembler* assembler, \
- compiler::Node* value, \
- compiler::Node* context); \
- void GenerateAssembly(CodeStubAssembler* assembler) const override { \
- assembler->Return(Generate(assembler, assembler->Parameter(0), \
- assembler->Parameter(1))); \
- } \
- DEFINE_CODE_STUB(NAME, SUPER)
-
-#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
- public: \
- static compiler::Node* Generate( \
- CodeStubAssembler* assembler, compiler::Node* value, \
- compiler::Node* context, compiler::Node* type_feedback_vector, \
- compiler::Node* slot_id); \
- void GenerateAssembly(CodeStubAssembler* assembler) const override { \
- assembler->Return( \
- Generate(assembler, assembler->Parameter(0), assembler->Parameter(1), \
- assembler->Parameter(2), assembler->Parameter(3))); \
- } \
+#define DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(NAME, SUPER) \
+ public: \
+ static compiler::Node* Generate( \
+ CodeStubAssembler* assembler, compiler::Node* value, \
+ compiler::Node* context, compiler::Node* type_feedback_vector, \
+ compiler::Node* slot_id); \
+ void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
DEFINE_CODE_STUB(NAME, SUPER)
#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
@@ -638,7 +575,7 @@ class TurboFanCodeStub : public CodeStub {
protected:
explicit TurboFanCodeStub(Isolate* isolate) : CodeStub(isolate) {}
- virtual void GenerateAssembly(CodeStubAssembler* assembler) const = 0;
+ virtual void GenerateAssembly(compiler::CodeAssemblerState* state) const = 0;
private:
DEFINE_CODE_STUB_BASE(TurboFanCodeStub, CodeStub);
@@ -774,33 +711,15 @@ class ModulusWithFeedbackStub final : public TurboFanCodeStub {
TurboFanCodeStub);
};
-class IncStub final : public TurboFanCodeStub {
- public:
- explicit IncStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
- DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Inc, TurboFanCodeStub);
-};
-
-class DecStub final : public TurboFanCodeStub {
- public:
- explicit DecStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(CountOp);
- DEFINE_TURBOFAN_UNARY_OP_CODE_STUB_WITH_FEEDBACK(Dec, TurboFanCodeStub);
-};
-
class StoreInterceptorStub : public TurboFanCodeStub {
public:
explicit StoreInterceptorStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- void GenerateAssembly(CodeStubAssembler* assember) const override;
-
Code::Kind GetCodeKind() const override { return Code::HANDLER; }
ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(StoreInterceptor, TurboFanCodeStub);
};
class LoadIndexedInterceptorStub : public TurboFanCodeStub {
@@ -832,50 +751,6 @@ class NumberToStringStub final : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(NumberToString, TurboFanCodeStub);
};
-class FastNewClosureStub : public TurboFanCodeStub {
- public:
- explicit FastNewClosureStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- static compiler::Node* Generate(CodeStubAssembler* assembler,
- compiler::Node* shared_info,
- compiler::Node* context);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
- DEFINE_TURBOFAN_CODE_STUB(FastNewClosure, TurboFanCodeStub);
-};
-
-class FastNewFunctionContextStub final : public TurboFanCodeStub {
- public:
- static const int kMaximumSlots = 0x8000;
-
- explicit FastNewFunctionContextStub(Isolate* isolate)
- : TurboFanCodeStub(isolate) {}
-
- static compiler::Node* Generate(CodeStubAssembler* assembler,
- compiler::Node* function,
- compiler::Node* slots,
- compiler::Node* context);
-
- private:
- // FastNewFunctionContextStub can only allocate closures which fit in the
- // new space.
- STATIC_ASSERT(((kMaximumSlots + Context::MIN_CONTEXT_SLOTS) * kPointerSize +
- FixedArray::kHeaderSize) < kMaxRegularHeapObjectSize);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewFunctionContext);
- DEFINE_TURBOFAN_CODE_STUB(FastNewFunctionContext, TurboFanCodeStub);
-};
-
-
-class FastNewObjectStub final : public PlatformCodeStub {
- public:
- explicit FastNewObjectStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewObject);
- DEFINE_PLATFORM_CODE_STUB(FastNewObject, PlatformCodeStub);
-};
-
-
// TODO(turbofan): This stub should be possible to write in TurboFan
// using the CodeStubAssembler very soon in a way that is as efficient
// and easy as the current handwritten version, which is partly a copy
@@ -939,77 +814,6 @@ class FastNewStrictArgumentsStub final : public PlatformCodeStub {
class SkipStubFrameBits : public BitField<bool, 0, 1> {};
};
-class FastCloneRegExpStub final : public TurboFanCodeStub {
- public:
- explicit FastCloneRegExpStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- static compiler::Node* Generate(CodeStubAssembler* assembler,
- compiler::Node* closure,
- compiler::Node* literal_index,
- compiler::Node* pattern,
- compiler::Node* flags,
- compiler::Node* context);
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneRegExp);
- DEFINE_TURBOFAN_CODE_STUB(FastCloneRegExp, TurboFanCodeStub);
-};
-
-class FastCloneShallowArrayStub : public TurboFanCodeStub {
- public:
- FastCloneShallowArrayStub(Isolate* isolate,
- AllocationSiteMode allocation_site_mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = AllocationSiteModeBits::encode(allocation_site_mode);
- }
-
- static compiler::Node* Generate(CodeStubAssembler* assembler,
- compiler::Node* closure,
- compiler::Node* literal_index,
- compiler::Node* context,
- CodeStubAssembler::Label* call_runtime,
- AllocationSiteMode allocation_site_mode);
-
- AllocationSiteMode allocation_site_mode() const {
- return AllocationSiteModeBits::decode(minor_key_);
- }
-
- private:
- class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
- DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowArray, TurboFanCodeStub);
-};
-
-class FastCloneShallowObjectStub : public TurboFanCodeStub {
- public:
- // Maximum number of properties in copied object.
- static const int kMaximumClonedProperties = 6;
-
- FastCloneShallowObjectStub(Isolate* isolate, int length)
- : TurboFanCodeStub(isolate) {
- DCHECK_GE(length, 0);
- DCHECK_LE(length, kMaximumClonedProperties);
- minor_key_ = LengthBits::encode(LengthBits::encode(length));
- }
-
- static compiler::Node* GenerateFastPath(
- CodeStubAssembler* assembler,
- compiler::CodeAssembler::Label* call_runtime, compiler::Node* closure,
- compiler::Node* literals_index, compiler::Node* properties_count);
-
- static bool IsSupported(ObjectLiteral* expr);
- static int PropertiesCount(int literal_length);
-
- int length() const { return LengthBits::decode(minor_key_); }
-
- private:
- class LengthBits : public BitField<int, 0, 4> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
- DEFINE_TURBOFAN_CODE_STUB(FastCloneShallowObject, TurboFanCodeStub);
-};
-
class CreateAllocationSiteStub : public TurboFanCodeStub {
public:
explicit CreateAllocationSiteStub(Isolate* isolate)
@@ -1048,24 +852,6 @@ class GrowArrayElementsStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(GrowArrayElements, TurboFanCodeStub);
};
-class FastArrayPushStub : public HydrogenCodeStub {
- public:
- explicit FastArrayPushStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
- DEFINE_HYDROGEN_CODE_STUB(FastArrayPush, HydrogenCodeStub);
-};
-
-class FastFunctionBindStub : public HydrogenCodeStub {
- public:
- explicit FastFunctionBindStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(VarArgFunction);
- DEFINE_HYDROGEN_CODE_STUB(FastFunctionBind, HydrogenCodeStub);
-};
-
enum AllocationSiteOverrideMode {
DONT_OVERRIDE,
DISABLE_ALLOCATION_SITES,
@@ -1211,28 +997,16 @@ class HandlerStub : public HydrogenCodeStub {
DEFINE_CODE_STUB_BASE(HandlerStub, HydrogenCodeStub);
};
-
-class LoadFieldStub: public HandlerStub {
+class LoadFieldStub : public TurboFanCodeStub {
public:
- LoadFieldStub(Isolate* isolate, FieldIndex index) : HandlerStub(isolate) {
- int property_index_key = index.GetFieldAccessStubKey();
- set_sub_minor_key(LoadFieldByIndexBits::encode(property_index_key));
- }
-
- FieldIndex index() const {
- int property_index_key = LoadFieldByIndexBits::decode(sub_minor_key());
- return FieldIndex::FromFieldAccessStubKey(property_index_key);
- }
+ explicit LoadFieldStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
- protected:
- Code::Kind kind() const override { return Code::LOAD_IC; }
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+ ExtraICState GetExtraICState() const override { return GetCodeKind(); }
private:
- class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
-
- // TODO(ishell): The stub uses only kReceiver parameter.
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadField);
+ DEFINE_TURBOFAN_CODE_STUB(LoadField, TurboFanCodeStub);
};
class KeyedLoadSloppyArgumentsStub : public TurboFanCodeStub {
@@ -1267,29 +1041,6 @@ class KeyedStoreSloppyArgumentsStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(KeyedStoreSloppyArguments, TurboFanCodeStub);
};
-
-class LoadConstantStub : public HandlerStub {
- public:
- LoadConstantStub(Isolate* isolate, int constant_index)
- : HandlerStub(isolate) {
- set_sub_minor_key(ConstantIndexBits::encode(constant_index));
- }
-
- int constant_index() const {
- return ConstantIndexBits::decode(sub_minor_key());
- }
-
- protected:
- Code::Kind kind() const override { return Code::LOAD_IC; }
-
- private:
- class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
-
- // TODO(ishell): The stub uses only kReceiver parameter.
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
-};
-
class LoadApiGetterStub : public TurboFanCodeStub {
public:
LoadApiGetterStub(Isolate* isolate, bool receiver_is_holder, int index)
@@ -1317,91 +1068,6 @@ class LoadApiGetterStub : public TurboFanCodeStub {
DEFINE_TURBOFAN_CODE_STUB(LoadApiGetter, TurboFanCodeStub);
};
-class StoreFieldStub : public TurboFanCodeStub {
- public:
- StoreFieldStub(Isolate* isolate, FieldIndex index,
- Representation representation)
- : TurboFanCodeStub(isolate) {
- int property_index_key = index.GetFieldAccessStubKey();
- minor_key_ = StoreFieldByIndexBits::encode(property_index_key) |
- RepresentationBits::encode(representation.kind());
- }
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
- FieldIndex index() const {
- int property_index_key = StoreFieldByIndexBits::decode(minor_key_);
- return FieldIndex::FromFieldAccessStubKey(property_index_key);
- }
-
- Representation representation() const {
- return Representation::FromKind(RepresentationBits::decode(minor_key_));
- }
-
- private:
- class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
- class RepresentationBits
- : public BitField<Representation::Kind, StoreFieldByIndexBits::kNext, 4> {
- };
- STATIC_ASSERT(Representation::kNumRepresentations - 1 <
- RepresentationBits::kMax);
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_TURBOFAN_CODE_STUB(StoreField, TurboFanCodeStub);
-};
-
-class StoreMapStub : public TurboFanCodeStub {
- public:
- explicit StoreMapStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
- DEFINE_TURBOFAN_CODE_STUB(StoreMap, TurboFanCodeStub);
-};
-
-class StoreTransitionStub : public TurboFanCodeStub {
- public:
- enum StoreMode {
- StoreMapAndValue,
- ExtendStorageAndStoreMapAndValue
- };
-
- StoreTransitionStub(Isolate* isolate, bool is_inobject,
- Representation representation, StoreMode store_mode)
- : TurboFanCodeStub(isolate) {
- minor_key_ = IsInobjectBits::encode(is_inobject) |
- RepresentationBits::encode(representation.kind()) |
- StoreModeBits::encode(store_mode);
- }
-
- Code::Kind GetCodeKind() const override { return Code::HANDLER; }
- ExtraICState GetExtraICState() const override { return Code::STORE_IC; }
-
- bool is_inobject() const { return IsInobjectBits::decode(minor_key_); }
-
- Representation representation() const {
- return Representation::FromKind(RepresentationBits::decode(minor_key_));
- }
-
- StoreMode store_mode() const { return StoreModeBits::decode(minor_key_); }
-
- private:
- class IsInobjectBits : public BitField<bool, 0, 1> {};
- class RepresentationBits
- : public BitField<Representation::Kind, IsInobjectBits::kNext, 4> {};
- STATIC_ASSERT(Representation::kNumRepresentations - 1 <
- RepresentationBits::kMax);
- class StoreModeBits
- : public BitField<StoreMode, RepresentationBits::kNext, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreNamedTransition);
- DEFINE_TURBOFAN_CODE_STUB(StoreTransition, TurboFanCodeStub);
-};
-
class StoreGlobalStub : public TurboFanCodeStub {
public:
StoreGlobalStub(Isolate* isolate, PropertyCellType type,
@@ -1961,115 +1627,6 @@ class StringCharAtGenerator {
DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};
-
-class LoadDictionaryElementStub : public HydrogenCodeStub {
- public:
- explicit LoadDictionaryElementStub(Isolate* isolate)
- : HydrogenCodeStub(isolate) {}
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
-};
-
-class LoadICTrampolineStub : public TurboFanCodeStub {
- public:
- explicit LoadICTrampolineStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
- DEFINE_CODE_STUB(LoadICTrampoline, TurboFanCodeStub);
-};
-
-class LoadGlobalICTrampolineStub : public TurboFanCodeStub {
- public:
- explicit LoadGlobalICTrampolineStub(Isolate* isolate,
- const LoadGlobalICState& state)
- : TurboFanCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobal);
- DEFINE_CODE_STUB(LoadGlobalICTrampoline, TurboFanCodeStub);
-};
-
-class KeyedLoadICTrampolineTFStub : public LoadICTrampolineStub {
- public:
- explicit KeyedLoadICTrampolineTFStub(Isolate* isolate)
- : LoadICTrampolineStub(isolate) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_CODE_STUB(KeyedLoadICTrampolineTF, LoadICTrampolineStub);
-};
-
-class StoreICTrampolineStub : public TurboFanCodeStub {
- public:
- StoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
- : TurboFanCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- protected:
- StoreICState state() const { return StoreICState(GetExtraICState()); }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
- DEFINE_CODE_STUB(StoreICTrampoline, TurboFanCodeStub);
-};
-
-class KeyedStoreICTrampolineStub : public PlatformCodeStub {
- public:
- KeyedStoreICTrampolineStub(Isolate* isolate, const StoreICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- protected:
- StoreICState state() const { return StoreICState(GetExtraICState()); }
-
- private:
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
- DEFINE_PLATFORM_CODE_STUB(KeyedStoreICTrampoline, PlatformCodeStub);
-};
-
-class KeyedStoreICTrampolineTFStub : public StoreICTrampolineStub {
- public:
- KeyedStoreICTrampolineTFStub(Isolate* isolate, const StoreICState& state)
- : StoreICTrampolineStub(isolate, state) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
-
- DEFINE_CODE_STUB(KeyedStoreICTrampolineTF, StoreICTrampolineStub);
-};
-
class CallICTrampolineStub : public PlatformCodeStub {
public:
CallICTrampolineStub(Isolate* isolate, const CallICState& state)
@@ -2092,108 +1649,28 @@ class CallICTrampolineStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(CallICTrampoline, PlatformCodeStub);
};
-class LoadICStub : public TurboFanCodeStub {
- public:
- explicit LoadICStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::LOAD_IC; }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_CODE_STUB(LoadIC, TurboFanCodeStub);
-};
-
class LoadICProtoArrayStub : public TurboFanCodeStub {
public:
- explicit LoadICProtoArrayStub(Isolate* isolate) : TurboFanCodeStub(isolate) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
- DEFINE_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
-};
-
-class LoadGlobalICStub : public TurboFanCodeStub {
- public:
- explicit LoadGlobalICStub(Isolate* isolate, const LoadGlobalICState& state)
+ explicit LoadICProtoArrayStub(Isolate* isolate,
+ bool throw_reference_error_if_nonexistent)
: TurboFanCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
+ minor_key_ = ThrowReferenceErrorIfNonexistentBits::encode(
+ throw_reference_error_if_nonexistent);
}
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::LOAD_GLOBAL_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadGlobalWithVector);
- DEFINE_CODE_STUB(LoadGlobalIC, TurboFanCodeStub);
-};
-
-class KeyedLoadICTFStub : public LoadICStub {
- public:
- explicit KeyedLoadICTFStub(Isolate* isolate) : LoadICStub(isolate) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_LOAD_IC; }
-
- DEFINE_CODE_STUB(KeyedLoadICTF, LoadICStub);
-};
-
-class StoreICStub : public TurboFanCodeStub {
- public:
- StoreICStub(Isolate* isolate, const StoreICState& state)
- : TurboFanCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
+ bool throw_reference_error_if_nonexistent() const {
+ return ThrowReferenceErrorIfNonexistentBits::decode(minor_key_);
}
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::STORE_IC; }
ExtraICState GetExtraICState() const final {
return static_cast<ExtraICState>(minor_key_);
}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_CODE_STUB(StoreIC, TurboFanCodeStub);
-};
-
-class KeyedStoreICStub : public PlatformCodeStub {
- public:
- KeyedStoreICStub(Isolate* isolate, const StoreICState& state)
- : PlatformCodeStub(isolate) {
- minor_key_ = state.GetExtraICState();
- }
-
- void GenerateForTrampoline(MacroAssembler* masm);
-
- Code::Kind GetCodeKind() const final { return Code::KEYED_STORE_IC; }
-
- ExtraICState GetExtraICState() const final {
- return static_cast<ExtraICState>(minor_key_);
- }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreWithVector);
- DEFINE_PLATFORM_CODE_STUB(KeyedStoreIC, PlatformCodeStub);
-
- protected:
- void GenerateImpl(MacroAssembler* masm, bool in_frame);
-};
-
-class KeyedStoreICTFStub : public StoreICStub {
- public:
- KeyedStoreICTFStub(Isolate* isolate, const StoreICState& state)
- : StoreICStub(isolate, state) {}
-
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
-
- Code::Kind GetCodeKind() const override { return Code::KEYED_STORE_IC; }
+ private:
+ class ThrowReferenceErrorIfNonexistentBits : public BitField<bool, 0, 1> {};
- DEFINE_CODE_STUB(KeyedStoreICTF, StoreICStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadICProtoArray);
+ DEFINE_TURBOFAN_CODE_STUB(LoadICProtoArray, TurboFanCodeStub);
};
class DoubleToIStub : public PlatformCodeStub {
@@ -2301,39 +1778,6 @@ class StoreScriptContextFieldStub : public ScriptContextFieldStub {
DEFINE_TURBOFAN_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
};
-
-class LoadFastElementStub : public HandlerStub {
- public:
- LoadFastElementStub(Isolate* isolate, bool is_js_array,
- ElementsKind elements_kind,
- bool convert_hole_to_undefined = false)
- : HandlerStub(isolate) {
- set_sub_minor_key(
- ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array) |
- CanConvertHoleToUndefined::encode(convert_hole_to_undefined));
- }
-
- Code::Kind kind() const override { return Code::KEYED_LOAD_IC; }
-
- bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
- bool convert_hole_to_undefined() const {
- return CanConvertHoleToUndefined::decode(sub_minor_key());
- }
-
- ElementsKind elements_kind() const {
- return ElementsKindBits::decode(sub_minor_key());
- }
-
- private:
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class IsJSArrayBits: public BitField<bool, 8, 1> {};
- class CanConvertHoleToUndefined : public BitField<bool, 9, 1> {};
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(LoadWithVector);
- DEFINE_HANDLER_CODE_STUB(LoadFastElement, HandlerStub);
-};
-
class StoreFastElementStub : public TurboFanCodeStub {
public:
StoreFastElementStub(Isolate* isolate, bool is_js_array,
@@ -2398,23 +1842,22 @@ class AllocateHeapNumberStub : public TurboFanCodeStub {
: TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
- void GenerateAssembly(CodeStubAssembler* assembler) const override;
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
- DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
-};
-
-#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
- class Allocate##Type##Stub : public TurboFanCodeStub { \
- public: \
- explicit Allocate##Type##Stub(Isolate* isolate) \
- : TurboFanCodeStub(isolate) {} \
- \
- void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
- void GenerateAssembly(CodeStubAssembler* assembler) const override; \
- \
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
- DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
+ DEFINE_TURBOFAN_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
+};
+
+#define SIMD128_ALLOC_STUB(TYPE, Type, type, lane_count, lane_type) \
+ class Allocate##Type##Stub : public TurboFanCodeStub { \
+ public: \
+ explicit Allocate##Type##Stub(Isolate* isolate) \
+ : TurboFanCodeStub(isolate) {} \
+ \
+ void InitializeDescriptor(CodeStubDescriptor* descriptor) override; \
+ void GenerateAssembly(compiler::CodeAssemblerState* state) const override; \
+ \
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate##Type); \
+ DEFINE_CODE_STUB(Allocate##Type, TurboFanCodeStub); \
};
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
@@ -2713,16 +2156,8 @@ class SubStringStub : public TurboFanCodeStub {
compiler::Node* string, compiler::Node* from,
compiler::Node* to, compiler::Node* context);
- void GenerateAssembly(CodeStubAssembler* assembler) const override {
- assembler->Return(Generate(assembler,
- assembler->Parameter(Descriptor::kString),
- assembler->Parameter(Descriptor::kFrom),
- assembler->Parameter(Descriptor::kTo),
- assembler->Parameter(Descriptor::kContext)));
- }
-
DEFINE_CALL_INTERFACE_DESCRIPTOR(SubString);
- DEFINE_CODE_STUB(SubString, TurboFanCodeStub);
+ DEFINE_TURBOFAN_CODE_STUB(SubString, TurboFanCodeStub);
};
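
For context on the pattern above: DEFINE_TURBOFAN_CODE_STUB now only declares
GenerateAssembly(compiler::CodeAssemblerState*), and the body moves out of the
header. A minimal sketch of such an out-of-line definition in code-stubs.cc,
assuming the CodeStubAssembler(state) constructor that accompanies this API
(the actual body in the tree may differ):

  void SubStringStub::GenerateAssembly(
      compiler::CodeAssemblerState* state) const {
    CodeStubAssembler assembler(state);
    // Reproduce what used to be inlined in the header: call Generate() with
    // the descriptor's parameters and return its result.
    assembler.Return(Generate(&assembler,
                              assembler.Parameter(Descriptor::kString),
                              assembler.Parameter(Descriptor::kFrom),
                              assembler.Parameter(Descriptor::kTo),
                              assembler.Parameter(Descriptor::kContext)));
  }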
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index afd8a6f592..54350698af 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -137,8 +137,100 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
return code;
}
+// Print the function's source if it was not printed before.
+// Returns a sequential id under which this function was printed.
+static int PrintFunctionSource(CompilationInfo* info,
+ std::vector<Handle<SharedFunctionInfo>>* printed,
+ int inlining_id,
+ Handle<SharedFunctionInfo> shared) {
+ // Outermost function has source id -1 and inlined functions take
+ // source ids starting from 0.
+ int source_id = -1;
+ if (inlining_id != SourcePosition::kNotInlined) {
+ for (unsigned i = 0; i < printed->size(); i++) {
+ if (printed->at(i).is_identical_to(shared)) {
+ return i;
+ }
+ }
+ source_id = static_cast<int>(printed->size());
+ printed->push_back(shared);
+ }
+
+ Isolate* isolate = info->isolate();
+ if (!shared->script()->IsUndefined(isolate)) {
+ Handle<Script> script(Script::cast(shared->script()), isolate);
+
+ if (!script->source()->IsUndefined(isolate)) {
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ Object* source_name = script->name();
+ OFStream os(tracing_scope.file());
+ os << "--- FUNCTION SOURCE (";
+ if (source_name->IsString()) {
+ os << String::cast(source_name)->ToCString().get() << ":";
+ }
+ os << shared->DebugName()->ToCString().get() << ") id{";
+ os << info->optimization_id() << "," << source_id << "} start{";
+ os << shared->start_position() << "} ---\n";
+ {
+ DisallowHeapAllocation no_allocation;
+ int start = shared->start_position();
+ int len = shared->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ os << AsReversiblyEscapedUC16(c);
+ }
+ }
+
+ os << "\n--- END ---\n";
+ }
+ }
+
+ return source_id;
+}
+
+// Print information for the given inlining: which function was inlined and
+// where the inlining occurred.
+static void PrintInlinedFunctionInfo(
+ CompilationInfo* info, int source_id, int inlining_id,
+ const CompilationInfo::InlinedFunctionHolder& h) {
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "INLINE (" << h.shared_info->DebugName()->ToCString().get() << ") id{"
+ << info->optimization_id() << "," << source_id << "} AS " << inlining_id
+ << " AT ";
+ const SourcePosition position = h.position.position;
+ if (position.IsKnown()) {
+ os << "<" << position.InliningId() << ":" << position.ScriptOffset() << ">";
+ } else {
+ os << "<?>";
+ }
+ os << std::endl;
+}
+
+// Print the source of all functions that participated in this optimizing
+// compilation. For inlined functions, print the source position of their
+// inlining.
+static void DumpParticipatingSource(CompilationInfo* info) {
+ AllowDeferredHandleDereference allow_deference_for_print_code;
+
+ std::vector<Handle<SharedFunctionInfo>> printed;
+ printed.reserve(info->inlined_functions().size());
+
+ PrintFunctionSource(info, &printed, SourcePosition::kNotInlined,
+ info->shared_info());
+ const auto& inlined = info->inlined_functions();
+ for (unsigned id = 0; id < inlined.size(); id++) {
+ const int source_id =
+ PrintFunctionSource(info, &printed, id, inlined[id].shared_info);
+ PrintInlinedFunctionInfo(info, source_id, id, inlined[id]);
+ }
+}
+
 void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
+ if (FLAG_print_opt_source && info->IsOptimizing()) {
+ DumpParticipatingSource(info);
+ }
+
#ifdef ENABLE_DISASSEMBLER
AllowDeferredHandleDereference allow_deference_for_print_code;
Isolate* isolate = info->isolate();
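
Illustrative only: with the new --print-opt-source flag, an optimizing compile
makes DumpParticipatingSource() emit blocks like the following; the function
names, ids, and offsets here are invented for the example:

  --- FUNCTION SOURCE (math.js:add) id{1,-1} start{10} ---
  function add(a, b) { return square(a) + b; }
  --- END ---
  --- FUNCTION SOURCE (math.js:square) id{1,0} start{55} ---
  function square(x) { return x * x; }
  --- END ---
  INLINE (square) id{1,0} AS 0 AT <-1:31>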
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index a17ad2a880..b909edc850 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -104,43 +104,6 @@ V8_EXPORT_PRIVATE double modulo(double x, double y);
double fast_sqrt(double input, Isolate* isolate);
void lazily_initialize_fast_sqrt(Isolate* isolate);
-
-class ElementsTransitionGenerator : public AllStatic {
- public:
- // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
- // |allocation_memento_found| may be NULL.
- static void GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found);
- static void GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail);
- static void GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
-};
-
-static const int kNumberDictionaryProbes = 4;
-
-
class CodeAgingHelper {
public:
explicit CodeAgingHelper(Isolate* isolate);
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 5c9fa58367..0a9ce310a7 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/parsing/parse-info.h"
#include "src/source-position.h"
@@ -68,8 +69,10 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info,
if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
- if (FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
- FLAG_turbo_profiling || isolate_->is_profiling()) {
+  // Collect source positions for optimized code when profiling or if the
+  // debugger is active, to get more precise source positions at the price
+  // of more memory consumption.
+ if (isolate_->NeedsSourcePositionsForProfiling()) {
MarkAsSourcePositionsEnabled();
}
}
@@ -121,7 +124,7 @@ bool CompilationInfo::is_this_defined() const { return !IsStub(); }
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_crankshaft &&
+ return FLAG_opt && FLAG_crankshaft &&
!(literal()->flags() & AstProperties::kDontSelfOptimize) &&
!literal()->dont_optimize() &&
literal()->scope()->AllowsLazyCompilation() &&
@@ -163,11 +166,13 @@ StackFrame::Type CompilationInfo::GetOutputStackFrameType() const {
#undef CASE_KIND
return StackFrame::STUB;
case Code::WASM_FUNCTION:
- return StackFrame::WASM;
+ return StackFrame::WASM_COMPILED;
case Code::JS_TO_WASM_FUNCTION:
return StackFrame::JS_TO_WASM;
case Code::WASM_TO_JS_FUNCTION:
return StackFrame::WASM_TO_JS;
+ case Code::WASM_INTERPRETER_ENTRY:
+ return StackFrame::WASM_INTERPRETER_ENTRY;
default:
UNIMPLEMENTED();
return StackFrame::NONE;
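
Note on the NeedsSourcePositionsForProfiling() call used above: its definition
is not part of this hunk. A plausible shape, inferred from the flag checks the
removed condition performed (the real predicate lives on Isolate and may
consult more state than shown here):

  bool Isolate::NeedsSourcePositionsForProfiling() const {
    return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
           FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
           debug()->is_active() || logger()->is_logging();
  }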
diff --git a/deps/v8/src/compilation-info.h b/deps/v8/src/compilation-info.h
index 77b9e34306..863183b5cd 100644
--- a/deps/v8/src/compilation-info.h
+++ b/deps/v8/src/compilation-info.h
@@ -49,7 +49,7 @@ class CompilationInfo final {
kSourcePositionsEnabled = 1 << 13,
kBailoutOnUninitialized = 1 << 14,
kOptimizeFromBytecode = 1 << 15,
- kTypeFeedbackEnabled = 1 << 16,
+ kLoopPeelingEnabled = 1 << 16,
};
CompilationInfo(ParseInfo* parse_info, Handle<JSFunction> closure);
@@ -141,12 +141,6 @@ class CompilationInfo final {
return GetFlag(kDeoptimizationEnabled);
}
- void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
-
- bool is_type_feedback_enabled() const {
- return GetFlag(kTypeFeedbackEnabled);
- }
-
void MarkAsAccessorInliningEnabled() { SetFlag(kAccessorInliningEnabled); }
bool is_accessor_inlining_enabled() const {
@@ -179,6 +173,10 @@ class CompilationInfo final {
return GetFlag(kOptimizeFromBytecode);
}
+ void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
+
+ bool is_loop_peeling_enabled() const { return GetFlag(kLoopPeelingEnabled); }
+
bool GeneratePreagedPrologue() const {
// Generate a pre-aged prologue if we are optimizing for size, which
// will make code flushing more aggressive. Only apply to Code::FUNCTION,
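
The new kLoopPeelingEnabled bit reuses the existing flag accessor pattern of
CompilationInfo. A standalone illustration of that pattern, reduced to the
essentials (the SetFlag/GetFlag mechanics are simplified from the real class):

  class FlagsIllustration {
   public:
    void MarkAsLoopPeelingEnabled() { SetFlag(kLoopPeelingEnabled); }
    bool is_loop_peeling_enabled() const {
      return GetFlag(kLoopPeelingEnabled);
    }

   private:
    enum Flag { kLoopPeelingEnabled = 1 << 16 };
    void SetFlag(unsigned flag) { flags_ |= flag; }
    bool GetFlag(unsigned flag) const { return (flags_ & flag) != 0; }
    unsigned flags_ = 0;
  };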
diff --git a/deps/v8/src/compilation-statistics.cc b/deps/v8/src/compilation-statistics.cc
index d4ca39d611..16ab3b37fe 100644
--- a/deps/v8/src/compilation-statistics.cc
+++ b/deps/v8/src/compilation-statistics.cc
@@ -14,6 +14,8 @@ namespace internal {
void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
const char* phase_name,
const BasicStats& stats) {
+ base::LockGuard<base::Mutex> guard(&record_mutex_);
+
std::string phase_name_str(phase_name);
auto it = phase_map_.find(phase_name_str);
if (it == phase_map_.end()) {
@@ -26,6 +28,8 @@ void CompilationStatistics::RecordPhaseStats(const char* phase_kind_name,
void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
const BasicStats& stats) {
+ base::LockGuard<base::Mutex> guard(&record_mutex_);
+
std::string phase_kind_name_str(phase_kind_name);
auto it = phase_kind_map_.find(phase_kind_name_str);
if (it == phase_kind_map_.end()) {
@@ -39,6 +43,8 @@ void CompilationStatistics::RecordPhaseKindStats(const char* phase_kind_name,
void CompilationStatistics::RecordTotalStats(size_t source_size,
const BasicStats& stats) {
+ base::LockGuard<base::Mutex> guard(&record_mutex_);
+
   total_stats_.source_size_ += source_size;
total_stats_.Accumulate(stats);
}
@@ -128,10 +134,10 @@ std::ostream& operator<<(std::ostream& os, const AsPrintableStatistics& ps) {
}
if (!ps.machine_output) WriteHeader(os);
- for (auto phase_kind_it : sorted_phase_kinds) {
+ for (const auto& phase_kind_it : sorted_phase_kinds) {
const auto& phase_kind_name = phase_kind_it->first;
if (!ps.machine_output) {
- for (auto phase_it : sorted_phases) {
+ for (const auto& phase_it : sorted_phases) {
const auto& phase_stats = phase_it->second;
if (phase_stats.phase_kind_name_ != phase_kind_name) continue;
const auto& phase_name = phase_it->first;
diff --git a/deps/v8/src/compilation-statistics.h b/deps/v8/src/compilation-statistics.h
index ceffc2ebc1..388117b10e 100644
--- a/deps/v8/src/compilation-statistics.h
+++ b/deps/v8/src/compilation-statistics.h
@@ -80,6 +80,7 @@ class CompilationStatistics final : public Malloced {
TotalStats total_stats_;
PhaseKindMap phase_kind_map_;
PhaseMap phase_map_;
+ base::Mutex record_mutex_;
DISALLOW_COPY_AND_ASSIGN(CompilationStatistics);
};
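
The Record* methods above can now be reached from concurrent compiler threads,
so record_mutex_ serializes all updates to the shared maps. A standalone
sketch of the same pattern using only the C++ standard library (not V8 code):

  #include <map>
  #include <mutex>
  #include <string>

  class PhaseStats {
   public:
    void RecordPhase(const std::string& name, double ms) {
      // Plays the role of base::LockGuard<base::Mutex> in the diff: no two
      // threads mutate phase_ms_ at the same time.
      std::lock_guard<std::mutex> guard(mutex_);
      phase_ms_[name] += ms;
    }

   private:
    std::mutex mutex_;
    std::map<std::string, double> phase_ms_;
  };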
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index b87a4a5c32..fdb975a5e4 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -8,6 +8,7 @@
#include "src/compilation-info.h"
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/compiler.h"
+#include "src/flags.h"
#include "src/global-handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -15,26 +16,71 @@
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/unicode-cache.h"
+#include "src/utils.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
+namespace {
+
+class OneByteWrapper : public v8::String::ExternalOneByteStringResource {
+ public:
+ OneByteWrapper(const void* data, int length) : data_(data), length_(length) {}
+ ~OneByteWrapper() override = default;
+
+ const char* data() const override {
+ return reinterpret_cast<const char*>(data_);
+ }
+
+ size_t length() const override { return static_cast<size_t>(length_); }
+
+ private:
+ const void* data_;
+ int length_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneByteWrapper);
+};
+
+class TwoByteWrapper : public v8::String::ExternalStringResource {
+ public:
+ TwoByteWrapper(const void* data, int length) : data_(data), length_(length) {}
+ ~TwoByteWrapper() override = default;
+
+ const uint16_t* data() const override {
+ return reinterpret_cast<const uint16_t*>(data_);
+ }
+
+ size_t length() const override { return static_cast<size_t>(length_); }
+
+ private:
+ const void* data_;
+ int length_;
+
+ DISALLOW_COPY_AND_ASSIGN(TwoByteWrapper);
+};
+
+} // namespace
+
CompilerDispatcherJob::CompilerDispatcherJob(Isolate* isolate,
+ CompilerDispatcherTracer* tracer,
Handle<SharedFunctionInfo> shared,
size_t max_stack_size)
: isolate_(isolate),
- tracer_(isolate_->compiler_dispatcher_tracer()),
+ tracer_(tracer),
shared_(Handle<SharedFunctionInfo>::cast(
isolate_->global_handles()->Create(*shared))),
max_stack_size_(max_stack_size),
- can_compile_on_background_thread_(false) {
+ trace_compiler_dispatcher_jobs_(FLAG_trace_compiler_dispatcher_jobs) {
HandleScope scope(isolate_);
DCHECK(!shared_->outer_scope_info()->IsTheHole(isolate_));
Handle<Script> script(Script::cast(shared_->script()), isolate_);
Handle<String> source(String::cast(script->source()), isolate_);
- can_parse_on_background_thread_ =
- source->IsExternalTwoByteString() || source->IsExternalOneByteString();
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p] created for ", static_cast<void*>(this));
+ shared_->ShortPrint();
+ PrintF("\n");
+ }
}
CompilerDispatcherJob::~CompilerDispatcherJob() {
@@ -44,10 +90,19 @@ CompilerDispatcherJob::~CompilerDispatcherJob() {
i::GlobalHandles::Destroy(Handle<Object>::cast(shared_).location());
}
+bool CompilerDispatcherJob::IsAssociatedWith(
+ Handle<SharedFunctionInfo> shared) const {
+ return *shared_ == *shared;
+}
+
void CompilerDispatcherJob::PrepareToParseOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kInitial);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToParse);
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Preparing to parse\n",
+ static_cast<void*>(this));
+ }
HandleScope scope(isolate_);
unicode_cache_.reset(new UnicodeCache());
zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
@@ -60,11 +115,68 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
source, shared_->start_position(), shared_->end_position()));
} else {
source = String::Flatten(source);
- // Have to globalize the reference here, so it survives between function
- // calls.
- source_ = Handle<String>::cast(isolate_->global_handles()->Create(*source));
- character_stream_.reset(ScannerStream::For(
- source_, shared_->start_position(), shared_->end_position()));
+ const void* data;
+ int offset = 0;
+ int length = source->length();
+
+ // Objects in lo_space don't move, so we can just read the contents from
+ // any thread.
+ if (isolate_->heap()->lo_space()->Contains(*source)) {
+ // We need to globalize the handle to the flattened string here, in
+ // case it's not referenced from anywhere else.
+ source_ =
+ Handle<String>::cast(isolate_->global_handles()->Create(*source));
+ DisallowHeapAllocation no_allocation;
+ String::FlatContent content = source->GetFlatContent();
+ DCHECK(content.IsFlat());
+ data =
+ content.IsOneByte()
+ ? reinterpret_cast<const void*>(content.ToOneByteVector().start())
+ : reinterpret_cast<const void*>(content.ToUC16Vector().start());
+ } else {
+ // Otherwise, create a copy of the part of the string we'll parse in the
+ // zone.
+ length = (shared_->end_position() - shared_->start_position());
+ offset = shared_->start_position();
+
+ int byte_len = length * (source->IsOneByteRepresentation() ? 1 : 2);
+ data = zone_->New(byte_len);
+
+ DisallowHeapAllocation no_allocation;
+ String::FlatContent content = source->GetFlatContent();
+ DCHECK(content.IsFlat());
+ if (content.IsOneByte()) {
+ MemCopy(const_cast<void*>(data),
+ &content.ToOneByteVector().at(shared_->start_position()),
+ byte_len);
+ } else {
+ MemCopy(const_cast<void*>(data),
+ &content.ToUC16Vector().at(shared_->start_position()),
+ byte_len);
+ }
+ }
+ Handle<String> wrapper;
+ if (source->IsOneByteRepresentation()) {
+ ExternalOneByteString::Resource* resource =
+ new OneByteWrapper(data, length);
+ source_wrapper_.reset(resource);
+ wrapper = isolate_->factory()
+ ->NewExternalStringFromOneByte(resource)
+ .ToHandleChecked();
+ } else {
+ ExternalTwoByteString::Resource* resource =
+ new TwoByteWrapper(data, length);
+ source_wrapper_.reset(resource);
+ wrapper = isolate_->factory()
+ ->NewExternalStringFromTwoByte(resource)
+ .ToHandleChecked();
+ }
+ wrapper_ =
+ Handle<String>::cast(isolate_->global_handles()->Create(*wrapper));
+
+ character_stream_.reset(
+ ScannerStream::For(wrapper_, shared_->start_position() - offset,
+ shared_->end_position() - offset));
}
parse_info_.reset(new ParseInfo(zone_.get()));
parse_info_->set_isolate(isolate_);
@@ -76,6 +188,7 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
parse_info_->set_end_position(shared_->end_position());
parse_info_->set_unicode_cache(unicode_cache_.get());
parse_info_->set_language_mode(shared_->language_mode());
+ parse_info_->set_function_literal_id(shared_->function_literal_id());
parser_.reset(new Parser(parse_info_.get()));
Handle<ScopeInfo> outer_scope_info(
@@ -92,21 +205,17 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
}
void CompilerDispatcherJob::Parse() {
- DCHECK(can_parse_on_background_thread_ ||
- ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kReadyToParse);
COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
tracer_, kParse,
parse_info_->end_position() - parse_info_->start_position());
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Parsing\n", static_cast<void*>(this));
+ }
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
- std::unique_ptr<DisallowHandleDereference> no_deref;
- // If we can't parse on a background thread, we need to be able to deref the
- // source string.
- if (can_parse_on_background_thread_) {
- no_deref.reset(new DisallowHandleDereference());
- }
+ DisallowHandleDereference no_deref;
// Nullify the Isolate temporarily so that the parser doesn't accidentally
// use it.
@@ -126,11 +235,19 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kParsed);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeParsing);
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Finalizing parsing\n",
+ static_cast<void*>(this));
+ }
if (!source_.is_null()) {
i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
source_ = Handle<String>::null();
}
+ if (!wrapper_.is_null()) {
+ i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
+ wrapper_ = Handle<String>::null();
+ }
if (parse_info_->literal() == nullptr) {
status_ = CompileJobStatus::kFailed;
@@ -170,6 +287,10 @@ bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kReadyToAnalyse);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kPrepareToCompile);
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Preparing to compile\n",
+ static_cast<void*>(this));
+ }
compile_info_.reset(
new CompilationInfo(parse_info_.get(), Handle<JSFunction>::null()));
@@ -187,18 +308,18 @@ bool CompilerDispatcherJob::PrepareToCompileOnMainThread() {
return false;
}
- can_compile_on_background_thread_ =
- compile_job_->can_execute_on_background_thread();
+ CHECK(compile_job_->can_execute_on_background_thread());
status_ = CompileJobStatus::kReadyToCompile;
return true;
}
void CompilerDispatcherJob::Compile() {
DCHECK(status() == CompileJobStatus::kReadyToCompile);
- DCHECK(can_compile_on_background_thread_ ||
- ThreadId::Current().Equals(isolate_->thread_id()));
COMPILER_DISPATCHER_TRACE_SCOPE_WITH_NUM(
tracer_, kCompile, parse_info_->literal()->ast_node_count());
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Compiling\n", static_cast<void*>(this));
+ }
// Disallowing of handle dereference and heap access dealt with in
// CompilationJob::ExecuteJob.
@@ -218,6 +339,10 @@ bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
DCHECK(status() == CompileJobStatus::kCompiled);
COMPILER_DISPATCHER_TRACE_SCOPE(tracer_, kFinalizeCompiling);
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Finalizing compiling\n",
+ static_cast<void*>(this));
+ }
if (compile_job_->state() == CompilationJob::State::kFailed ||
!Compiler::FinalizeCompilationJob(compile_job_.release())) {
@@ -239,22 +364,66 @@ bool CompilerDispatcherJob::FinalizeCompilingOnMainThread() {
void CompilerDispatcherJob::ResetOnMainThread() {
DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ if (trace_compiler_dispatcher_jobs_) {
+ PrintF("CompilerDispatcherJob[%p]: Resetting\n", static_cast<void*>(this));
+ }
+
parser_.reset();
unicode_cache_.reset();
character_stream_.reset();
parse_info_.reset();
- zone_.reset();
handles_from_parsing_.reset();
compile_info_.reset();
compile_job_.reset();
+ zone_.reset();
if (!source_.is_null()) {
i::GlobalHandles::Destroy(Handle<Object>::cast(source_).location());
source_ = Handle<String>::null();
}
+ if (!wrapper_.is_null()) {
+ i::GlobalHandles::Destroy(Handle<Object>::cast(wrapper_).location());
+ wrapper_ = Handle<String>::null();
+ }
status_ = CompileJobStatus::kInitial;
}
+double CompilerDispatcherJob::EstimateRuntimeOfNextStepInMs() const {
+ switch (status_) {
+ case CompileJobStatus::kInitial:
+ return tracer_->EstimatePrepareToParseInMs();
+
+ case CompileJobStatus::kReadyToParse:
+ return tracer_->EstimateParseInMs(parse_info_->end_position() -
+ parse_info_->start_position());
+
+ case CompileJobStatus::kParsed:
+ return tracer_->EstimateFinalizeParsingInMs();
+
+ case CompileJobStatus::kReadyToAnalyse:
+ return tracer_->EstimatePrepareToCompileInMs();
+
+ case CompileJobStatus::kReadyToCompile:
+ return tracer_->EstimateCompileInMs(
+ parse_info_->literal()->ast_node_count());
+
+ case CompileJobStatus::kCompiled:
+ return tracer_->EstimateFinalizeCompilingInMs();
+
+ case CompileJobStatus::kFailed:
+ case CompileJobStatus::kDone:
+ return 0.0;
+ }
+
+ UNREACHABLE();
+ return 0.0;
+}
+
+void CompilerDispatcherJob::ShortPrint() {
+ DCHECK(ThreadId::Current().Equals(isolate_->thread_id()));
+ shared_->ShortPrint();
+}
+
} // namespace internal
} // namespace v8
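
The OneByteWrapper/TwoByteWrapper resources above exist so the scanner can
read the source off-heap from a background thread. A hypothetical
embedder-style sketch of the same idea via the public API (the job itself goes
through Factory::NewExternalStringFromOneByte instead; kSource and WrapSource
are invented for the example):

  #include "include/v8.h"

  static const char kSource[] = "function f() { return 1; }";

  v8::MaybeLocal<v8::String> WrapSource(v8::Isolate* isolate) {
    // OneByteWrapper is the resource class from the diff above. The resource
    // must outlive the string; the job stores it in source_wrapper_ for
    // exactly that reason.
    auto* resource =
        new OneByteWrapper(kSource, static_cast<int>(sizeof(kSource) - 1));
    return v8::String::NewExternalOneByte(isolate, resource);
  }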
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index 7f4c6ced71..e0a2677f8e 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "include/v8.h"
#include "src/base/macros.h"
#include "src/globals.h"
#include "src/handles.h"
@@ -40,19 +41,16 @@ enum class CompileJobStatus {
class V8_EXPORT_PRIVATE CompilerDispatcherJob {
public:
- CompilerDispatcherJob(Isolate* isolate, Handle<SharedFunctionInfo> shared,
+ CompilerDispatcherJob(Isolate* isolate, CompilerDispatcherTracer* tracer,
+ Handle<SharedFunctionInfo> shared,
size_t max_stack_size);
~CompilerDispatcherJob();
CompileJobStatus status() const { return status_; }
- bool can_parse_on_background_thread() const {
- return can_parse_on_background_thread_;
- }
- // Should only be called after kReadyToCompile.
- bool can_compile_on_background_thread() const {
- DCHECK(compile_job_.get());
- return can_compile_on_background_thread_;
- }
+
+ // Returns true if this CompilerDispatcherJob was created for the given
+ // function.
+ bool IsAssociatedWith(Handle<SharedFunctionInfo> shared) const;
// Transition from kInitial to kReadyToParse.
void PrepareToParseOnMainThread();
@@ -78,6 +76,13 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
// Transition from any state to kInitial and free all resources.
void ResetOnMainThread();
+ // Estimate how long the next step will take using the tracer.
+ double EstimateRuntimeOfNextStepInMs() const;
+
+ // Even though the name does not imply this, ShortPrint() must only be invoked
+ // on the main thread.
+ void ShortPrint();
+
private:
FRIEND_TEST(CompilerDispatcherJobTest, ScopeChain);
@@ -86,6 +91,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
CompilerDispatcherTracer* tracer_;
Handle<SharedFunctionInfo> shared_; // Global handle.
Handle<String> source_; // Global handle.
+ Handle<String> wrapper_; // Global handle.
+ std::unique_ptr<v8::String::ExternalStringResourceBase> source_wrapper_;
size_t max_stack_size_;
// Members required for parsing.
@@ -100,8 +107,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
std::unique_ptr<CompilationInfo> compile_info_;
std::unique_ptr<CompilationJob> compile_job_;
- bool can_parse_on_background_thread_;
- bool can_compile_on_background_thread_;
+ bool trace_compiler_dispatcher_jobs_;
DISALLOW_COPY_AND_ASSIGN(CompilerDispatcherJob);
};
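
For reference, the job lifecycle implied by the interface above (main-thread
steps follow the transitions documented in the header; Parse() and Compile()
may run on any thread, per CanRunOnAnyThread() in compiler-dispatcher.cc
below):

  kInitial        --PrepareToParseOnMainThread()-->   kReadyToParse
  kReadyToParse   --Parse()-->                        kParsed
  kParsed         --FinalizeParsingOnMainThread()-->  kReadyToAnalyse
  kReadyToAnalyse --PrepareToCompileOnMainThread()--> kReadyToCompile
  kReadyToCompile --Compile()-->                      kCompiled
  kCompiled       --FinalizeCompilingOnMainThread()-> kDone or kFailed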
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
index f8af05feb9..0703e016e9 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.cc
@@ -5,6 +5,7 @@
#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
#include "src/isolate.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -16,17 +17,14 @@ double MonotonicallyIncreasingTimeInMs() {
static_cast<double>(base::Time::kMillisecondsPerSecond);
}
+const double kEstimatedRuntimeWithoutData = 1.0;
+
} // namespace
CompilerDispatcherTracer::Scope::Scope(CompilerDispatcherTracer* tracer,
ScopeID scope_id, size_t num)
: tracer_(tracer), scope_id_(scope_id), num_(num) {
start_time_ = MonotonicallyIncreasingTimeInMs();
- // TODO(cbruni): remove once we fully moved to a trace-based system.
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- RuntimeCallStats::Enter(tracer_->runtime_call_stats_, &timer_,
- &RuntimeCallStats::CompilerDispatcher);
- }
}
CompilerDispatcherTracer::Scope::~Scope() {
@@ -51,10 +49,6 @@ CompilerDispatcherTracer::Scope::~Scope() {
tracer_->RecordFinalizeCompiling(elapsed);
break;
}
- // TODO(cbruni): remove once we fully moved to a trace-based system.
- if (V8_UNLIKELY(FLAG_runtime_stats)) {
- RuntimeCallStats::Leave(tracer_->runtime_call_stats_, &timer_);
- }
}
// static
@@ -129,26 +123,38 @@ double CompilerDispatcherTracer::EstimateParseInMs(size_t source_length) const {
return Estimate(parse_events_, source_length);
}
-double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() {
+double CompilerDispatcherTracer::EstimateFinalizeParsingInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
return Average(finalize_parsing_events_);
}
-double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() {
+double CompilerDispatcherTracer::EstimatePrepareToCompileInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
return Average(prepare_compile_events_);
}
-double CompilerDispatcherTracer::EstimateCompileInMs(size_t ast_size_in_bytes) {
+double CompilerDispatcherTracer::EstimateCompileInMs(
+ size_t ast_size_in_bytes) const {
base::LockGuard<base::Mutex> lock(&mutex_);
return Estimate(compile_events_, ast_size_in_bytes);
}
-double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() {
+double CompilerDispatcherTracer::EstimateFinalizeCompilingInMs() const {
base::LockGuard<base::Mutex> lock(&mutex_);
return Average(finalize_compiling_events_);
}
+void CompilerDispatcherTracer::DumpStatistics() const {
+  PrintF(
+      "CompilerDispatcherTracer: "
+      "prepare_parsing=%.2lfms parsing=%.2lfms/kb finalize_parsing=%.2lfms "
+      "prepare_compiling=%.2lfms compiling=%.2lfms/kb "
+      "finalize_compiling=%.2lfms\n",
+      EstimatePrepareToParseInMs(), EstimateParseInMs(1 * KB),
+      EstimateFinalizeParsingInMs(), EstimatePrepareToCompileInMs(),
+      EstimateCompileInMs(1 * KB), EstimateFinalizeCompilingInMs());
+}
+
double CompilerDispatcherTracer::Average(
const base::RingBuffer<double>& buffer) {
if (buffer.Count() == 0) return 0.0;
@@ -158,7 +164,7 @@ double CompilerDispatcherTracer::Average(
double CompilerDispatcherTracer::Estimate(
const base::RingBuffer<std::pair<size_t, double>>& buffer, size_t num) {
- if (buffer.Count() == 0) return 0.0;
+ if (buffer.Count() == 0) return kEstimatedRuntimeWithoutData;
std::pair<size_t, double> sum = buffer.Sum(
[](std::pair<size_t, double> a, std::pair<size_t, double> b) {
return std::make_pair(a.first + b.first, a.second + b.second);
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
index b505511eb9..3751d0da54 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-tracer.h
@@ -52,7 +52,6 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
ScopeID scope_id_;
size_t num_;
double start_time_;
- RuntimeCallTimer timer_;
DISALLOW_COPY_AND_ASSIGN(Scope);
};
@@ -69,10 +68,12 @@ class V8_EXPORT_PRIVATE CompilerDispatcherTracer {
double EstimatePrepareToParseInMs() const;
double EstimateParseInMs(size_t source_length) const;
- double EstimateFinalizeParsingInMs();
- double EstimatePrepareToCompileInMs();
- double EstimateCompileInMs(size_t ast_size_in_bytes);
- double EstimateFinalizeCompilingInMs();
+ double EstimateFinalizeParsingInMs() const;
+ double EstimatePrepareToCompileInMs() const;
+ double EstimateCompileInMs(size_t ast_size_in_bytes) const;
+ double EstimateFinalizeCompilingInMs() const;
+
+ void DumpStatistics() const;
private:
static double Average(const base::RingBuffer<double>& buffer);
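
These estimates feed CompilerDispatcherJob::EstimateRuntimeOfNextStepInMs().
The tail of Estimate() in the .cc hunk above is cut off; assuming it scales
the average recorded cost per unit up to the requested size, a standalone
sketch with a plain vector in place of base::RingBuffer:

  #include <utility>
  #include <vector>

  // (size, duration_ms) events -> estimated duration for `num` units.
  double Estimate(const std::vector<std::pair<size_t, double>>& events,
                  size_t num) {
    if (events.empty()) return 1.0;  // kEstimatedRuntimeWithoutData above
    size_t total_size = 0;
    double total_ms = 0.0;
    for (const auto& event : events) {
      total_size += event.first;
      total_ms += event.second;
    }
    return total_ms / static_cast<double>(total_size) * num;
  }

  // E.g. with events {1000, 2.0} and {3000, 4.0}, Estimate(events, 2000)
  // = 6.0 / 4000 * 2000 = 3.0 ms.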
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
new file mode 100644
index 0000000000..70edce9673
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -0,0 +1,631 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
+
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/base/platform/time.h"
+#include "src/cancelable-task.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+#include "src/flags.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+enum class ExceptionHandling { kSwallow, kThrow };
+
+bool DoNextStepOnMainThread(Isolate* isolate, CompilerDispatcherJob* job,
+ ExceptionHandling exception_handling) {
+ DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+ switch (job->status()) {
+ case CompileJobStatus::kInitial:
+ job->PrepareToParseOnMainThread();
+ break;
+
+ case CompileJobStatus::kReadyToParse:
+ job->Parse();
+ break;
+
+ case CompileJobStatus::kParsed:
+ job->FinalizeParsingOnMainThread();
+ break;
+
+ case CompileJobStatus::kReadyToAnalyse:
+ job->PrepareToCompileOnMainThread();
+ break;
+
+ case CompileJobStatus::kReadyToCompile:
+ job->Compile();
+ break;
+
+ case CompileJobStatus::kCompiled:
+ job->FinalizeCompilingOnMainThread();
+ break;
+
+ case CompileJobStatus::kFailed:
+ case CompileJobStatus::kDone:
+ break;
+ }
+
+ DCHECK_EQ(job->status() == CompileJobStatus::kFailed,
+ isolate->has_pending_exception());
+ if (job->status() == CompileJobStatus::kFailed &&
+ exception_handling == ExceptionHandling::kSwallow) {
+ isolate->clear_pending_exception();
+ }
+ return job->status() != CompileJobStatus::kFailed;
+}
+
+bool IsFinished(CompilerDispatcherJob* job) {
+ return job->status() == CompileJobStatus::kDone ||
+ job->status() == CompileJobStatus::kFailed;
+}
+
+bool CanRunOnAnyThread(CompilerDispatcherJob* job) {
+ return job->status() == CompileJobStatus::kReadyToParse ||
+ job->status() == CompileJobStatus::kReadyToCompile;
+}
+
+void DoNextStepOnBackgroundThread(CompilerDispatcherJob* job) {
+ DCHECK(CanRunOnAnyThread(job));
+ switch (job->status()) {
+ case CompileJobStatus::kReadyToParse:
+ job->Parse();
+ break;
+
+ case CompileJobStatus::kReadyToCompile:
+ job->Compile();
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Theoretically we get 50ms of idle time max; however, it's unlikely that
+// we'll get all of it, so try to be conservative.
+const double kMaxIdleTimeToExpectInMs = 40;
+
+class MemoryPressureTask : public CancelableTask {
+ public:
+ MemoryPressureTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher);
+ ~MemoryPressureTask() override;
+
+ // CancelableTask implementation.
+ void RunInternal() override;
+
+ private:
+ CompilerDispatcher* dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryPressureTask);
+};
+
+MemoryPressureTask::MemoryPressureTask(Isolate* isolate,
+ CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher)
+ : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+MemoryPressureTask::~MemoryPressureTask() {}
+
+void MemoryPressureTask::RunInternal() {
+ dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kDontBlock);
+}
+
+} // namespace
+
+class CompilerDispatcher::AbortTask : public CancelableTask {
+ public:
+ AbortTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher);
+ ~AbortTask() override;
+
+ // CancelableTask implementation.
+ void RunInternal() override;
+
+ private:
+ CompilerDispatcher* dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(AbortTask);
+};
+
+CompilerDispatcher::AbortTask::AbortTask(Isolate* isolate,
+ CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher)
+ : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::AbortTask::~AbortTask() {}
+
+void CompilerDispatcher::AbortTask::RunInternal() {
+ dispatcher_->AbortInactiveJobs();
+}
+
+class CompilerDispatcher::BackgroundTask : public CancelableTask {
+ public:
+ BackgroundTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher);
+ ~BackgroundTask() override;
+
+ // CancelableTask implementation.
+ void RunInternal() override;
+
+ private:
+ CompilerDispatcher* dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(BackgroundTask);
+};
+
+CompilerDispatcher::BackgroundTask::BackgroundTask(
+ Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher)
+ : CancelableTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::BackgroundTask::~BackgroundTask() {}
+
+void CompilerDispatcher::BackgroundTask::RunInternal() {
+ dispatcher_->DoBackgroundWork();
+}
+
+class CompilerDispatcher::IdleTask : public CancelableIdleTask {
+ public:
+ IdleTask(Isolate* isolate, CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher);
+ ~IdleTask() override;
+
+ // CancelableIdleTask implementation.
+ void RunInternal(double deadline_in_seconds) override;
+
+ private:
+ CompilerDispatcher* dispatcher_;
+
+ DISALLOW_COPY_AND_ASSIGN(IdleTask);
+};
+
+CompilerDispatcher::IdleTask::IdleTask(Isolate* isolate,
+ CancelableTaskManager* task_manager,
+ CompilerDispatcher* dispatcher)
+ : CancelableIdleTask(isolate, task_manager), dispatcher_(dispatcher) {}
+
+CompilerDispatcher::IdleTask::~IdleTask() {}
+
+void CompilerDispatcher::IdleTask::RunInternal(double deadline_in_seconds) {
+ dispatcher_->DoIdleWork(deadline_in_seconds);
+}
+
+CompilerDispatcher::CompilerDispatcher(Isolate* isolate, Platform* platform,
+ size_t max_stack_size)
+ : isolate_(isolate),
+ platform_(platform),
+ max_stack_size_(max_stack_size),
+ trace_compiler_dispatcher_(FLAG_trace_compiler_dispatcher),
+ tracer_(new CompilerDispatcherTracer(isolate_)),
+ task_manager_(new CancelableTaskManager()),
+ memory_pressure_level_(MemoryPressureLevel::kNone),
+ abort_(false),
+ idle_task_scheduled_(false),
+ num_scheduled_background_tasks_(0),
+ main_thread_blocking_on_job_(nullptr),
+ block_for_testing_(false),
+ semaphore_for_testing_(0) {
+ if (trace_compiler_dispatcher_ && !IsEnabled()) {
+ PrintF("CompilerDispatcher: dispatcher is disabled\n");
+ }
+}
+
+CompilerDispatcher::~CompilerDispatcher() {
+ // To avoid crashing in unit tests due to unfinished jobs.
+ AbortAll(BlockingBehavior::kBlock);
+ task_manager_->CancelAndWait();
+}
+
+bool CompilerDispatcher::Enqueue(Handle<SharedFunctionInfo> function) {
+ if (!IsEnabled()) return false;
+
+ DCHECK(FLAG_ignition);
+
+ if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
+ return false;
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (abort_) return false;
+ }
+
+ // We only handle functions (no eval / top-level code / wasm) that are
+ // attached to a script.
+ if (!function->script()->IsScript() || function->is_toplevel() ||
+ function->asm_function() || function->native()) {
+ return false;
+ }
+
+ if (IsEnqueued(function)) return true;
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: enqueuing ");
+ function->ShortPrint();
+ PrintF("\n");
+ }
+
+ std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
+ isolate_, tracer_.get(), function, max_stack_size_));
+ std::pair<int, int> key(Script::cast(function->script())->id(),
+ function->function_literal_id());
+ jobs_.insert(std::make_pair(key, std::move(job)));
+ ScheduleIdleTaskIfNeeded();
+ return true;
+}
+
+bool CompilerDispatcher::EnqueueAndStep(Handle<SharedFunctionInfo> function) {
+ if (!Enqueue(function)) return false;
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: stepping ");
+ function->ShortPrint();
+ PrintF("\n");
+ }
+ JobMap::const_iterator job = GetJobFor(function);
+ DoNextStepOnMainThread(isolate_, job->second.get(),
+ ExceptionHandling::kSwallow);
+ ConsiderJobForBackgroundProcessing(job->second.get());
+ return true;
+}
+
+bool CompilerDispatcher::IsEnabled() const {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ return FLAG_compiler_dispatcher && platform_->IdleTasksEnabled(v8_isolate);
+}
+
+bool CompilerDispatcher::IsEnqueued(Handle<SharedFunctionInfo> function) const {
+ return GetJobFor(function) != jobs_.end();
+}
+
+void CompilerDispatcher::WaitForJobIfRunningOnBackground(
+ CompilerDispatcherJob* job) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (running_background_jobs_.find(job) == running_background_jobs_.end()) {
+ pending_background_jobs_.erase(job);
+ return;
+ }
+ DCHECK_NULL(main_thread_blocking_on_job_);
+ main_thread_blocking_on_job_ = job;
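+ // This pairs with DoBackgroundWork below, which clears
+ // main_thread_blocking_on_job_ and signals main_thread_blocking_signal_
+ // once it has finished running this job.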
+ while (main_thread_blocking_on_job_ != nullptr) {
+ main_thread_blocking_signal_.Wait(&mutex_);
+ }
+ DCHECK(pending_background_jobs_.find(job) == pending_background_jobs_.end());
+ DCHECK(running_background_jobs_.find(job) == running_background_jobs_.end());
+}
+
+bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
+ JobMap::const_iterator job = GetJobFor(function);
+ CHECK(job != jobs_.end());
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: finishing ");
+ function->ShortPrint();
+ PrintF(" now\n");
+ }
+
+ WaitForJobIfRunningOnBackground(job->second.get());
+ while (!IsFinished(job->second.get())) {
+ DoNextStepOnMainThread(isolate_, job->second.get(),
+ ExceptionHandling::kThrow);
+ }
+ bool result = job->second->status() != CompileJobStatus::kFailed;
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: finished working on ");
+ function->ShortPrint();
+ PrintF(": %s\n", result ? "success" : "failure");
+ tracer_->DumpStatistics();
+ }
+
+ job->second->ResetOnMainThread();
+ jobs_.erase(job);
+ if (jobs_.empty()) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ abort_ = false;
+ }
+ return result;
+}
+
+void CompilerDispatcher::AbortAll(BlockingBehavior blocking) {
+ bool background_tasks_running =
+ task_manager_->TryAbortAll() == CancelableTaskManager::kTaskRunning;
+ if (!background_tasks_running || blocking == BlockingBehavior::kBlock) {
+ for (auto& it : jobs_) {
+ WaitForJobIfRunningOnBackground(it.second.get());
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: aborted ");
+ it.second->ShortPrint();
+ PrintF("\n");
+ }
+ it.second->ResetOnMainThread();
+ }
+ jobs_.clear();
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ DCHECK(pending_background_jobs_.empty());
+ DCHECK(running_background_jobs_.empty());
+ abort_ = false;
+ }
+ return;
+ }
+
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ abort_ = true;
+ pending_background_jobs_.clear();
+ }
+ AbortInactiveJobs();
+
+ // All running background jobs might already have scheduled idle tasks instead
+ // of abort tasks. Schedule a single abort task here to make sure they get
+ // processed as soon as possible (and not only once we next get idle time).
+ ScheduleAbortTask();
+}
+
+void CompilerDispatcher::AbortInactiveJobs() {
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ // Since we schedule two abort tasks per async abort, we might end up
+ // here with nothing left to do.
+ if (!abort_) return;
+ }
+ for (auto it = jobs_.begin(); it != jobs_.end();) {
+ auto job = it;
+ ++it;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (running_background_jobs_.find(job->second.get()) !=
+ running_background_jobs_.end()) {
+ continue;
+ }
+ }
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: aborted ");
+ job->second->ShortPrint();
+ PrintF("\n");
+ }
+ job->second->ResetOnMainThread();
+ jobs_.erase(job);
+ }
+ if (jobs_.empty()) {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ abort_ = false;
+ }
+}
+
+void CompilerDispatcher::MemoryPressureNotification(
+ v8::MemoryPressureLevel level, bool is_isolate_locked) {
+ MemoryPressureLevel previous = memory_pressure_level_.Value();
+ memory_pressure_level_.SetValue(level);
+ // If we're already under pressure, we haven't accepted new tasks meanwhile
+ // and can just return. If we're no longer under pressure, we're also done.
+ if (previous != MemoryPressureLevel::kNone ||
+ level == MemoryPressureLevel::kNone) {
+ return;
+ }
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: received memory pressure notification\n");
+ }
+ if (is_isolate_locked) {
+ AbortAll(BlockingBehavior::kDontBlock);
+ } else {
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (abort_) return;
+ // By going into abort mode here, and clearing the
+ // pending_background_jobs_, we at least keep existing background jobs
+ // from picking up more work before the MemoryPressureTask gets executed.
+ abort_ = true;
+ pending_background_jobs_.clear();
+ }
+ platform_->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate_),
+ new MemoryPressureTask(isolate_, task_manager_.get(), this));
+ }
+}
+
+CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
+ Handle<SharedFunctionInfo> shared) const {
+ if (!shared->script()->IsScript()) return jobs_.end();
+ std::pair<int, int> key(Script::cast(shared->script())->id(),
+ shared->function_literal_id());
+ auto range = jobs_.equal_range(key);
+ for (auto job = range.first; job != range.second; ++job) {
+ if (job->second->IsAssociatedWith(shared)) return job;
+ }
+ return jobs_.end();
+}
+
+void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ DCHECK(platform_->IdleTasksEnabled(v8_isolate));
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (idle_task_scheduled_) return;
+ idle_task_scheduled_ = true;
+ }
+ platform_->CallIdleOnForegroundThread(
+ v8_isolate, new IdleTask(isolate_, task_manager_.get(), this));
+}
+
+void CompilerDispatcher::ScheduleIdleTaskIfNeeded() {
+ if (jobs_.empty()) return;
+ ScheduleIdleTaskFromAnyThread();
+}
+
+void CompilerDispatcher::ScheduleAbortTask() {
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ platform_->CallOnForegroundThread(
+ v8_isolate, new AbortTask(isolate_, task_manager_.get(), this));
+}
+
+void CompilerDispatcher::ConsiderJobForBackgroundProcessing(
+ CompilerDispatcherJob* job) {
+ if (!CanRunOnAnyThread(job)) return;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ pending_background_jobs_.insert(job);
+ }
+ ScheduleMoreBackgroundTasksIfNeeded();
+}
+
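+// Illustrative throttling example (hypothetical numbers): with 4 available
+// background threads and 4 tasks already scheduled, the early return below
+// keeps a fifth BackgroundTask from being posted until one of the scheduled
+// tasks starts running and decrements num_scheduled_background_tasks_.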
+void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
+ if (FLAG_single_threaded) return;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ if (pending_background_jobs_.empty()) return;
+ if (platform_->NumberOfAvailableBackgroundThreads() <=
+ num_scheduled_background_tasks_) {
+ return;
+ }
+ ++num_scheduled_background_tasks_;
+ }
+ platform_->CallOnBackgroundThread(
+ new BackgroundTask(isolate_, task_manager_.get(), this),
+ v8::Platform::kShortRunningTask);
+}
+
+void CompilerDispatcher::DoBackgroundWork() {
+ CompilerDispatcherJob* job = nullptr;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ --num_scheduled_background_tasks_;
+ if (!pending_background_jobs_.empty()) {
+ auto it = pending_background_jobs_.begin();
+ job = *it;
+ pending_background_jobs_.erase(it);
+ running_background_jobs_.insert(job);
+ }
+ }
+ if (job == nullptr) return;
+
+ if (V8_UNLIKELY(block_for_testing_.Value())) {
+ block_for_testing_.SetValue(false);
+ semaphore_for_testing_.Wait();
+ }
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: doing background work\n");
+ }
+
+ DoNextStepOnBackgroundThread(job);
+
+ ScheduleMoreBackgroundTasksIfNeeded();
+ // Unconditionally schedule an idle task, as all background steps have to be
+ // followed by a main thread step.
+ ScheduleIdleTaskFromAnyThread();
+
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ running_background_jobs_.erase(job);
+
+ if (running_background_jobs_.empty() && abort_) {
+ // This is the last background job that finished. The abort task
+ // scheduled by AbortAll might already have run, so schedule another
+ // one to be on the safe side.
+ ScheduleAbortTask();
+ }
+
+ if (main_thread_blocking_on_job_ == job) {
+ main_thread_blocking_on_job_ = nullptr;
+ main_thread_blocking_signal_.NotifyOne();
+ }
+ }
+ // Don't touch |this| anymore after this point, as it might have been
+ // deleted.
+}
+
+void CompilerDispatcher::DoIdleWork(double deadline_in_seconds) {
+ bool aborted = false;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ idle_task_scheduled_ = false;
+ aborted = abort_;
+ }
+
+ if (aborted) {
+ AbortInactiveJobs();
+ return;
+ }
+
+ // Number of jobs that are unlikely to make progress during any idle callback
+ // due to their estimated duration.
+ size_t too_long_jobs = 0;
+
+ // Iterate over all available jobs while idle time remains. For each job,
+ // decide whether to 1) skip it (if it would take too long), 2) erase it
+ // (if it's finished), or 3) make progress on it.
+ double idle_time_in_seconds =
+ deadline_in_seconds - platform_->MonotonicallyIncreasingTime();
+
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: received %0.1lfms of idle time\n",
+ idle_time_in_seconds *
+ static_cast<double>(base::Time::kMillisecondsPerSecond));
+ }
+ for (auto job = jobs_.begin();
+ job != jobs_.end() && idle_time_in_seconds > 0.0;
+ idle_time_in_seconds =
+ deadline_in_seconds - platform_->MonotonicallyIncreasingTime()) {
+ // Don't work on jobs that are being worked on by background tasks.
+ // Similarly, remove jobs we work on from the set of available background
+ // jobs.
+ std::unique_ptr<base::LockGuard<base::Mutex>> lock(
+ new base::LockGuard<base::Mutex>(&mutex_));
+ if (running_background_jobs_.find(job->second.get()) !=
+ running_background_jobs_.end()) {
+ ++job;
+ continue;
+ }
+ auto it = pending_background_jobs_.find(job->second.get());
+ double estimate_in_ms = job->second->EstimateRuntimeOfNextStepInMs();
+ if (idle_time_in_seconds <
+ (estimate_in_ms /
+ static_cast<double>(base::Time::kMillisecondsPerSecond))) {
+ // There's not enough time left to run this job's next step now. Estimate
+ // whether it would have fit into a large idle task, to decide whether
+ // asking for another idle callback is worthwhile.
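+ // For example (illustrative numbers): with 3ms of idle time left
+ // (0.003s) and a step estimated at 8ms, 0.003 < 0.008, so the job is
+ // skipped; since 8ms is below kMaxIdleTimeToExpectInMs it doesn't count
+ // as a "too long" job, and a future idle round may still finish it.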
+ if (estimate_in_ms > kMaxIdleTimeToExpectInMs) ++too_long_jobs;
+ if (it == pending_background_jobs_.end()) {
+ lock.reset();
+ ConsiderJobForBackgroundProcessing(job->second.get());
+ }
+ ++job;
+ } else if (IsFinished(job->second.get())) {
+ DCHECK(it == pending_background_jobs_.end());
+ if (trace_compiler_dispatcher_) {
+ PrintF("CompilerDispatcher: finished working on ");
+ job->second->ShortPrint();
+ PrintF(": %s\n", job->second->status() == CompileJobStatus::kDone
+ ? "success"
+ : "failure");
+ tracer_->DumpStatistics();
+ }
+ job->second->ResetOnMainThread();
+ job = jobs_.erase(job);
+ continue;
+ } else {
+ // Do one step, and keep processing the job (as we don't advance the
+ // iterator).
+ if (it != pending_background_jobs_.end()) {
+ pending_background_jobs_.erase(it);
+ }
+ lock.reset();
+ DoNextStepOnMainThread(isolate_, job->second.get(),
+ ExceptionHandling::kSwallow);
+ }
+ }
+ if (jobs_.size() > too_long_jobs) ScheduleIdleTaskIfNeeded();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
new file mode 100644
index 0000000000..41d4c83d52
--- /dev/null
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -0,0 +1,175 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+#define V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
+
+#include <map>
+#include <memory>
+#include <unordered_set>
+#include <utility>
+
+#include "src/base/atomic-utils.h"
+#include "src/base/macros.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+#include "src/globals.h"
+#include "testing/gtest/include/gtest/gtest_prod.h"
+
+namespace v8 {
+
+class Platform;
+enum class MemoryPressureLevel;
+
+namespace internal {
+
+class CancelableTaskManager;
+class CompilerDispatcherJob;
+class CompilerDispatcherTracer;
+class Isolate;
+class SharedFunctionInfo;
+
+template <typename T>
+class Handle;
+
+// The CompilerDispatcher uses a combination of idle tasks and background tasks
+// to parse and compile lazily parsed functions.
+//
+// As both parsing and compilation currently require a preparation and
+// finalization step that happens on the main thread, every task has to be
+// advanced during idle time first. Depending on the properties of the task,
+// it can then be parsed or compiled either on a background thread or during
+// idle time. Finally, it has to be finalized during idle time again.
+//
+// CompilerDispatcher::jobs_ maintains the list of all CompilerDispatcherJobs
+// the CompilerDispatcher knows about.
+//
+// CompilerDispatcher::pending_background_jobs_ contains the set of
+// CompilerDispatcherJobs that can be processed on a background thread.
+//
+// CompilerDispatcher::running_background_jobs_ contains the set of
+// CompilerDispatcherJobs that are currently being processed on a background
+// thread.
+//
+// CompilerDispatcher::DoIdleWork tries to advance as many jobs out of jobs_ as
+// possible during idle time. If a job can't be advanced, but is suitable for
+// background processing, it fires off background tasks.
+//
+// CompilerDispatcher::DoBackgroundWork advances one of the pending jobs, and
+// then spins off another idle task to potentially do the final step on the
+// main thread.
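+//
+// A rough usage sketch (hypothetical embedder-side flow; the method names
+// below are the ones declared in this class, everything else is
+// illustrative):
+//
+//   CompilerDispatcher* dispatcher = ...;  // owned by the isolate
+//   if (dispatcher->Enqueue(shared)) {
+//     // Idle and background tasks now advance the job step by step. If the
+//     // function is needed before they finish, force completion:
+//     bool success = dispatcher->FinishNow(shared);
+//   }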
+class V8_EXPORT_PRIVATE CompilerDispatcher {
+ public:
+ enum class BlockingBehavior { kBlock, kDontBlock };
+
+ CompilerDispatcher(Isolate* isolate, Platform* platform,
+ size_t max_stack_size);
+ ~CompilerDispatcher();
+
+ // Returns true if a job was enqueued.
+ bool Enqueue(Handle<SharedFunctionInfo> function);
+
+ // Like Enqueue, but also advances the job so that it can, if possible,
+ // continue running on a background thread. Returns true if the job was
+ // enqueued.
+ bool EnqueueAndStep(Handle<SharedFunctionInfo> function);
+
+ // Returns true if there is a pending job for the given function.
+ bool IsEnqueued(Handle<SharedFunctionInfo> function) const;
+
+ // Blocks until the given function is compiled (and does so as fast as
+ // possible). Returns true if the compile job was successful.
+ bool FinishNow(Handle<SharedFunctionInfo> function);
+
+ // Aborts a given job. Blocks if requested.
+ void Abort(Handle<SharedFunctionInfo> function, BlockingBehavior blocking);
+
+ // Aborts all jobs. Blocks if requested.
+ void AbortAll(BlockingBehavior blocking);
+
+ // Memory pressure notifications from the embedder.
+ void MemoryPressureNotification(v8::MemoryPressureLevel level,
+ bool is_isolate_locked);
+
+ private:
+ FRIEND_TEST(CompilerDispatcherTest, EnqueueAndStep);
+ FRIEND_TEST(CompilerDispatcherTest, IdleTaskSmallIdleTime);
+ FRIEND_TEST(CompilerDispatcherTest, CompileOnBackgroundThread);
+ FRIEND_TEST(CompilerDispatcherTest, FinishNowWithBackgroundTask);
+ FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllPendingBackgroundTask);
+ FRIEND_TEST(CompilerDispatcherTest, AsyncAbortAllRunningBackgroundTask);
+ FRIEND_TEST(CompilerDispatcherTest, FinishNowDuringAbortAll);
+
+ typedef std::multimap<std::pair<int, int>,
+ std::unique_ptr<CompilerDispatcherJob>>
+ JobMap;
+ class AbortTask;
+ class BackgroundTask;
+ class IdleTask;
+
+ void WaitForJobIfRunningOnBackground(CompilerDispatcherJob* job);
+ bool IsEnabled() const;
+ void AbortInactiveJobs();
+ JobMap::const_iterator GetJobFor(Handle<SharedFunctionInfo> shared) const;
+ void ConsiderJobForBackgroundProcessing(CompilerDispatcherJob* job);
+ void ScheduleMoreBackgroundTasksIfNeeded();
+ void ScheduleIdleTaskFromAnyThread();
+ void ScheduleIdleTaskIfNeeded();
+ void ScheduleAbortTask();
+ void DoBackgroundWork();
+ void DoIdleWork(double deadline_in_seconds);
+
+ Isolate* isolate_;
+ Platform* platform_;
+ size_t max_stack_size_;
+
+ // Copy of FLAG_trace_compiler_dispatcher to allow access from any thread.
+ bool trace_compiler_dispatcher_;
+
+ std::unique_ptr<CompilerDispatcherTracer> tracer_;
+
+ std::unique_ptr<CancelableTaskManager> task_manager_;
+
+ // Mapping from (script id, function literal id) to job. We use a multimap,
+ // as script id is not necessarily unique.
+ JobMap jobs_;
+
+ base::AtomicValue<v8::MemoryPressureLevel> memory_pressure_level_;
+
+ // The following members can be accessed from any thread. Methods need to hold
+ // the mutex |mutex_| while accessing them.
+ base::Mutex mutex_;
+
+ // True if the dispatcher is in the process of aborting running tasks.
+ bool abort_;
+
+ bool idle_task_scheduled_;
+
+ // Number of currently scheduled BackgroundTask objects.
+ size_t num_scheduled_background_tasks_;
+
+ // The set of CompilerDispatcherJobs that can be advanced on any thread.
+ std::unordered_set<CompilerDispatcherJob*> pending_background_jobs_;
+
+ // The set of CompilerDispatcherJobs currently processed on background
+ // threads.
+ std::unordered_set<CompilerDispatcherJob*> running_background_jobs_;
+
+ // If not nullptr, then the main thread waits for the task processing
+ // this job, and blocks on the ConditionVariable main_thread_blocking_signal_.
+ CompilerDispatcherJob* main_thread_blocking_on_job_;
+ base::ConditionVariable main_thread_blocking_signal_;
+
+ // Test support.
+ base::AtomicValue<bool> block_for_testing_;
+ base::Semaphore semaphore_for_testing_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilerDispatcher);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_DISPATCHER_COMPILER_DISPATCHER_H_
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 3435f530c2..5e23be5e5c 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -28,7 +28,7 @@
#include "src/isolate-inl.h"
#include "src/log-inl.h"
#include "src/messages.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/runtime-profiler.h"
@@ -38,8 +38,6 @@
namespace v8 {
namespace internal {
-
-
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
@@ -70,6 +68,15 @@ struct ScopedTimer {
// ----------------------------------------------------------------------------
// Implementation of CompilationJob
+CompilationJob::CompilationJob(Isolate* isolate, CompilationInfo* info,
+ const char* compiler_name, State initial_state)
+ : info_(info),
+ isolate_thread_id_(isolate->thread_id()),
+ compiler_name_(compiler_name),
+ state_(initial_state),
+ stack_limit_(isolate->stack_guard()->real_climit()),
+ executed_on_background_thread_(false) {}
+
CompilationJob::Status CompilationJob::PrepareJob() {
DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
DisallowJavascriptExecution no_js(isolate());
@@ -98,8 +105,10 @@ CompilationJob::Status CompilationJob::ExecuteJob() {
no_handles.reset(new DisallowHandleAllocation());
no_deref.reset(new DisallowHandleDereference());
no_dependency_change.reset(new DisallowCodeDependencyChange());
+ executed_on_background_thread_ =
+ !ThreadId::Current().Equals(isolate_thread_id_);
} else {
- DCHECK(ThreadId::Current().Equals(info()->isolate()->thread_id()));
+ DCHECK(ThreadId::Current().Equals(isolate_thread_id_));
}
// Delegate to the underlying implementation.
@@ -284,7 +293,7 @@ void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
void EnsureFeedbackMetadata(CompilationInfo* info) {
DCHECK(info->has_shared_info());
- // If no type feedback metadata exists, we create it now. At this point the
+ // If no type feedback metadata exists, create it. At this point the
// AstNumbering pass has already run. Note the snapshot can contain outdated
// vectors for a different configuration, hence we also recreate a new vector
// when the function is not compiled (i.e. no code was serialized).
@@ -305,7 +314,7 @@ void EnsureFeedbackMetadata(CompilationInfo* info) {
bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
bool optimization_disabled = shared->optimization_disabled();
- bool dont_crankshaft = shared->dont_crankshaft();
+ bool must_use_ignition_turbo = shared->must_use_ignition_turbo();
// Check the enabling conditions for Turbofan.
// 1. "use asm" code.
@@ -314,7 +323,7 @@ bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
// 2. Fallback for features unsupported by Crankshaft.
bool is_unsupported_by_crankshaft_but_turbofanable =
- dont_crankshaft && strcmp(FLAG_turbo_filter, "~~") == 0 &&
+ must_use_ignition_turbo && strcmp(FLAG_turbo_filter, "~~") == 0 &&
!optimization_disabled;
// 3. Explicitly enabled by the command-line filter.
@@ -326,32 +335,44 @@ bool UseTurboFan(Handle<SharedFunctionInfo> shared) {
bool ShouldUseIgnition(CompilationInfo* info) {
DCHECK(info->has_shared_info());
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+
+ // Code which can't be supported by the old pipeline should use Ignition.
+ if (shared->must_use_ignition_turbo()) return true;
+
+ // Resumable functions are not supported by {FullCodeGenerator}; suspended
+ // activations stored as {JSGeneratorObject} on the heap always assume the
+ // underlying code to be based on the bytecode array.
+ DCHECK(!IsResumableFunction(shared->kind()));
// Skip Ignition for asm.js functions.
- if (info->shared_info()->asm_function()) {
+ if (shared->asm_function()) return false;
+
+ // Skip Ignition for asm wasm code.
+ if (FLAG_validate_asm && shared->HasAsmWasmData()) {
return false;
}
// When requesting debug code as a replacement for existing code, we provide
// the same kind as the existing code (to prevent implicit tier-change).
- if (info->is_debug() && info->shared_info()->is_compiled()) {
- return !info->shared_info()->HasBaselineCode();
+ if (info->is_debug() && shared->is_compiled()) {
+ return !shared->HasBaselineCode();
}
// Code destined for TurboFan should be compiled with Ignition first.
- if (UseTurboFan(info->shared_info())) return true;
+ if (UseTurboFan(shared)) return true;
// Only use Ignition for any other function if FLAG_ignition is true.
if (!FLAG_ignition) return false;
// Checks whether top level functions should be passed by the filter.
- if (info->shared_info()->is_toplevel()) {
+ if (shared->is_toplevel()) {
Vector<const char> filter = CStrVector(FLAG_ignition_filter);
return (filter.length() == 0) || (filter.length() == 1 && filter[0] == '*');
}
// Finally respect the filter.
- return info->shared_info()->PassesFilter(FLAG_ignition_filter);
+ return shared->PassesFilter(FLAG_ignition_filter);
}
CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
@@ -360,7 +381,6 @@ CompilationJob* GetUnoptimizedCompilationJob(CompilationInfo* info) {
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
- EnsureFeedbackMetadata(info);
if (ShouldUseIgnition(info)) {
return interpreter::Interpreter::NewCompilationJob(info);
} else {
@@ -407,18 +427,42 @@ void InstallUnoptimizedCode(CompilationInfo* info) {
CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
CompilationJob::Status status = job->FinalizeJob();
if (status == CompilationJob::SUCCEEDED) {
+ EnsureFeedbackMetadata(job->info());
InstallUnoptimizedCode(job->info());
job->RecordUnoptimizedCompilationStats();
}
return status;
}
+bool Renumber(ParseInfo* parse_info,
+ Compiler::EagerInnerFunctionLiterals* eager_literals) {
+ RuntimeCallTimerScope runtimeTimer(parse_info->isolate(),
+ &RuntimeCallStats::CompileRenumber);
+ if (!AstNumbering::Renumber(
+ parse_info->isolate()->stack_guard()->real_climit(),
+ parse_info->zone(), parse_info->literal(), eager_literals)) {
+ return false;
+ }
+ Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
+ if (!shared_info.is_null()) {
+ FunctionLiteral* lit = parse_info->literal();
+ shared_info->set_ast_node_count(lit->ast_node_count());
+ if (lit->dont_optimize_reason() != kNoReason) {
+ shared_info->DisableOptimization(lit->dont_optimize_reason());
+ }
+ if (lit->flags() & AstProperties::kMustUseIgnitionTurbo) {
+ shared_info->set_must_use_ignition_turbo(true);
+ }
+ }
+ return true;
+}
+
bool GenerateUnoptimizedCode(CompilationInfo* info) {
if (FLAG_validate_asm && info->scope()->asm_module() &&
!info->shared_info()->is_asm_wasm_broken() && !info->is_debug()) {
EnsureFeedbackMetadata(info);
MaybeHandle<FixedArray> wasm_data;
- wasm_data = AsmJs::ConvertAsmToWasm(info->parse_info());
+ wasm_data = AsmJs::CompileAsmViaWasm(info);
if (!wasm_data.is_null()) {
info->shared_info()->set_asm_wasm_data(*wasm_data.ToHandleChecked());
info->SetCode(info->isolate()->builtins()->InstantiateAsmJs());
@@ -437,23 +481,104 @@ bool GenerateUnoptimizedCode(CompilationInfo* info) {
return true;
}
+bool CompileUnoptimizedInnerFunctionsRecursively(
+ ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals,
+ CompilationInfo* outer_info) {
+ Isolate* isolate = outer_info->isolate();
+ Handle<Script> script = outer_info->script();
+ RuntimeCallTimerScope runtimeTimer(isolate,
+ &RuntimeCallStats::CompileInnerFunction);
+
+ for (auto it : *literals) {
+ FunctionLiteral* literal = it->value();
+
+ // Find any previously allocated shared function info for the given literal.
+ Handle<SharedFunctionInfo> shared;
+ MaybeHandle<SharedFunctionInfo> maybe_existing =
+ script->FindSharedFunctionInfo(isolate, literal);
+ if (maybe_existing.ToHandle(&shared)) {
+ DCHECK(!shared->is_toplevel());
+ // If we found an existing shared function info with compiled code,
+ // we are done.
+ if (shared->is_compiled()) continue;
+ } else {
+ shared =
+ isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script);
+ shared->set_is_toplevel(false);
+ }
+
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ ParseInfo parse_info(&zone, script);
+ parse_info.set_literal(literal);
+ parse_info.set_shared_info(shared);
+ parse_info.set_function_literal_id(shared->function_literal_id());
+ parse_info.set_language_mode(literal->scope()->language_mode());
+ parse_info.set_ast_value_factory(
+ outer_info->parse_info()->ast_value_factory());
+ parse_info.set_ast_value_factory_owned(false);
+
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
+ if (outer_info->will_serialize()) info.PrepareForSerializing();
+ if (outer_info->is_debug()) info.MarkAsDebug();
+
+ Compiler::EagerInnerFunctionLiterals inner_literals;
+ if (!Renumber(&parse_info, &inner_literals) ||
+ !CompileUnoptimizedInnerFunctionsRecursively(&inner_literals,
+ outer_info) ||
+ !GenerateUnoptimizedCode(&info)) {
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
+ return false;
+ }
+
+ DCHECK(!info.code().is_null());
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
+ if (literal->should_be_used_once_hint()) {
+ info.code()->MarkToBeExecutedOnce(isolate);
+ }
+ }
+ return true;
+}
+
bool CompileUnoptimizedCode(CompilationInfo* info) {
- DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info->parse_info()) ||
+ Isolate* isolate = info->isolate();
+ DCHECK(AllowCompilation::IsAllowed(isolate));
+
+ Compiler::EagerInnerFunctionLiterals inner_literals;
+ if (!Compiler::Analyze(info->parse_info(), &inner_literals) ||
+ !CompileUnoptimizedInnerFunctionsRecursively(&inner_literals, info) ||
!GenerateUnoptimizedCode(info)) {
- Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
}
+
return true;
}
+void EnsureSharedFunctionInfosArrayOnScript(ParseInfo* info) {
+ DCHECK(info->is_toplevel());
+ DCHECK(!info->script().is_null());
+ if (info->script()->shared_function_infos()->length() > 0) {
+ DCHECK_EQ(info->script()->shared_function_infos()->length(),
+ info->max_function_literal_id() + 1);
+ return;
+ }
+ Isolate* isolate = info->isolate();
+ Handle<FixedArray> infos(
+ isolate->factory()->NewFixedArray(info->max_function_literal_id() + 1));
+ info->script()->set_shared_function_infos(*infos);
+}
+
MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
+ RuntimeCallTimerScope runtimeTimer(
+ info->isolate(), &RuntimeCallStats::CompileGetUnoptimizedCode);
VMState<COMPILER> state(info->isolate());
PostponeInterruptsScope postpone(info->isolate());
// Parse and update CompilationInfo with the results.
- if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
+ if (!parsing::ParseAny(info->parse_info())) return MaybeHandle<Code>();
+ if (info->parse_info()->is_toplevel()) {
+ EnsureSharedFunctionInfosArrayOnScript(info->parse_info());
+ }
DCHECK_EQ(info->shared_info()->language_mode(),
info->literal()->language_mode());
@@ -468,6 +593,9 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(CompilationInfo* info) {
MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
Handle<JSFunction> function, BailoutId osr_ast_id) {
+ RuntimeCallTimerScope runtimeTimer(
+ function->GetIsolate(),
+ &RuntimeCallStats::CompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
CodeAndLiterals cached = shared->SearchOptimizedCodeMap(
@@ -505,25 +633,6 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
literals, info->osr_ast_id());
}
-bool Renumber(ParseInfo* parse_info) {
- if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
- parse_info->literal())) {
- return false;
- }
- Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
- if (!shared_info.is_null()) {
- FunctionLiteral* lit = parse_info->literal();
- shared_info->set_ast_node_count(lit->ast_node_count());
- if (lit->dont_optimize_reason() != kNoReason) {
- shared_info->DisableOptimization(lit->dont_optimize_reason());
- }
- if (lit->flags() & AstProperties::kDontCrankshaft) {
- shared_info->set_dont_crankshaft(true);
- }
- }
- return true;
-}
-
bool GetOptimizedCodeNow(CompilationJob* job) {
CompilationInfo* info = job->info();
Isolate* isolate = info->isolate();
@@ -640,6 +749,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
}
// Reset profiler ticks, function is no longer considered hot.
+ DCHECK(shared->is_compiled());
if (shared->HasBaselineCode()) {
shared->code()->set_profiler_ticks(0);
} else if (shared->HasBytecodeArray()) {
@@ -668,7 +778,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
- info->AbortOptimization(kOptimizedTooManyTimes);
+ info->AbortOptimization(kDeoptimizedTooManyTimes);
return MaybeHandle<Code>();
}
@@ -679,10 +789,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
// TurboFan can optimize directly from existing bytecode.
if (use_turbofan && ShouldUseIgnition(info)) {
if (info->is_osr() && !ignition_osr) return MaybeHandle<Code>();
- if (!Compiler::EnsureBytecode(info)) {
- if (isolate->has_pending_exception()) isolate->clear_pending_exception();
- return MaybeHandle<Code>();
- }
+ DCHECK(shared->HasBytecodeArray());
info->MarkAsOptimizeFromBytecode();
}
@@ -793,6 +900,8 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
ParseInfo parse_info(&zone, handle(function->shared()));
CompilationInfo info(&parse_info, function);
+ DCHECK(function->shared()->is_compiled());
+
// Function no longer needs to be tiered up
function->shared()->set_marked_for_tier_up(false);
@@ -812,13 +921,11 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
return MaybeHandle<Code>();
}
- // TODO(4280): For now we do not switch generators or async functions to
- // baseline code because there might be suspended activations stored in
- // generator objects on the heap. We could eventually go directly to
- // TurboFan in this case.
- if (IsResumableFunction(function->shared()->kind())) {
+ // Don't generate full-codegen code for functions that full-codegen can't
+ // support.
+ if (function->shared()->must_use_ignition_turbo()) {
return MaybeHandle<Code>();
}
+ DCHECK(!IsResumableFunction(function->shared()->kind()));
if (FLAG_trace_opt) {
OFStream os(stdout);
@@ -827,7 +934,7 @@ MaybeHandle<Code> GetBaselineCode(Handle<JSFunction> function) {
}
// Parse and update CompilationInfo with the results.
- if (!Parser::ParseStatic(info.parse_info())) return MaybeHandle<Code>();
+ if (!parsing::ParseFunction(info.parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info.shared_info();
DCHECK_EQ(shared->language_mode(), info.literal()->language_mode());
@@ -856,7 +963,7 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
DCHECK(!function->is_compiled());
TimerEventScope<TimerEventCompileCode> compile_timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileCodeLazy);
+ &RuntimeCallStats::CompileFunction);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
@@ -872,7 +979,8 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
return cached_code;
}
- if (function->shared()->marked_for_tier_up()) {
+ if (function->shared()->is_compiled() &&
+ function->shared()->marked_for_tier_up()) {
DCHECK(FLAG_mark_shared_functions_for_tier_up);
function->shared()->set_marked_for_tier_up(false);
@@ -928,7 +1036,7 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
Handle<Code> result;
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCode(&info), Code);
- if (FLAG_always_opt) {
+ if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
Handle<Code> opt_code;
if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
.ToHandle(&opt_code)) {
@@ -940,49 +1048,37 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
}
-Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
- Isolate* isolate, FunctionLiteral* literal, Handle<Script> script) {
- Handle<Code> code = isolate->builtins()->CompileLazy();
- Handle<ScopeInfo> scope_info = handle(ScopeInfo::Empty(isolate));
- Handle<SharedFunctionInfo> result = isolate->factory()->NewSharedFunctionInfo(
- literal->name(), literal->materialized_literal_count(), literal->kind(),
- code, scope_info);
- SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
- SharedFunctionInfo::SetScript(result, script);
- return result;
-}
-
Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
TimerEventScope<TimerEventCompileCode> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::CompileCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
ParseInfo* parse_info = info->parse_info();
+
+ RuntimeCallTimerScope runtimeTimer(
+ isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
+ : &RuntimeCallStats::CompileScript);
+
Handle<Script> script = parse_info->script();
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
script->set_context_data(array->get(v8::Context::kDebugIdIndex));
- isolate->debug()->OnBeforeCompile(script);
-
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
- if (parse_info->literal() == nullptr && !Parser::ParseStatic(parse_info)) {
+ if (parse_info->literal() == nullptr &&
+ !parsing::ParseProgram(parse_info)) {
return Handle<SharedFunctionInfo>::null();
}
- FunctionLiteral* lit = parse_info->literal();
+ EnsureSharedFunctionInfosArrayOnScript(parse_info);
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
- RuntimeCallTimerScope runtimeTimer(
- isolate, parse_info->is_eval() ? &RuntimeCallStats::CompileEval
- : &RuntimeCallStats::Compile);
HistogramTimer* rate = parse_info->is_eval()
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
@@ -991,10 +1087,12 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
parse_info->is_eval() ? "V8.CompileEval" : "V8.Compile");
// Allocate a shared function info object.
+ FunctionLiteral* lit = parse_info->literal();
DCHECK_EQ(kNoSourcePosition, lit->function_token_position());
- result = NewSharedFunctionInfoForLiteral(isolate, lit, script);
+ result = isolate->factory()->NewSharedFunctionInfoForLiteral(lit, script);
result->set_is_toplevel(true);
parse_info->set_shared_info(result);
+ parse_info->set_function_literal_id(result->function_literal_id());
// Compile the code.
if (!CompileUnoptimizedCode(info)) {
@@ -1025,17 +1123,21 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
// ----------------------------------------------------------------------------
// Implementation of Compiler
-bool Compiler::Analyze(ParseInfo* info) {
+bool Compiler::Analyze(ParseInfo* info,
+ EagerInnerFunctionLiterals* eager_literals) {
DCHECK_NOT_NULL(info->literal());
+ RuntimeCallTimerScope runtimeTimer(info->isolate(),
+ &RuntimeCallStats::CompileAnalyse);
if (!Rewriter::Rewrite(info)) return false;
DeclarationScope::Analyze(info, AnalyzeMode::kRegular);
- if (!Renumber(info)) return false;
+ if (!Renumber(info, eager_literals)) return false;
DCHECK_NOT_NULL(info->scope());
return true;
}
bool Compiler::ParseAndAnalyze(ParseInfo* info) {
- if (!Parser::ParseStatic(info)) return false;
+ if (!parsing::ParseAny(info)) return false;
+ if (info->is_toplevel()) EnsureSharedFunctionInfosArrayOnScript(info);
if (!Compiler::Analyze(info)) return false;
DCHECK_NOT_NULL(info->literal());
DCHECK_NOT_NULL(info->scope());
@@ -1156,8 +1258,9 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
// In order to ensure that live edit function info collection finds the newly
// generated shared function infos, clear the script's list temporarily
// and restore it at the end of this method.
- Handle<Object> old_function_infos(script->shared_function_infos(), isolate);
- script->set_shared_function_infos(Smi::kZero);
+ Handle<FixedArray> old_function_infos(script->shared_function_infos(),
+ isolate);
+ script->set_shared_function_infos(isolate->heap()->empty_fixed_array());
// Start a compilation.
Zone zone(isolate->allocator(), ZONE_NAME);
@@ -1184,21 +1287,15 @@ MaybeHandle<JSArray> Compiler::CompileForLiveEdit(Handle<Script> script) {
}
bool Compiler::EnsureBytecode(CompilationInfo* info) {
- if (!ShouldUseIgnition(info)) return false;
- if (!info->shared_info()->HasBytecodeArray()) {
- Handle<Code> original_code(info->shared_info()->code());
+ if (!info->shared_info()->is_compiled()) {
if (GetUnoptimizedCode(info).is_null()) return false;
- if (info->shared_info()->HasAsmWasmData()) return false;
- DCHECK(info->shared_info()->is_compiled());
- if (original_code->kind() == Code::FUNCTION) {
- // Generating bytecode will install the {InterpreterEntryTrampoline} as
- // shared code on the function. To avoid an implicit tier down we restore
- // original baseline code in case it existed beforehand.
- info->shared_info()->ReplaceCode(*original_code);
- }
}
- DCHECK(info->shared_info()->HasBytecodeArray());
- return true;
+ DCHECK(info->shared_info()->is_compiled());
+
+ if (info->shared_info()->HasAsmWasmData()) return false;
+
+ DCHECK_EQ(ShouldUseIgnition(info), info->shared_info()->HasBytecodeArray());
+ return info->shared_info()->HasBytecodeArray();
}
// TODO(turbofan): In the future, unoptimized code with deopt support could
@@ -1212,11 +1309,9 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
CompilationInfo unoptimized(info->parse_info(), info->closure());
unoptimized.EnableDeoptimizationSupport();
- // TODO(4280): For now we do not switch generators or async functions to
- // baseline code because there might be suspended activations stored in
- // generator objects on the heap. We could eventually go directly to
- // TurboFan in this case.
- if (IsResumableFunction(shared->kind())) return false;
+ // Don't generate full-codegen code for functions that full-codegen can't
+ // support.
+ if (shared->must_use_ignition_turbo()) return false;
+ DCHECK(!IsResumableFunction(shared->kind()));
// When we call PrepareForSerializing below, we will change the shared
// ParseInfo. Make sure to reset it.
@@ -1230,6 +1325,14 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
unoptimized.PrepareForSerializing();
}
EnsureFeedbackMetadata(&unoptimized);
+
+ // Ensure we generate and install bytecode first if the function should use
+ // Ignition to avoid implicit tier-down.
+ if (!shared->is_compiled() && ShouldUseIgnition(info) &&
+ !GenerateUnoptimizedCode(info)) {
+ return false;
+ }
+
if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
info->parse_info()->set_will_serialize(old_will_serialize_value);
@@ -1284,7 +1387,9 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<Script> script;
if (!maybe_shared_info.ToHandle(&shared_info)) {
script = isolate->factory()->NewScript(source);
- if (FLAG_trace_deopt) Script::InitLineEnds(script);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
if (!script_name.is_null()) {
script->set_name(*script_name);
script->set_line_offset(line_offset);
@@ -1347,6 +1452,15 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
}
}
+bool ContainsAsmModule(Handle<Script> script) {
+ DisallowHeapAllocation no_gc;
+ SharedFunctionInfo::ScriptIterator iter(script);
+ while (SharedFunctionInfo* info = iter.Next()) {
+ if (info->HasAsmWasmData()) return true;
+ }
+ return false;
+}
+
} // namespace
MaybeHandle<JSFunction> Compiler::GetFunctionFromString(
@@ -1444,13 +1558,15 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
- if (FLAG_trace_deopt) Script::InitLineEnds(script);
+ if (isolate->NeedsSourcePositionsForProfiling()) {
+ Script::InitLineEnds(script);
+ }
if (natives == NATIVES_CODE) {
script->set_type(Script::TYPE_NATIVE);
- script->set_hide_source(true);
} else if (natives == EXTENSION_CODE) {
script->set_type(Script::TYPE_EXTENSION);
- script->set_hide_source(true);
+ } else if (natives == INSPECTOR_CODE) {
+ script->set_type(Script::TYPE_INSPECTOR);
}
if (!script_name.is_null()) {
script->set_name(*script_name);
@@ -1486,7 +1602,8 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, context, language_mode, result);
if (FLAG_serialize_toplevel &&
- compile_options == ScriptCompiler::kProduceCodeCache) {
+ compile_options == ScriptCompiler::kProduceCodeCache &&
+ !ContainsAsmModule(script)) {
HistogramTimerScope histogram_timer(
isolate->counters()->compile_serialize());
RuntimeCallTimerScope runtimeTimer(isolate,
@@ -1502,7 +1619,7 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
}
if (result.is_null()) {
- isolate->ReportPendingMessages();
+ if (natives != EXTENSION_CODE) isolate->ReportPendingMessages();
} else {
isolate->debug()->OnAfterCompile(script);
}
@@ -1533,7 +1650,6 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForStreamedScript(
return result;
}
-
Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
FunctionLiteral* literal, Handle<Script> script,
CompilationInfo* outer_info) {
@@ -1542,92 +1658,23 @@ Handle<SharedFunctionInfo> Compiler::GetSharedFunctionInfo(
MaybeHandle<SharedFunctionInfo> maybe_existing;
// Find any previously allocated shared function info for the given literal.
- if (outer_info->shared_info()->never_compiled()) {
- // On the first compile, there are no existing shared function info for
- // inner functions yet, so do not try to find them. All bets are off for
- // live edit though.
- SLOW_DCHECK(script->FindSharedFunctionInfo(literal).is_null() ||
- isolate->debug()->live_edit_enabled());
- } else {
- maybe_existing = script->FindSharedFunctionInfo(literal);
- }
+ maybe_existing = script->FindSharedFunctionInfo(isolate, literal);
- // We found an existing shared function info. If it has any sort of code
- // attached, don't worry about compiling and simply return it. Otherwise,
- // continue to decide whether to eagerly compile.
- // Note that we also carry on if we are compiling eager to obtain code for
- // debugging, unless we already have code with debug break slots.
+ // If we found an existing shared function info, return it.
Handle<SharedFunctionInfo> existing;
if (maybe_existing.ToHandle(&existing)) {
DCHECK(!existing->is_toplevel());
- if (existing->HasBaselineCode() || existing->HasBytecodeArray()) {
- if (!outer_info->is_debug() || existing->HasDebugCode()) {
- return existing;
- }
- }
+ return existing;
}
- // Allocate a shared function info object.
- Handle<SharedFunctionInfo> result;
- if (!maybe_existing.ToHandle(&result)) {
- result = NewSharedFunctionInfoForLiteral(isolate, literal, script);
- result->set_is_toplevel(false);
-
- // If the outer function has been compiled before, we cannot be sure that
- // shared function info for this function literal has been created for the
- // first time. It may have already been compiled previously.
- result->set_never_compiled(outer_info->shared_info()->never_compiled());
- }
-
- Zone zone(isolate->allocator(), ZONE_NAME);
- ParseInfo parse_info(&zone, script);
- CompilationInfo info(&parse_info, Handle<JSFunction>::null());
- parse_info.set_literal(literal);
- parse_info.set_shared_info(result);
- parse_info.set_language_mode(literal->scope()->language_mode());
- parse_info.set_ast_value_factory(
- outer_info->parse_info()->ast_value_factory());
- parse_info.set_ast_value_factory_owned(false);
-
- if (outer_info->will_serialize()) info.PrepareForSerializing();
- if (outer_info->is_debug()) info.MarkAsDebug();
-
- // If this inner function is already compiled, we don't need to compile
- // again. When compiling for debug, we are not interested in having debug
- // break slots in inner functions, neither for setting break points nor
- // for revealing inner functions.
- // This is especially important for generators. We must not replace the
- // code for generators, as there may be suspended generator objects.
- if (!result->is_compiled()) {
- if (!literal->ShouldEagerCompile()) {
- info.SetCode(isolate->builtins()->CompileLazy());
- Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
- if (outer_scope) {
- result->set_outer_scope_info(*outer_scope->scope_info());
- }
- } else {
- // Generate code
- TimerEventScope<TimerEventCompileCode> timer(isolate);
- RuntimeCallTimerScope runtimeTimer(isolate,
- &RuntimeCallStats::CompileCode);
- TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
- if (Renumber(info.parse_info()) && GenerateUnoptimizedCode(&info)) {
- // Code generation will ensure that the feedback vector is present and
- // appropriately sized.
- DCHECK(!info.code().is_null());
- if (literal->should_be_used_once_hint()) {
- info.code()->MarkToBeExecutedOnce(isolate);
- }
- } else {
- return Handle<SharedFunctionInfo>::null();
- }
- }
- }
-
- if (maybe_existing.is_null()) {
- RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, &info);
+ // Allocate a shared function info object which will be compiled lazily.
+ Handle<SharedFunctionInfo> result =
+ isolate->factory()->NewSharedFunctionInfoForLiteral(literal, script);
+ result->set_is_toplevel(false);
+ Scope* outer_scope = literal->scope()->GetOuterScopeWithContext();
+ if (outer_scope) {
+ result->set_outer_scope_info(*outer_scope->scope_info());
}
-
return result;
}
@@ -1704,7 +1751,9 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
PretenureFlag pretenure) {
Handle<SharedFunctionInfo> shared(function->shared());
- if (FLAG_always_opt && shared->allows_lazy_compilation()) {
+ if (FLAG_always_opt && shared->allows_lazy_compilation() &&
+ !function->shared()->HasAsmWasmData() &&
+ function->shared()->is_compiled()) {
function->MarkForOptimization();
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 03c6f8199f..dfbd520f9d 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -22,6 +22,10 @@ class CompilationJob;
class JavaScriptFrame;
class ParseInfo;
class ScriptData;
+template <typename T>
+class ThreadedList;
+template <typename T>
+class ThreadedListZoneEntry;
// The V8 compiler API.
//
@@ -63,10 +67,15 @@ class Compiler : public AllStatic {
// offer this chance, optimized closure instantiation will not call this.
static void PostInstantiation(Handle<JSFunction> function, PretenureFlag);
+ typedef ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>
+ EagerInnerFunctionLiterals;
+
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
- // Rewrite, analyze scopes, and renumber.
- static bool Analyze(ParseInfo* info);
+ // Rewrite, analyze scopes, and renumber. If |eager_literals| is non-null, it
+ // is appended with inner function literals which should be eagerly compiled.
+ static bool Analyze(ParseInfo* info,
+ EagerInnerFunctionLiterals* eager_literals = nullptr);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
// Ensures that bytecode is generated, calls ParseAndAnalyze internally.
@@ -158,11 +167,7 @@ class CompilationJob {
CompilationJob(Isolate* isolate, CompilationInfo* info,
const char* compiler_name,
- State initial_state = State::kReadyToPrepare)
- : info_(info),
- compiler_name_(compiler_name),
- state_(initial_state),
- stack_limit_(isolate->stack_guard()->real_climit()) {}
+ State initial_state = State::kReadyToPrepare);
virtual ~CompilationJob() {}
// Prepare the compile job. Must be called on the main thread.
@@ -191,6 +196,11 @@ class CompilationJob {
void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
uintptr_t stack_limit() const { return stack_limit_; }
+ bool executed_on_background_thread() const {
+ DCHECK_IMPLIES(!can_execute_on_background_thread(),
+ !executed_on_background_thread_);
+ return executed_on_background_thread_;
+ }
State state() const { return state_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const;
@@ -207,12 +217,14 @@ class CompilationJob {
private:
CompilationInfo* info_;
+ ThreadId isolate_thread_id_;
base::TimeDelta time_taken_to_prepare_;
base::TimeDelta time_taken_to_execute_;
base::TimeDelta time_taken_to_finalize_;
const char* compiler_name_;
State state_;
uintptr_t stack_limit_;
+ bool executed_on_background_thread_;
MUST_USE_RESULT Status UpdateState(Status status, State next_state) {
if (status == SUCCEEDED) {
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 02de4edeac..10ffcb0f1a 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -6,3 +6,4 @@ jarin@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
titzer@chromium.org
+danno@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 540eb375b7..9fd531c637 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -9,6 +9,7 @@
#include "src/frames.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -16,47 +17,67 @@ namespace compiler {
// static
FieldAccess AccessBuilder::ForExternalDoubleValue() {
- FieldAccess access = {kUntaggedBase, 0,
- MaybeHandle<Name>(), Type::Number(),
- MachineType::Float64(), kNoWriteBarrier};
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Number(), MachineType::Float64(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalTaggedValue() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalUint8Value() {
+ FieldAccess access = {kUntaggedBase, 0,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMap() {
- FieldAccess access = {
- kTaggedBase, HeapObject::kMapOffset, MaybeHandle<Name>(),
- Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
+ FieldAccess access = {kTaggedBase, HeapObject::kMapOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kMapWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForHeapNumberValue() {
- FieldAccess access = {kTaggedBase,
- HeapNumber::kValueOffset,
- MaybeHandle<Name>(),
- TypeCache::Get().kFloat64,
- MachineType::Float64(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, HeapNumber::kValueOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kFloat64, MachineType::Float64(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectProperties() {
- FieldAccess access = {
- kTaggedBase, JSObject::kPropertiesOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {
- kTaggedBase, JSObject::kElementsOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -65,126 +86,127 @@ FieldAccess AccessBuilder::ForJSObjectElements() {
FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
int index) {
int const offset = map->GetInObjectPropertyOffset(index);
- FieldAccess access = {kTaggedBase,
- offset,
- MaybeHandle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
+// static
+FieldAccess AccessBuilder::ForJSObjectOffset(
+ int offset, WriteBarrierKind write_barrier_kind) {
+ FieldAccess access = {kTaggedBase, offset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ write_barrier_kind};
+ return access;
+}
// static
FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kPrototypeOrInitialMapOffset,
- MaybeHandle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kPrototypeOrInitialMapOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionContext() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kContextOffset, MaybeHandle<Name>(),
- Type::Internal(), MachineType::AnyTagged(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kContextOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kSharedFunctionInfoOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kSharedFunctionInfoOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionLiterals() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kLiteralsOffset, Handle<Name>(),
- Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kLiteralsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
- FieldAccess access = {
- kTaggedBase, JSFunction::kCodeEntryOffset, Handle<Name>(),
- Type::OtherInternal(), MachineType::Pointer(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSFunction::kCodeEntryOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
- FieldAccess access = {kTaggedBase,
- JSFunction::kNextFunctionLinkOffset,
- Handle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSFunction::kNextFunctionLinkOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kContextOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kContinuationOffset,
- Handle<Name>(),
- Type::SignedSmall(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kContinuationOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kInputOrDebugPosOffset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kInputOrDebugPosOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kOperandStackOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kRegisterFileOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
- FieldAccess access = {kTaggedBase,
- JSGeneratorObject::kResumeModeOffset,
- Handle<Name>(),
- Type::SignedSmall(),
- MachineType::TaggedSigned(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGeneratorObject::kResumeModeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
@@ -194,6 +216,7 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
FieldAccess access = {kTaggedBase,
JSArray::kLengthOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
type_cache.kJSArrayLengthType,
MachineType::TaggedSigned(),
kFullWriteBarrier};
@@ -210,30 +233,28 @@ FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
// static
FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
- FieldAccess access = {kTaggedBase,
- JSArrayBuffer::kBackingStoreOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::Pointer(),
- kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::Pointer(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
- FieldAccess access = {kTaggedBase, JSArrayBuffer::kBitFieldOffset,
- MaybeHandle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint32(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayBuffer::kBitFieldOffset, MaybeHandle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
- FieldAccess access = {kTaggedBase,
- JSArrayBufferView::kBufferOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSArrayBufferView::kBufferOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -243,6 +264,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteLength() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kByteLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -254,6 +276,7 @@ FieldAccess AccessBuilder::ForJSArrayBufferViewByteOffset() {
FieldAccess access = {kTaggedBase,
JSArrayBufferView::kByteOffsetOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -265,6 +288,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayLength() {
FieldAccess access = {kTaggedBase,
JSTypedArray::kLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kJSTypedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -276,6 +300,7 @@ FieldAccess AccessBuilder::ForJSDateValue() {
FieldAccess access = {kTaggedBase,
JSDate::kValueOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kJSDateValueType,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -284,48 +309,51 @@ FieldAccess AccessBuilder::ForJSDateValue() {
// static
FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
- FieldAccess access = {kTaggedBase,
- JSDate::kValueOffset + index * kPointerSize,
- MaybeHandle<Name>(),
- Type::Number(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSDate::kValueOffset + index * kPointerSize,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Number(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultDone() {
- FieldAccess access = {
- kTaggedBase, JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kDoneOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSIteratorResultValue() {
- FieldAccess access = {
- kTaggedBase, JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSIteratorResult::kValueOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpFlags() {
- FieldAccess access = {
- kTaggedBase, JSRegExp::kFlagsOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSRegExp::kFlagsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSRegExpSource() {
- FieldAccess access = {
- kTaggedBase, JSRegExp::kSourceOffset, MaybeHandle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSRegExp::kSourceOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -335,6 +363,7 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
FieldAccess access = {kTaggedBase,
FixedArray::kLengthOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kFixedArrayLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -343,12 +372,11 @@ FieldAccess AccessBuilder::ForFixedArrayLength() {
// static
FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
- FieldAccess access = {kTaggedBase,
- FixedTypedArrayBase::kBasePointerOffset,
- MaybeHandle<Name>(),
- Type::OtherInternal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, FixedTypedArrayBase::kBasePointerOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
@@ -357,6 +385,7 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
FieldAccess access = {kTaggedBase,
FixedTypedArrayBase::kExternalPointerOffset,
MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
@@ -365,53 +394,51 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
- FieldAccess access = {kTaggedBase,
- DescriptorArray::kEnumCacheBridgeCacheOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeCacheOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField() {
- FieldAccess access = {kTaggedBase, Map::kBitFieldOffset,
- Handle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint8(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kBitFieldOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapBitField3() {
- FieldAccess access = {kTaggedBase, Map::kBitField3Offset,
- Handle<Name>(), TypeCache::Get().kInt32,
- MachineType::Int32(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kBitField3Offset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::Int32(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapDescriptors() {
- FieldAccess access = {kTaggedBase,
- Map::kDescriptorsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Map::kDescriptorsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
@@ -419,48 +446,47 @@ FieldAccess AccessBuilder::ForMapDescriptors() {
// static
FieldAccess AccessBuilder::ForMapInstanceType() {
- FieldAccess access = {kTaggedBase, Map::kInstanceTypeOffset,
- Handle<Name>(), TypeCache::Get().kUint8,
- MachineType::Uint8(), kNoWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
+ MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForMapPrototype() {
- FieldAccess access = {
- kTaggedBase, Map::kPrototypeOffset, Handle<Name>(),
- Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, Map::kPrototypeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularExports() {
- FieldAccess access = {kTaggedBase,
- Module::kRegularExportsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Module::kRegularExportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForModuleRegularImports() {
- FieldAccess access = {kTaggedBase,
- Module::kRegularImportsOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, Module::kRegularImportsOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForNameHashField() {
- FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
- Handle<Name>(), Type::Internal(),
- MachineType::Uint32(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, Name::kHashFieldOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::Uint32(),
+ kNoWriteBarrier};
return access;
}
@@ -469,6 +495,7 @@ FieldAccess AccessBuilder::ForStringLength() {
FieldAccess access = {kTaggedBase,
String::kLengthOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kStringLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -477,33 +504,37 @@ FieldAccess AccessBuilder::ForStringLength() {
// static
FieldAccess AccessBuilder::ForConsStringFirst() {
- FieldAccess access = {
- kTaggedBase, ConsString::kFirstOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, ConsString::kFirstOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForConsStringSecond() {
- FieldAccess access = {
- kTaggedBase, ConsString::kSecondOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, ConsString::kSecondOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringOffset() {
- FieldAccess access = {
- kTaggedBase, SlicedString::kOffsetOffset, Handle<Name>(),
- Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
+ FieldAccess access = {kTaggedBase, SlicedString::kOffsetOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::SignedSmall(), MachineType::TaggedSigned(),
+ kNoWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForSlicedStringParent() {
- FieldAccess access = {
- kTaggedBase, SlicedString::kParentOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, SlicedString::kParentOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -512,6 +543,7 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
FieldAccess access = {kTaggedBase,
ExternalString::kResourceDataOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier};
@@ -550,23 +582,20 @@ ElementAccess AccessBuilder::ForSeqTwoByteStringCharacter() {
// static
FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
- FieldAccess access = {kTaggedBase,
- JSGlobalObject::kGlobalProxyOffset,
- Handle<Name>(),
- Type::Receiver(),
- MachineType::TaggedPointer(),
+ FieldAccess access = {kTaggedBase, JSGlobalObject::kGlobalProxyOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Receiver(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
- FieldAccess access = {kTaggedBase,
- JSGlobalObject::kNativeContextOffset,
- Handle<Name>(),
- Type::Internal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSGlobalObject::kNativeContextOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -575,6 +604,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorObject() {
FieldAccess access = {kTaggedBase,
JSArrayIterator::kIteratedObjectOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
Type::ReceiverOrUndefined(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -589,6 +619,7 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
FieldAccess access = {kTaggedBase,
JSArrayIterator::kNextIndexOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kPositiveSafeInteger,
MachineType::AnyTagged(),
kFullWriteBarrier};
@@ -614,20 +645,20 @@ FieldAccess AccessBuilder::ForJSArrayIteratorIndex(InstanceType instance_type,
// static
FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
- FieldAccess access = {kTaggedBase,
- JSArrayIterator::kIteratedObjectMapOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::TaggedPointer(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSArrayIterator::kIteratedObjectMapOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSStringIteratorString() {
- FieldAccess access = {
- kTaggedBase, JSStringIterator::kStringOffset, Handle<Name>(),
- Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSStringIterator::kStringOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::String(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -636,6 +667,7 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
FieldAccess access = {kTaggedBase,
JSStringIterator::kNextIndexOffset,
Handle<Name>(),
+ MaybeHandle<Map>(),
TypeCache::Get().kStringLengthType,
MachineType::TaggedSigned(),
kNoWriteBarrier};
@@ -644,52 +676,53 @@ FieldAccess AccessBuilder::ForJSStringIteratorIndex() {
// static
FieldAccess AccessBuilder::ForValue() {
- FieldAccess access = {
- kTaggedBase, JSValue::kValueOffset, Handle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSValue::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsLength() {
- FieldAccess access = {
- kTaggedBase, JSArgumentsObject::kLengthOffset, Handle<Name>(),
- Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, JSArgumentsObject::kLengthOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForArgumentsCallee() {
- FieldAccess access = {kTaggedBase,
- JSSloppyArgumentsObject::kCalleeOffset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kPointerWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, JSSloppyArgumentsObject::kCalleeOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kPointerWriteBarrier};
return access;
}
// static
-FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
+FieldAccess AccessBuilder::ForFixedArraySlot(
+ size_t index, WriteBarrierKind write_barrier_kind) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase,
- offset,
- Handle<Name>(),
- Type::NonInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ write_barrier_kind};
return access;
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {
- kTaggedBase, Cell::kValueOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -698,31 +731,29 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
DCHECK_EQ(offset,
Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
- FieldAccess access = {kTaggedBase,
- offset,
- Handle<Name>(),
- Type::Any(),
- MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
- FieldAccess access = {kTaggedBase,
- ContextExtension::kScopeInfoOffset,
- Handle<Name>(),
- Type::OtherInternal(),
- MachineType::AnyTagged(),
- kFullWriteBarrier};
+ FieldAccess access = {
+ kTaggedBase, ContextExtension::kScopeInfoOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::OtherInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForContextExtensionExtension() {
- FieldAccess access = {
- kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, ContextExtension::kExtensionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -831,6 +862,68 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
return access;
}
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfElements() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(HashTableBase::kNumberOfElementsIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfDeletedElement() {
+ FieldAccess access = {
+ kTaggedBase, FixedArray::OffsetOfElementAt(
+ HashTableBase::kNumberOfDeletedElementsIndex),
+ MaybeHandle<Name>(), MaybeHandle<Map>(), Type::SignedSmall(),
+ MachineType::TaggedSigned(), kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(HashTableBase::kCapacityIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::Any(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
+ FieldAccess access = {
+ kTaggedBase,
+ FixedArray::OffsetOfElementAt(NameDictionary::kNextEnumerationIndexIndex),
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::SignedSmall(),
+ MachineType::TaggedSigned(),
+ kNoWriteBarrier};
+ return access;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
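
Note: the access-builder.cc changes above are mechanical. FieldAccess (declared
in src/compiler/simplified-operator.h) gained a MaybeHandle<Map> member between
the field name and the field type, and because FieldAccess is a plain struct
built with positional aggregate initialization, every brace initializer in the
file had to grow a fourth entry. A simplified stand-in, with illustrative types
only, showing why the position matters:

struct FieldAccessSketch {
  int base;            // kTaggedBase / kUntaggedBase
  int offset;          // byte offset of the field
  const char* name;    // stands in for MaybeHandle<Name>
  const char* map;     // stands in for the new MaybeHandle<Map> member
  const char* type;    // stands in for Type*
  const char* machine; // stands in for MachineType
  int barrier;         // WriteBarrierKind
};

// Aggregate initialization is positional: dropping the map entry here would
// still compile (the neighbouring members share a type) but would silently
// shift every later field over by one, which is why each initializer in the
// diff spells out MaybeHandle<Map>() explicitly.
FieldAccessSketch access = {0, 8, "name", "map", "Any", "AnyTagged", 1};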
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index eb8e78fc36..f76aedf5a9 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -26,6 +26,12 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to a double field identified by an external reference.
static FieldAccess ForExternalDoubleValue();
+ // Provides access to a tagged field identified by an external reference.
+ static FieldAccess ForExternalTaggedValue();
+
+ // Provides access to a uint8 field identified by an external reference.
+ static FieldAccess ForExternalUint8Value();
+
// ===========================================================================
// Access to heap object fields and elements (based on tagged pointer).
@@ -43,6 +49,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSObject inobject property fields.
static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+ static FieldAccess ForJSObjectOffset(
+ int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
// Provides access to JSFunction::prototype_or_initial_map() field.
static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -71,8 +79,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
- // Provides access to JSGeneratorObject::operand_stack() field.
- static FieldAccess ForJSGeneratorObjectOperandStack();
+ // Provides access to JSGeneratorObject::register_file() field.
+ static FieldAccess ForJSGeneratorObjectRegisterFile();
// Provides access to JSGeneratorObject::resume_mode() field.
static FieldAccess ForJSGeneratorObjectResumeMode();
@@ -218,7 +226,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForArgumentsCallee();
// Provides access to FixedArray slots.
- static FieldAccess ForFixedArraySlot(size_t index);
+ static FieldAccess ForFixedArraySlot(
+ size_t index, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
// Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
@@ -238,6 +247,15 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
+ // Provides access to HashTable fields.
+ static FieldAccess ForHashTableBaseNumberOfElements();
+ static FieldAccess ForHashTableBaseNumberOfDeletedElement();
+ static FieldAccess ForHashTableBaseCapacity();
+
+ // Provides access to Dictionary fields.
+ static FieldAccess ForDictionaryMaxNumberKey();
+ static FieldAccess ForDictionaryNextEnumerationIndex();
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
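
Note: ForFixedArraySlot and the new ForJSObjectOffset take a WriteBarrierKind
that defaults to kFullWriteBarrier, so existing call sites compile unchanged
while callers that know a store needs no barrier can opt out explicitly.
Hypothetical call sites sketching the two forms:

// Default: conservative full write barrier, as before.
FieldAccess a = AccessBuilder::ForFixedArraySlot(0);
// Opt out when the stored value cannot require a barrier (e.g. a Smi).
FieldAccess b = AccessBuilder::ForFixedArraySlot(0, kNoWriteBarrier);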
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 866b06086a..f23154aa45 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -52,6 +52,8 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
return os << "Load";
case AccessMode::kStore:
return os << "Store";
+ case AccessMode::kStoreInLiteral:
+ return os << "StoreInLiteral";
}
UNREACHABLE();
return os;
@@ -144,13 +146,11 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
case kInvalid:
break;
- case kNotFound:
- return true;
-
case kDataField: {
// Check if we actually access the same field.
if (this->transition_map_.address() == that->transition_map_.address() &&
this->field_index_ == that->field_index_ &&
+ this->field_map_.address() == that->field_map_.address() &&
this->field_type_->Is(that->field_type_) &&
that->field_type_->Is(this->field_type_) &&
this->field_representation_ == that->field_representation_) {
@@ -173,6 +173,8 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
}
return false;
}
+
+ case kNotFound:
case kGeneric: {
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
@@ -282,7 +284,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
int const number = descriptors->SearchWithCache(isolate(), *name, *map);
if (number != DescriptorArray::kNotFound) {
PropertyDetails const details = descriptors->GetDetails(number);
- if (access_mode == AccessMode::kStore) {
+ if (access_mode == AccessMode::kStore ||
+ access_mode == AccessMode::kStoreInLiteral) {
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) {
return false;
@@ -295,14 +298,8 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return LookupTransition(receiver_map, name, holder, access_info);
}
}
- switch (details.type()) {
- case DATA_CONSTANT: {
- *access_info = PropertyAccessInfo::DataConstant(
- MapList{receiver_map},
- handle(descriptors->GetValue(number), isolate()), holder);
- return true;
- }
- case DATA: {
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
int index = descriptors->GetFieldIndex(number);
Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -344,8 +341,21 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
MapList{receiver_map}, field_index, field_representation,
field_type, field_map, holder);
return true;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ // TODO(turbofan): Add support for general accessors?
+ return false;
}
- case ACCESSOR_CONSTANT: {
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ *access_info = PropertyAccessInfo::DataConstant(
+ MapList{receiver_map},
+ handle(descriptors->GetValue(number), isolate()), holder);
+ return true;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
Handle<Object> accessors(descriptors->GetValue(number), isolate());
if (!accessors->IsAccessorPair()) return false;
Handle<Object> accessor(
@@ -361,15 +371,12 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (optimization.api_call_info()->fast_handler()->IsCode()) {
return false;
}
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
}
*access_info = PropertyAccessInfo::AccessorConstant(
MapList{receiver_map}, accessor, holder);
return true;
}
- case ACCESSOR: {
- // TODO(turbofan): Add support for general accessors?
- return false;
- }
}
UNREACHABLE();
return false;
@@ -382,6 +389,11 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
return false;
}
+ // Don't search the prototype chain when storing in literals.
+ if (access_mode == AccessMode::kStoreInLiteral) {
+ return false;
+ }
+
// Don't lookup private symbols on the prototype chain.
if (name->IsPrivate()) return false;
@@ -503,7 +515,7 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
// Don't bother optimizing stores to read-only properties.
if (details.IsReadOnly()) return false;
// TODO(bmeurer): Handle transition to data constant?
- if (details.type() != DATA) return false;
+ if (details.location() != kField) return false;
int const index = details.field_index();
Representation details_representation = details.representation();
FieldIndex field_index = FieldIndex::ForPropertyIndex(
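
Note: the rewritten dispatch above replaces the old four-value property-type
switch (DATA, DATA_CONSTANT, ACCESSOR, ACCESSOR_CONSTANT) with the orthogonal
(location, kind) pair from PropertyDetails. The mapping, reconstructed from the
branches in this hunk:

// location == kField,      kind == kData     -> old DATA (field load/store)
// location == kField,      kind == kAccessor -> old ACCESSOR (still
//                                               unsupported, see the TODO)
// location == kDescriptor, kind == kData     -> old DATA_CONSTANT
// location == kDescriptor, kind == kAccessor -> old ACCESSOR_CONSTANT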
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 1d485dd0d4..e301ad9890 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -26,7 +26,8 @@ class Type;
class TypeCache;
// Whether we are loading a property or storing to a property.
-enum class AccessMode { kLoad, kStore };
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral };
std::ostream& operator<<(std::ostream&, AccessMode);
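
Note: the new kStoreInLiteral mode behaves like a store for the read-only and
transition checks but, per the access-info.cc change above, never walks the
prototype chain. A hypothetical helper (not part of the V8 API) condensing the
check used at the call sites:

bool IsAnyStore(AccessMode mode) {
  return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}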
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index c473b9b6aa..a721f6a3be 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -32,6 +32,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
case kFlags_branch:
case kFlags_deoptimize:
case kFlags_set:
+ case kFlags_trap:
return SetCC;
case kFlags_none:
return LeaveCC;
@@ -1504,6 +1505,110 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmFloat32x4Splat: {
+ __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+ break;
+ }
+ case kArmFloat32x4ExtractLane: {
+ __ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+ kScratchReg, i.InputInt8(1));
+ break;
+ }
+ case kArmFloat32x4ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+ break;
+ }
+ case kArmFloat32x4FromInt32x4: {
+ __ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4FromUint32x4: {
+ __ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Abs: {
+ __ vabs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Neg: {
+ __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmFloat32x4Add: {
+ __ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Sub: {
+ __ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Eq: {
+ __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmFloat32x4Ne: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmInt32x4Splat: {
+ __ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kArmInt32x4ExtractLane: {
+ __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
+ i.InputInt8(1));
+ break;
+ }
+ case kArmInt32x4ReplaceLane: {
+ __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(2), NeonS32, i.InputInt8(1));
+ break;
+ }
+ case kArmInt32x4FromFloat32x4: {
+ __ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmUint32x4FromFloat32x4: {
+ __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmInt32x4Add: {
+ __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Sub: {
+ __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Eq: {
+ __ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kArmInt32x4Ne: {
+ Simd128Register dst = i.OutputSimd128Register();
+ __ vceq(Neon32, dst, i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ __ vmvn(dst, dst);
+ break;
+ }
+ case kArmSimd32x4Select: {
+ // Select is a ternary op, so we need to move one input into the
+ // destination. Use vtst to canonicalize the 'boolean' input #0.
+ __ vtst(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(0));
+ __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
+ i.InputSimd128Register(2));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
break;
@@ -1590,6 +1695,67 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ ArmOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Condition cc = FlagsConditionToCondition(condition);
+ __ b(cc, tlabel);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1824,9 +1990,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ mov(dst, Operand(src.ToInt32()));
@@ -1891,8 +2055,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsDoubleStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
// GapResolver may give us reg codes that don't map to actual s-registers.
// Generate code to work around those cases.
int src_code = LocationOperand::cast(source)->register_code();
@@ -1903,6 +2066,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DCHECK(destination->IsFloatStackSlot());
__ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
}
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(src.low(), 2),
+ NeonMemOperand(kScratchReg));
+ }
}
} else if (source->IsFPStackSlot()) {
MemOperand src = g.ToMemOperand(source);
@@ -1911,24 +2087,38 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
__ VmovExtended(dst_code, src, kScratchReg);
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ add(kScratchReg, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+ NeonMemOperand(kScratchReg));
}
- } else {
+ } else if (rep == MachineRepresentation::kFloat64) {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
SwVfpRegister temp = kScratchDoubleReg.low();
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ add(kScratchReg, src.rn(), Operand(src.offset()));
+ __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else {
@@ -1936,7 +2126,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
-
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) {
ArmOperandConverter g(this, nullptr);
@@ -1975,7 +2164,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
- __ vswp(src, dst);
+ __ Swap(src, dst);
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
@@ -1983,8 +2172,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vldr(src, dst);
__ vstr(temp, dst);
}
- } else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+ } else if (rep == MachineRepresentation::kFloat32) {
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFPRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
@@ -1998,29 +2186,55 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ VmovExtended(src_code, dst, kScratchReg);
__ vstr(temp.low(), dst);
}
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ QwNeonRegister src = g.ToSimd128Register(source);
+ if (destination->IsFPRegister()) {
+ QwNeonRegister dst = g.ToSimd128Register(destination);
+ __ Swap(src, dst);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(kScratchQuadReg, src);
+ __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+ __ vld1(Neon8, NeonListOperand(src.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+ NeonMemOperand(kScratchReg));
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ }
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
- Register temp_0 = kScratchReg;
- LowDwVfpRegister temp_1 = kScratchDoubleReg;
- MemOperand src0 = g.ToMemOperand(source);
- MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
- MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
- MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
- __ vldr(temp_1, dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ ldr(temp_0, src1);
- __ str(temp_0, dst1);
- __ vstr(temp_1, src0);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ // Restore the 0 register.
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ } else if (rep == MachineRepresentation::kFloat32) {
+ __ vldr(kScratchDoubleReg.low(), dst);
+ __ vldr(kScratchDoubleReg.high(), src);
+ __ vstr(kScratchDoubleReg.low(), src);
+ __ vstr(kScratchDoubleReg.high(), dst);
} else {
- DCHECK_EQ(MachineRepresentation::kFloat32, rep);
- __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
- __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ str(temp_0, dst0);
- __ vstr(temp_1.low(), src0);
+ DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ src.set_offset(src.offset() + kDoubleSize);
+ dst.set_offset(dst.offset() + kDoubleSize);
+ __ vldr(kScratchDoubleReg, dst);
+ __ vldr(kDoubleRegZero, src);
+ __ vstr(kScratchDoubleReg, src);
+ __ vstr(kDoubleRegZero, dst);
+ // Restore the 0 register.
+ __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
} else {
// No other combinations are possible.
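
Note: the reworked FP stack-slot swap above replaces the old word-by-word copy
through a core register with paired VFP loads and stores, pressing
kDoubleRegZero into service as a second scratch register; that is why the
kFloat64 and kSimd128 branches end by re-zeroing it with veor. A plain C++
analogue of the load/load/store/store sequence for one double-width slot, with
SwapSlots as a hypothetical name:

void SwapSlots(double* src, double* dst) {
  double t0 = *dst;  // vldr kScratchDoubleReg, dst
  double t1 = *src;  // vldr kDoubleRegZero, src
  *src = t0;         // vstr kScratchDoubleReg, src
  *dst = t1;         // vstr kDoubleRegZero, dst
  // The generated code then restores the zero register:
  // veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero).
}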
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index 07c4033bd6..6e5426c255 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -119,7 +119,28 @@ namespace compiler {
V(ArmLdr) \
V(ArmStr) \
V(ArmPush) \
- V(ArmPoke)
+ V(ArmPoke) \
+ V(ArmFloat32x4Splat) \
+ V(ArmFloat32x4ExtractLane) \
+ V(ArmFloat32x4ReplaceLane) \
+ V(ArmFloat32x4FromInt32x4) \
+ V(ArmFloat32x4FromUint32x4) \
+ V(ArmFloat32x4Abs) \
+ V(ArmFloat32x4Neg) \
+ V(ArmFloat32x4Add) \
+ V(ArmFloat32x4Sub) \
+ V(ArmFloat32x4Eq) \
+ V(ArmFloat32x4Ne) \
+ V(ArmInt32x4Splat) \
+ V(ArmInt32x4ExtractLane) \
+ V(ArmInt32x4ReplaceLane) \
+ V(ArmInt32x4FromFloat32x4) \
+ V(ArmUint32x4FromFloat32x4) \
+ V(ArmInt32x4Add) \
+ V(ArmInt32x4Sub) \
+ V(ArmInt32x4Eq) \
+ V(ArmInt32x4Ne) \
+ V(ArmSimd32x4Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index 3f38e5ddef..8dfa68a2f6 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -108,6 +108,27 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmFloat32Min:
case kArmFloat64Min:
case kArmFloat64SilenceNaN:
+ case kArmFloat32x4Splat:
+ case kArmFloat32x4ExtractLane:
+ case kArmFloat32x4ReplaceLane:
+ case kArmFloat32x4FromInt32x4:
+ case kArmFloat32x4FromUint32x4:
+ case kArmFloat32x4Abs:
+ case kArmFloat32x4Neg:
+ case kArmFloat32x4Add:
+ case kArmFloat32x4Sub:
+ case kArmFloat32x4Eq:
+ case kArmFloat32x4Ne:
+ case kArmInt32x4Splat:
+ case kArmInt32x4ExtractLane:
+ case kArmInt32x4ReplaceLane:
+ case kArmInt32x4FromFloat32x4:
+ case kArmUint32x4FromFloat32x4:
+ case kArmInt32x4Add:
+ case kArmInt32x4Sub:
+ case kArmInt32x4Eq:
+ case kArmInt32x4Ne:
+ case kArmSimd32x4Select:
return kNoOpcodeFlags;
case kArmVldrF32:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 5279d1eec1..def486af62 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -267,6 +267,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -501,6 +504,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
void InstructionSelector::VisitUnalignedLoad(Node* node) {
UnalignedLoadRepresentation load_rep =
UnalignedLoadRepresentationOf(node->op());
@@ -885,6 +893,9 @@ void VisitShift(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1252,10 +1263,14 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
InstructionOperand in[] = {temp_operand, result_operand, shift_31};
selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
result_operand, shift_31);
+ } else {
+ DCHECK(cont->IsTrap());
+ InstructionOperand in[] = {temp_operand, result_operand, shift_31,
+ g.UseImmediate(cont->trap_id())};
+ selector->Emit(opcode, 0, nullptr, 4, in);
}
}
@@ -1643,9 +1658,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1836,6 +1854,9 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -1992,10 +2013,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2019,6 +2043,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
@@ -2249,6 +2286,137 @@ void InstructionSelector::VisitAtomicStore(Node* node) {
Emit(code, 0, nullptr, input_count, inputs);
}
+void InstructionSelector::VisitCreateFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmFloat32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmFloat32x4ReplaceLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4FromInt32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4FromUint32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Abs, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Neg, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Add, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Sub, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Eq, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmFloat32x4Ne, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitCreateInt32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Splat, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmInt32x4ExtractLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ ArmOperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kArmInt32x4ReplaceLane, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4FromFloat32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmUint32x4FromFloat32x4, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Add, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Sub, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Eq, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmInt32x4Ne, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmSimd32x4Select, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseRegister(node->InputAt(2)));
+}
+
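Each SIMD visitor above is a one-to-one mapping from a machine node to a NEON opcode; lane indices go through g.UseImmediate() because the lane number is baked into the instruction encoding. A small software model of the lane semantics these nodes implement (this models node behaviour, not the NEON encoding):

#include <array>
#include <cassert>

struct Float32x4 { std::array<float, 4> lanes; };

// CreateFloat32x4: broadcast one scalar into all four lanes.
Float32x4 Splat(float v) { return {{v, v, v, v}}; }

// ExtractLane/ReplaceLane: the lane index must be a compile-time immediate
// in the real instruction, which is why the selector rejects register lanes.
float ExtractLane(const Float32x4& a, int lane) {
  assert(lane >= 0 && lane < 4);
  return a.lanes[lane];
}
Float32x4 ReplaceLane(Float32x4 a, int lane, float v) {
  assert(lane >= 0 && lane < 4);
  a.lanes[lane] = v;
  return a;
}

int main() {
  Float32x4 v = Splat(1.0f);
  v = ReplaceLane(v, 2, 5.0f);
  assert(ExtractLane(v, 2) == 5.0f && ExtractLane(v, 0) == 1.0f);
}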
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 8b1cb578e0..09fe0eb718 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -209,17 +209,16 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
Constant constant = ToConstant(operand);
switch (constant.type()) {
case Constant::kInt32:
- if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(constant.rmode())) {
return Operand(constant.ToInt32(), constant.rmode());
} else {
return Operand(constant.ToInt32());
}
case Constant::kInt64:
- if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
return Operand(constant.ToInt64(), constant.rmode());
} else {
- DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode()));
return Operand(constant.ToInt64());
}
case Constant::kFloat32:
@@ -1702,6 +1701,65 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+ void Generate() final {
+ Arm64OperandConverter i(gen_, instr_);
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ // The trap code should never return.
+ __ Brk(0);
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ DCHECK(csp.Is(__ StackPointer()));
+ __ Move(cp, isolate()->native_context());
+ // Initialize the jssp because it is required for the runtime call.
+ __ Mov(jssp, csp);
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Condition cc = FlagsConditionToCondition(condition);
+ __ B(cc, tlabel);
+}
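AssembleArchTrap keeps the hot path to a single conditional branch: the runtime call, frame setup, and safepoint recording all live in an out-of-line stub emitted after the function body, and the trap id is recovered from the instruction's last immediate input (the i.InputInt32(instr_->InputCount() - 1) above). A simplified standalone model of the out-of-line-code idiom, with illustrative names:

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct Assembler {
  std::vector<std::string> text;                   // emitted "instructions"
  std::vector<std::function<void()>> out_of_line;  // deferred slow paths

  void Emit(std::string s) { text.push_back(std::move(s)); }
  // Hot path cost: one conditional branch; the body is emitted later.
  void BranchToOutOfLine(std::string cond, std::function<void()> body) {
    Emit("b." + cond + " ool_" + std::to_string(out_of_line.size()));
    out_of_line.push_back(std::move(body));
  }
  void Finish() {  // flush deferred fragments after the main code
    for (size_t i = 0; i < out_of_line.size(); ++i) {
      Emit("ool_" + std::to_string(i) + ":");
      out_of_line[i]();
    }
  }
};

int main() {
  Assembler masm;
  masm.Emit("cmp x0, x1");
  masm.BranchToOutOfLine("hs", [&] {    // e.g. a failed bounds check
    masm.Emit("call RuntimeTrap(id)");  // never returns
    masm.Emit("brk 0");                 // debug guard, as in the diff
  });
  masm.Emit("ldr x2, [x3]");            // hot path continues fall-through
  masm.Finish();
  for (const std::string& s : masm.text) std::puts(s.c_str());
}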
// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 0eef53c6d5..9cb33f6c44 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -471,6 +471,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -708,6 +711,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// The architecture supports unaligned access; therefore VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1061,6 +1069,7 @@ bool TryEmitBitfieldExtract32(InstructionSelector* selector, Node* node) {
// OP is >>> or >> and (K & 0x1f) != 0.
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().HasValue() && m.right().HasValue() &&
+ (mleft.right().Value() & 0x1f) != 0 &&
(mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
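The added (mleft.right().Value() & 0x1f) != 0 guard exists because 32-bit shifts only consume the low five bits of the count, so a count that is a multiple of 32 acts as a zero shift: the pattern then degenerates to a plain move, and the bitfield-extract match, whose field width assumes a non-zero shift, must be skipped. A quick standalone check of that masking behaviour:

#include <cassert>
#include <cstdint>

// 32-bit shifts (JS >>> / >>, and ARM64 shifts on w-registers) use count & 0x1f.
uint32_t ShrMasked(uint32_t x, uint32_t k) { return x >> (k & 0x1f); }

int main() {
  assert(ShrMasked(0xdeadbeefu, 32) == 0xdeadbeefu);       // 32 acts as 0
  assert(ShrMasked(0xdeadbeefu, 33) == 0xdeadbeefu >> 1);  // 33 acts as 1
  // With an effective count of 0 there is no field to extract, so the
  // selector must not lower the node to Ubfx32/Sbfx32.
}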
@@ -1379,9 +1388,12 @@ void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
InstructionOperand in[] = {result, result};
selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), result, result,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1995,9 +2007,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2513,11 +2528,15 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
- } else {
- DCHECK(cont->IsDeoptimize());
+ } else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
g.UseRegister(value), g.UseRegister(value),
cont->reason(), cont->frame_state());
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
+ g.UseRegister(value), g.UseRegister(value),
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -2541,6 +2560,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 1b7d1169dd..8c5dce61ee 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -17,7 +17,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -166,8 +166,6 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
void ReturnValue(Node* return_value);
void ThrowValue(Node* exception_value);
- class DeferredCommands;
-
protected:
enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
@@ -207,93 +205,6 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
int stack_height_;
};
-// Helper class for a try-finally control scope. It can record intercepted
-// control-flow commands that cause entry into a finally-block, and re-apply
-// them after again leaving that block. Special tokens are used to identify
-// paths going through the finally-block to dispatch after leaving the block.
-class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
- public:
- explicit DeferredCommands(AstGraphBuilder* owner)
- : owner_(owner),
- deferred_(owner->local_zone()),
- return_token_(nullptr),
- throw_token_(nullptr) {}
-
- // One recorded control-flow command.
- struct Entry {
- Command command; // The command type being applied on this path.
- Statement* statement; // The target statement for the command or {nullptr}.
- Node* token; // A token identifying this particular path.
- };
-
- // Records a control-flow command while entering the finally-block. This also
- // generates a new dispatch token that identifies one particular path.
- Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
- Node* token = nullptr;
- switch (cmd) {
- case CMD_BREAK:
- case CMD_CONTINUE:
- token = NewPathToken(dispenser_.GetBreakContinueToken());
- break;
- case CMD_THROW:
- if (throw_token_) return throw_token_;
- token = NewPathToken(TokenDispenserForFinally::kThrowToken);
- throw_token_ = token;
- break;
- case CMD_RETURN:
- if (return_token_) return return_token_;
- token = NewPathToken(TokenDispenserForFinally::kReturnToken);
- return_token_ = token;
- break;
- }
- DCHECK_NOT_NULL(token);
- deferred_.push_back({cmd, stmt, token});
- return token;
- }
-
- // Returns the dispatch token to be used to identify the implicit fall-through
- // path at the end of a try-block into the corresponding finally-block.
- Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
-
- // Applies all recorded control-flow commands after the finally-block again.
- // This generates a dynamic dispatch on the token from the entry point.
- void ApplyDeferredCommands(Node* token, Node* value) {
- SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
- dispatch.BeginSwitch();
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
- dispatch.BeginLabel(static_cast<int>(i), condition);
- dispatch.EndLabel();
- }
- for (size_t i = 0; i < deferred_.size(); ++i) {
- dispatch.BeginCase(static_cast<int>(i));
- owner_->execution_control()->PerformCommand(
- deferred_[i].command, deferred_[i].statement, value);
- dispatch.EndCase();
- }
- dispatch.EndSwitch();
- }
-
- protected:
- Node* NewPathToken(int token_id) {
- return owner_->jsgraph()->Constant(token_id);
- }
- Node* NewPathTokenForImplicitFallThrough() {
- return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
- }
- Node* NewPathDispatchCondition(Node* t1, Node* t2) {
- return owner_->NewNode(
- owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
- }
-
- private:
- TokenDispenserForFinally dispenser_;
- AstGraphBuilder* owner_;
- ZoneVector<Entry> deferred_;
- Node* return_token_;
- Node* throw_token_;
-};
-
// Control scope implementation for a BreakableStatement.
class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
@@ -356,65 +267,9 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
};
-// Control scope implementation for a TryCatchStatement.
-class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
- public:
- ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
- TryCatchBuilder* control)
- : ControlScope(owner), control_(control) {
- builder()->try_nesting_level_++; // Increment nesting.
- }
- ~ControlScopeForCatch() {
- builder()->try_nesting_level_--; // Decrement nesting.
- }
-
- protected:
- bool Execute(Command cmd, Statement* target, Node** value) override {
- switch (cmd) {
- case CMD_THROW:
- control_->Throw(*value);
- return true;
- case CMD_BREAK:
- case CMD_CONTINUE:
- case CMD_RETURN:
- break;
- }
- return false;
- }
-
- private:
- TryCatchBuilder* control_;
-};
-
-
-// Control scope implementation for a TryFinallyStatement.
-class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
- public:
- ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
- DeferredCommands* commands, TryFinallyBuilder* control)
- : ControlScope(owner), commands_(commands), control_(control) {
- builder()->try_nesting_level_++; // Increment nesting.
- }
- ~ControlScopeForFinally() {
- builder()->try_nesting_level_--; // Decrement nesting.
- }
-
- protected:
- bool Execute(Command cmd, Statement* target, Node** value) override {
- Node* token = commands_->RecordCommand(cmd, target, *value);
- control_->LeaveTry(token, *value);
- return true;
- }
-
- private:
- DeferredCommands* commands_;
- TryFinallyBuilder* control_;
-};
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, float invocation_frequency,
- LoopAssignmentAnalysis* loop,
- TypeHintAnalysis* type_hint_analysis)
+ LoopAssignmentAnalysis* loop)
: isolate_(info->isolate()),
local_zone_(local_zone),
info_(info),
@@ -425,12 +280,10 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
globals_(0, local_zone),
execution_control_(nullptr),
execution_context_(nullptr),
- try_nesting_level_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
- type_hint_analysis_(type_hint_analysis),
state_values_cache_(jsgraph),
liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
false, local_zone),
@@ -453,7 +306,7 @@ Node* AstGraphBuilder::GetFunctionClosureForContext() {
// calling eval, not the anonymous closure containing the eval code.
const Operator* op =
javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
- return NewNode(op, current_context());
+ return NewNode(op);
} else {
DCHECK(closure_scope->is_function_scope());
return GetFunctionClosure();
@@ -483,18 +336,6 @@ Node* AstGraphBuilder::GetFunctionContext() {
return function_context_.get();
}
-
-Node* AstGraphBuilder::GetNewTarget() {
- if (!new_target_.is_set()) {
- int params = info()->num_parameters_including_this();
- int index = Linkage::GetJSCallNewTargetParamIndex(params);
- const Operator* op = common()->Parameter(index, "%new.target");
- Node* node = NewNode(op, graph()->start());
- new_target_.set(node);
- }
- return new_target_.get();
-}
-
Node* AstGraphBuilder::GetEmptyFrameState() {
if (!empty_frame_state_.is_set()) {
const Operator* op = common()->FrameState(
@@ -573,15 +414,10 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
- // Build rest arguments array if it is used.
- Variable* rest_parameter = scope->rest_parameter();
- BuildRestArgumentsArray(rest_parameter);
-
- // Build assignment to {.this_function} variable if it is used.
- BuildThisFunctionVariable(scope->this_function_var());
-
- // Build assignment to {new.target} variable if it is used.
- BuildNewTargetVariable(scope->new_target_var());
+ // We don't support new.target and rest parameters here.
+ DCHECK_NULL(scope->new_target_var());
+ DCHECK_NULL(scope->rest_parameter());
+ DCHECK_NULL(scope->this_function_var());
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -835,7 +671,7 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
if (should_update) {
- const Operator* op = common()->StateValues(count);
+ const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
(*state_values) = graph()->NewNode(op, count, env_values);
}
}
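StateValues nodes now carry an input mask; SparseInputMask::Dense() declares that every logical frame slot has a real input node, while the sparse form lets optimized-away slots be omitted from the node's inputs. A small model of that idea; the bit layout here is illustrative, not V8's exact encoding:

#include <cassert>
#include <cstdint>
#include <vector>

// Model: a frame-state row of N logical slots backed by fewer real inputs.
// A set bit means "slot i has a real input"; clear means "optimized away".
struct SparseStateValues {
  uint32_t mask;
  std::vector<int> inputs;  // only the live slots, in order

  int SlotOrHole(int i) const {  // -1 models the "optimized out" hole
    if (!(mask & (1u << i))) return -1;
    int idx = 0;
    for (int j = 0; j < i; ++j) idx += (mask >> j) & 1;  // rank of bit i
    return inputs[idx];
  }
};

int main() {
  // Dense: all four slots live, mask 0b1111, matching what Dense() promises.
  SparseStateValues dense{0b1111, {10, 11, 12, 13}};
  assert(dense.SlotOrHole(2) == 12);
  // Sparse: slots 1 and 3 optimized away, so only two real inputs are stored.
  SparseStateValues sparse{0b0101, {10, 12}};
  assert(sparse.SlotOrHole(1) == -1 && sparse.SlotOrHole(2) == 12);
}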
@@ -1092,6 +928,7 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals()->push_back(variable->name());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
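With the added push_back, each unallocated (global) declaration now contributes two entries to the flat globals list, the variable name followed by its feedback slot, which VisitDeclarations further down packs into a FixedArray for Runtime::kDeclareGlobals (the local there is renamed from pairs to decls accordingly). A sketch of consuming such a flat list, under the assumed [name, slot, name, slot, ...] layout:

#include <cstdio>
#include <string>
#include <variant>
#include <vector>

using GlobalsEntry = std::variant<std::string, int>;  // name or Smi slot index

// Layout assumed from the two push_back calls per declaration in the diff:
// a name entry immediately followed by its feedback-slot entry.
void DeclareGlobals(const std::vector<GlobalsEntry>& decls) {
  for (size_t i = 0; i + 1 < decls.size(); i += 2) {
    std::printf("declare %s (feedback slot %d)\n",
                std::get<std::string>(decls[i]).c_str(),
                std::get<int>(decls[i + 1]));
  }
}

int main() {
  DeclareGlobals({std::string("x"), 0, std::string("f"), 1});
}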
@@ -1109,17 +946,10 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
if (variable->binding_needs_init()) {
Node* value = jsgraph()->TheHoleConstant();
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, current_context(), value);
+ NewNode(op, value);
}
break;
- case VariableLocation::LOOKUP: {
- DCHECK(!variable->binding_needs_init());
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
- Node* store = NewNode(op, name);
- PrepareFrameState(store, decl->proxy()->id());
- break;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1134,6 +964,7 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
decl->fun(), info()->script(), info());
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
+ globals()->push_back(variable->name());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
@@ -1151,19 +982,10 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
VisitForValue(decl->fun());
Node* value = environment()->Pop();
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, current_context(), value);
- break;
- }
- case VariableLocation::LOOKUP: {
- VisitForValue(decl->fun());
- Node* value = environment()->Pop();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
- Node* store = NewNode(op, name, value);
- PrepareFrameState(store, decl->proxy()->id());
+ NewNode(op, value);
break;
}
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1240,14 +1062,8 @@ void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
- VisitForValue(stmt->expression());
- Node* value = environment()->Pop();
- Node* object = BuildToObject(value, stmt->ToObjectId());
- Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
- const Operator* op = javascript()->CreateWithContext(scope_info);
- Node* context = NewNode(op, object, GetFunctionClosureForContext());
- PrepareFrameState(context, stmt->EntryId());
- VisitInScope(stmt->statement(), stmt->scope(), context);
+ // Dynamic scoping is supported only by going through Ignition first.
+ UNREACHABLE();
}
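This UNREACHABLE(), like the ones that follow for for-of, try/catch, try/finally, yield, class literals, super references, and GetIterator, encodes a pipeline invariant rather than a crash path: functions using these constructs are routed through Ignition's bytecode pipeline first, so the AST graph builder can assume it never sees them. A hypothetical gating predicate mirroring that invariant (the diff itself does not show the actual check):

// Hypothetical pre-pass matching the invariant the UNREACHABLE() bodies rely
// on. Names are illustrative, not V8's.
enum class Construct { kTryCatch, kTryFinally, kForOf, kWith, kYield,
                       kClassLiteral, kSuperProperty, kGetIterator, kOther };

bool MustUseIgnition(Construct c) {
  switch (c) {
    case Construct::kTryCatch:
    case Construct::kTryFinally:
    case Construct::kForOf:
    case Construct::kWith:
    case Construct::kYield:
    case Construct::kClassLiteral:
    case Construct::kSuperProperty:
    case Construct::kGetIterator:
      return true;   // bytecode pipeline only; AstGraphBuilder would hard-fail
    case Construct::kOther:
      return false;  // still eligible for the AST graph builder
  }
  return true;
}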
@@ -1277,13 +1093,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
Node* label = environment()->Pop();
Node* tag = environment()->Top();
- CompareOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
- &hint)) {
- hint = CompareOperationHint::kAny;
- }
-
+ CompareOperationHint hint = CompareOperationHint::kAny;
const Operator* op = javascript()->StrictEqual(hint);
Node* condition = NewNode(op, tag, label);
compare_switch.BeginLabel(i, condition);
@@ -1450,114 +1260,20 @@ void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
- LoopBuilder for_loop(this);
- VisitForEffect(stmt->assign_iterator());
- for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitForEffect(stmt->next_result());
- VisitForTest(stmt->result_done());
- Node* condition = environment()->Pop();
- for_loop.BreakWhen(condition);
- VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
- for_loop.EndBody();
- for_loop.EndLoop();
+ // Iterator looping is supported only by going through Ignition first.
+ UNREACHABLE();
}
void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- TryCatchBuilder try_control(this);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting 'throw' control commands.
- try_control.BeginTry();
- {
- ControlScopeForCatch scope(this, stmt, &try_control);
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- environment()->Push(current_context());
- Visit(stmt->try_block());
- environment()->Pop();
- }
- try_control.EndTry();
-
- // If requested, clear message object as we enter the catch block.
- if (stmt->clear_pending_message()) {
- Node* the_hole = jsgraph()->TheHoleConstant();
- NewNode(javascript()->StoreMessage(), the_hole);
- }
-
- // Create a catch scope that binds the exception.
- Node* exception = try_control.GetExceptionNode();
- Handle<String> name = stmt->variable()->name();
- Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
- const Operator* op = javascript()->CreateCatchContext(name, scope_info);
- Node* context = NewNode(op, exception, GetFunctionClosureForContext());
-
- // Evaluate the catch-block.
- VisitInScope(stmt->catch_block(), stmt->scope(), context);
- try_control.EndCatch();
+ // Exception handling is supported only by going through Ignition first.
+ UNREACHABLE();
}
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- TryFinallyBuilder try_control(this);
-
- // We keep a record of all paths that enter the finally-block to be able to
- // dispatch to the correct continuation point after the statements in the
- // finally-block have been evaluated.
- //
- // The try-finally construct can enter the finally-block in three ways:
- // 1. By exiting the try-block normally, falling through at the end.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (i.e. through break/continue/return statements).
- // 3. By exiting the try-block with a thrown exception.
- Node* fallthrough_result = jsgraph()->TheHoleConstant();
- ControlScope::DeferredCommands* commands =
- new (local_zone()) ControlScope::DeferredCommands(this);
-
- // Evaluate the try-block inside a control scope. This simulates a handler
- // that is intercepting all control commands.
- try_control.BeginTry();
- {
- ControlScopeForFinally scope(this, stmt, commands, &try_control);
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- environment()->Push(current_context());
- Visit(stmt->try_block());
- environment()->Pop();
- }
- try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
-
- // The result value semantics depend on how the block was entered:
- // - ReturnStatement: It represents the return value being returned.
- // - ThrowStatement: It represents the exception being thrown.
- // - BreakStatement/ContinueStatement: Filled with the hole.
- // - Falling through into finally-block: Filled with the hole.
- Node* result = try_control.GetResultValueNode();
- Node* token = try_control.GetDispatchTokenNode();
-
- // The result value, dispatch token and message is expected on the operand
- // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
- Node* message = NewNode(javascript()->LoadMessage());
- environment()->Push(token);
- environment()->Push(result);
- environment()->Push(message);
-
- // Clear message object as we enter the finally block.
- Node* the_hole = jsgraph()->TheHoleConstant();
- NewNode(javascript()->StoreMessage(), the_hole);
-
- // Evaluate the finally-block.
- Visit(stmt->finally_block());
- try_control.EndFinally();
-
- // The result value, dispatch token and message is restored from the operand
- // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
- message = environment()->Pop();
- result = environment()->Pop();
- token = environment()->Pop();
- NewNode(javascript()->StoreMessage(), message);
-
- // Dynamic dispatch after the finally-block.
- commands->ApplyDeferredCommands(token, result);
+ // Exception handling is supported only by going through Ignition first.
+ UNREACHABLE();
}
@@ -1577,112 +1293,14 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// Create node to instantiate a new closure.
PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
+ VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+ const Operator* op =
+ javascript()->CreateClosure(shared_info, pair, pretenure);
Node* value = NewNode(op);
ast_context()->ProduceValue(expr, value);
}
-
-void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
- VisitForValueOrTheHole(expr->extends());
- VisitForValue(expr->constructor());
-
- // Create node to instantiate a new class.
- Node* constructor = environment()->Pop();
- Node* extends = environment()->Pop();
- Node* start = jsgraph()->Constant(expr->start_position());
- Node* end = jsgraph()->Constant(expr->end_position());
- const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
- Node* literal = NewNode(opc, extends, constructor, start, end);
- PrepareFrameState(literal, expr->CreateLiteralId(),
- OutputFrameStateCombine::Push());
- environment()->Push(literal);
-
- // Load the "prototype" from the constructor.
- PrepareEagerCheckpoint(expr->CreateLiteralId());
- Handle<Name> name = isolate()->factory()->prototype_string();
- VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
- Node* prototype = BuildNamedLoad(literal, name, pair);
- PrepareFrameState(prototype, expr->PrototypeId(),
- OutputFrameStateCombine::Push());
- environment()->Push(prototype);
-
- // Create nodes to store method values into the literal.
- for (int i = 0; i < expr->properties()->length(); i++) {
- ClassLiteral::Property* property = expr->properties()->at(i);
- environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
-
- VisitForValue(property->key());
- Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
- environment()->Push(name);
-
- // The static prototype property is read only. We handle the non computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read only property we special case this so we do
- // not need to do this for every property.
- if (property->is_static() && property->is_computed_name()) {
- Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
- expr->GetIdForProperty(i));
- environment()->Push(check);
- }
-
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* key = environment()->Pop();
- Node* receiver = environment()->Pop();
-
- BuildSetHomeObject(value, receiver, property);
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- Node* set_function_name =
- jsgraph()->Constant(property->NeedsSetFunctionName());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- case ClassLiteral::Property::GETTER: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineGetterPropertyUnchecked, 4);
- NewNode(op, receiver, key, value, attr);
- break;
- }
- case ClassLiteral::Property::SETTER: {
- Node* attr = jsgraph()->Constant(DONT_ENUM);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineSetterPropertyUnchecked, 4);
- NewNode(op, receiver, key, value, attr);
- break;
- }
- case ClassLiteral::Property::FIELD: {
- UNREACHABLE();
- break;
- }
- }
- }
-
- // Set the constructor to have fast properties.
- prototype = environment()->Pop();
- literal = environment()->Pop();
- const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
- literal = NewNode(op, literal);
-
- // Assign to class variable.
- if (expr->class_variable_proxy() != nullptr) {
- Variable* var = expr->class_variable_proxy()->var();
- VectorSlotPair feedback = CreateVectorSlotPair(
- expr->NeedsProxySlot() ? expr->ProxySlot()
- : FeedbackVectorSlot::Invalid());
- BuildVariableAssignment(var, literal, Token::INIT, feedback,
- BailoutId::None());
- }
- ast_context()->ProduceValue(expr, literal);
-}
-
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
UNREACHABLE();
@@ -1746,7 +1364,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralObject(
- expr->constant_properties(), expr->ComputeFlags(true),
+ expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
expr->literal_index(), expr->properties_count());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
@@ -1757,15 +1375,15 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
environment()->Push(literal);
// Create nodes to store computed values into the literal.
- int property_index = 0;
AccessorTable accessor_table(local_zone());
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1818,21 +1436,20 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
javascript()->CallRuntime(Runtime::kInternalSetPrototype);
Node* set_prototype = NewNode(op, receiver, value);
// SetPrototype should not lazy deopt on an object literal.
- PrepareFrameState(set_prototype,
- expr->GetIdForPropertySet(property_index));
+ PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
break;
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1856,77 +1473,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* call = NewNode(op, literal, name, getter, setter, attr);
PrepareFrameState(call, it->second->bailout_id);
}
-
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
- // with the first computed property name and continues with all properties to
- // its right. All the code from above initializes the static component of the
- // object literal, and arranges for the map of the result to reflect the
- // static order in which the keys appear. For the dynamic properties, we
- // compile them into a series of "SetOwnProperty" runtime calls. This will
- // preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- environment()->Push(environment()->Top()); // Duplicate receiver.
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* receiver = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInternalSetPrototype);
- Node* call = NewNode(op, receiver, value);
- PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
- continue;
- }
-
- environment()->Push(environment()->Top()); // Duplicate receiver.
- VisitForValue(property->key());
- Node* name = BuildToName(environment()->Pop(),
- expr->GetIdForPropertyName(property_index));
- environment()->Push(name);
- VisitForValue(property->value());
- Node* value = environment()->Pop();
- Node* key = environment()->Pop();
- Node* receiver = environment()->Pop();
- BuildSetHomeObject(value, receiver, property);
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::COMPUTED:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
- if (!property->emit_store()) continue;
- Node* attr = jsgraph()->Constant(NONE);
- Node* set_function_name =
- jsgraph()->Constant(property->NeedsSetFunctionName());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
- Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
- PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
- break;
- }
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE(); // Handled specially above.
- break;
- case ObjectLiteral::Property::GETTER: {
- Node* attr = jsgraph()->Constant(NONE);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineGetterPropertyUnchecked, 4);
- Node* call = NewNode(op, receiver, key, value, attr);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- case ObjectLiteral::Property::SETTER: {
- Node* attr = jsgraph()->Constant(NONE);
- const Operator* op = javascript()->CallRuntime(
- Runtime::kDefineSetterPropertyUnchecked, 4);
- Node* call = NewNode(op, receiver, key, value, attr);
- PrepareFrameState(call, BailoutId::None());
- break;
- }
- }
- }
-
ast_context()->ProduceValue(expr, environment()->Pop());
}
@@ -1947,7 +1493,7 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Create node to deep-copy the literal boilerplate.
const Operator* op = javascript()->CreateLiteralArray(
- expr->constant_elements(), expr->ComputeFlags(true),
+ expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
expr->literal_index(), expr->values()->length());
Node* literal = NewNode(op, closure);
PrepareFrameState(literal, expr->CreateLiteralId(),
@@ -2015,31 +1561,10 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
break;
}
- case NAMED_SUPER_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- environment()->Push(value);
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
}
@@ -2071,13 +1596,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
VisitForValue(property->key());
break;
case NAMED_SUPER_PROPERTY:
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- break;
case KEYED_SUPER_PROPERTY:
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
+ UNREACHABLE();
break;
}
@@ -2115,28 +1635,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Top();
- Node* receiver = environment()->Peek(1);
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Top();
- Node* home_object = environment()->Peek(1);
- Node* receiver = environment()->Peek(2);
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
environment()->Push(old_value);
VisitForValue(expr->value());
@@ -2181,22 +1683,10 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
ast_context()->ProduceValue(expr, value);
@@ -2205,8 +1695,7 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
void AstGraphBuilder::VisitYield(Yield* expr) {
// Generator functions are supported only by going through Ignition first.
- SetStackOverflow();
- ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
+ UNREACHABLE();
}
@@ -2243,27 +1732,10 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
- value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(expr->key());
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
ast_context()->ProduceValue(expr, value);
}
@@ -2272,140 +1744,70 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
void AstGraphBuilder::VisitCall(Call* expr) {
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType();
+ CHECK(!expr->is_possibly_eval());
// Prepare the callee and the receiver to the function call. This depends on
// the semantics of the underlying call type.
ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
Node* receiver_value = nullptr;
Node* callee_value = nullptr;
- if (expr->is_possibly_eval()) {
- if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
- Variable* variable = callee->AsVariableProxy()->var();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
- Node* pair = NewNode(op, name);
- callee_value = NewNode(common()->Projection(0), pair);
- receiver_value = NewNode(common()->Projection(1), pair);
- PrepareFrameState(pair, expr->LookupId(),
- OutputFrameStateCombine::Push(2));
- } else {
- VisitForValue(callee);
- callee_value = environment()->Pop();
+ switch (call_type) {
+ case Call::GLOBAL_CALL: {
+ VariableProxy* proxy = callee->AsVariableProxy();
+ VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+ PrepareEagerCheckpoint(BeforeId(proxy));
+ callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+ pair, OutputFrameStateCombine::Push());
receiver_hint = ConvertReceiverMode::kNullOrUndefined;
receiver_value = jsgraph()->UndefinedConstant();
+ break;
}
- } else {
- switch (call_type) {
- case Call::GLOBAL_CALL: {
- VariableProxy* proxy = callee->AsVariableProxy();
- VectorSlotPair pair =
- CreateVectorSlotPair(proxy->VariableFeedbackSlot());
- PrepareEagerCheckpoint(BeforeId(proxy));
- callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
- pair, OutputFrameStateCombine::Push());
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
- }
- case Call::WITH_CALL: {
- Variable* variable = callee->AsVariableProxy()->var();
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
- Node* pair = NewNode(op, name);
- callee_value = NewNode(common()->Projection(0), pair);
- receiver_value = NewNode(common()->Projection(1), pair);
- PrepareFrameState(pair, expr->LookupId(),
- OutputFrameStateCombine::Push(2));
- break;
- }
- case Call::NAMED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* object = environment()->Top();
- callee_value = BuildNamedLoad(object, name, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VectorSlotPair feedback =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- VisitForValue(property->obj());
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* object = environment()->Top();
- callee_value = BuildKeyedLoad(object, key, feedback);
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. However the receiver is guaranteed
- // not to be null or undefined at this point.
- receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
- receiver_value = environment()->Pop();
- break;
- }
- case Call::NAMED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- Node* home = environment()->Peek(1);
- Node* object = environment()->Top();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- callee_value =
- BuildNamedSuperLoad(object, home, name, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
- }
- case Call::KEYED_SUPER_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- SuperPropertyReference* super_ref =
- property->obj()->AsSuperPropertyReference();
- VisitForValue(super_ref->home_object());
- VisitForValue(super_ref->this_var());
- environment()->Push(environment()->Top()); // Duplicate this_var.
- environment()->Push(environment()->Peek(2)); // Duplicate home_obj.
- VisitForValue(property->key());
- Node* key = environment()->Pop();
- Node* home = environment()->Pop();
- Node* object = environment()->Pop();
- callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
- PrepareFrameState(callee_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- // Note that a property call requires the receiver to be wrapped into
- // an object for sloppy callees. Since the receiver is not the target of
- // the load, it could very well be null or undefined at this point.
- receiver_value = environment()->Pop();
- environment()->Drop(1);
- break;
- }
- case Call::SUPER_CALL:
- return VisitCallSuper(expr);
- case Call::OTHER_CALL:
- VisitForValue(callee);
- callee_value = environment()->Pop();
- receiver_hint = ConvertReceiverMode::kNullOrUndefined;
- receiver_value = jsgraph()->UndefinedConstant();
- break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* object = environment()->Top();
+ callee_value = BuildNamedLoad(object, name, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->PropertyFeedbackSlot());
+ VisitForValue(property->obj());
+ VisitForValue(property->key());
+ Node* key = environment()->Pop();
+ Node* object = environment()->Top();
+ callee_value = BuildKeyedLoad(object, key, feedback);
+ PrepareFrameState(callee_value, property->LoadId(),
+ OutputFrameStateCombine::Push());
+ // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+ // not to be null or undefined at this point.
+ receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+ receiver_value = environment()->Pop();
+ break;
}
+ case Call::OTHER_CALL:
+ VisitForValue(callee);
+ callee_value = environment()->Pop();
+ receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+ receiver_value = jsgraph()->UndefinedConstant();
+ break;
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ case Call::SUPER_CALL:
+ case Call::WITH_CALL:
+ UNREACHABLE();
}
// The callee and the receiver both have to be pushed onto the operand stack
@@ -2417,41 +1819,13 @@ void AstGraphBuilder::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForValues(args);
- // Resolve callee for a potential direct eval call. This block will mutate the
- // callee value pushed onto the environment.
- if (expr->is_possibly_eval() && args->length() > 0) {
- int arg_count = args->length();
-
- // Extract callee and source string from the environment.
- Node* callee = environment()->Peek(arg_count + 1);
- Node* source = environment()->Peek(arg_count - 1);
-
- // Create node to ask for help resolving potential eval call. This will
- // provide a fully resolved callee to patch into the environment.
- Node* function = GetFunctionClosure();
- Node* language = jsgraph()->Constant(language_mode());
- Node* eval_scope_position =
- jsgraph()->Constant(current_scope()->start_position());
- Node* eval_position = jsgraph()->Constant(expr->position());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
- Node* new_callee = NewNode(op, callee, source, function, language,
- eval_scope_position, eval_position);
- PrepareFrameState(new_callee, expr->EvalId(),
- OutputFrameStateCombine::PokeAt(arg_count + 1));
-
- // Patch callee on the environment.
- environment()->Poke(arg_count + 1, new_callee);
- }
-
// Create node to perform the function call.
float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call =
javascript()->CallFunction(args->length() + 2, frequency, feedback,
receiver_hint, expr->tail_call_mode());
- PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
- : expr->CallId());
+ PrepareEagerCheckpoint(expr->CallId());
Node* value = ProcessArguments(call, args->length() + 2);
// The callee passed to the call, we just need to push something here to
// satisfy the bailout location contract. The fullcodegen code will not
@@ -2463,34 +1837,6 @@ void AstGraphBuilder::VisitCall(Call* expr) {
}
-void AstGraphBuilder::VisitCallSuper(Call* expr) {
- SuperCallReference* super = expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super);
-
- // Prepare the callee to the super call.
- VisitForValue(super->this_function_var());
- Node* this_function = environment()->Pop();
- const Operator* op =
- javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
- Node* super_function = NewNode(op, this_function);
- environment()->Push(super_function);
-
- // Evaluate all arguments to the super call.
- ZoneList<Expression*>* args = expr->arguments();
- VisitForValues(args);
-
- // The new target is loaded from the {new.target} variable.
- VisitForValue(super->new_target_var());
-
- // Create node to perform the super call.
- const Operator* call =
- javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
- Node* value = ProcessArguments(call, args->length() + 2);
- PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
- ast_context()->ProduceValue(expr, value);
-}
-
-
void AstGraphBuilder::VisitCallNew(CallNew* expr) {
VisitForValue(expr->expression());
@@ -2625,35 +1971,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
stack_depth = 2;
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- Node* home_object = environment()->Top();
- Node* receiver = environment()->Peek(1);
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- stack_depth = 2;
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
- VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
- VisitForValue(property->key());
- Node* key = environment()->Top();
- Node* home_object = environment()->Peek(1);
- Node* receiver = environment()->Peek(2);
- VectorSlotPair pair =
- CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
- PrepareFrameState(old_value, property->LoadId(),
- OutputFrameStateCombine::Push());
- stack_depth = 3;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
// Convert old value into a number.
@@ -2708,24 +2029,10 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
OutputFrameStateCombine::Push());
break;
}
- case NAMED_SUPER_PROPERTY: {
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
- Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- Node* key = environment()->Pop();
- Node* home_object = environment()->Pop();
- Node* receiver = environment()->Pop();
- Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
- PrepareFrameState(store, expr->AssignmentId(),
- OutputFrameStateCombine::Push());
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
break;
- }
}
// Restore old value for postfix expressions.
@@ -2804,13 +2111,7 @@ void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
}
- CompareOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetCompareOperationHint(
- expr->CompareOperationFeedbackId(), &hint)) {
- hint = CompareOperationHint::kAny;
- }
-
+ CompareOperationHint hint = CompareOperationHint::kAny;
const Operator* op;
switch (expr->op()) {
case Token::EQ:
@@ -2868,6 +2169,10 @@ void AstGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void AstGraphBuilder::VisitGetIterator(GetIterator* expr) {
+ // GetIterator is supported only by going through Ignition first.
+ UNREACHABLE();
+}
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
@@ -2877,8 +2182,7 @@ void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
void AstGraphBuilder::VisitSuperPropertyReference(
SuperPropertyReference* expr) {
- Node* value = BuildThrowUnsupportedSuperError(expr->id());
- ast_context()->ProduceValue(expr, value);
+ UNREACHABLE();
}
@@ -2905,10 +2209,10 @@ void AstGraphBuilder::VisitDeclarations(Declaration::List* declarations) {
for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
int encoded_flags = info()->GetDeclareGlobalsFlags();
Node* flags = jsgraph()->Constant(encoded_flags);
- Node* pairs = jsgraph()->Constant(data);
+ Node* decls = jsgraph()->Constant(data);
Node* vector = jsgraph()->Constant(feedback_vector);
const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
- Node* call = NewNode(op, pairs, flags, vector);
+ Node* call = NewNode(op, decls, flags, vector);
PrepareFrameState(call, BailoutId::Declarations());
globals()->clear();
}
@@ -2920,12 +2224,6 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
}
-void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
- ContextScope scope(this, s, context);
- DCHECK(s->declarations()->is_empty());
- Visit(stmt);
-}
-
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop,
BailoutId stack_check_id) {
@@ -3074,46 +2372,6 @@ void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
-
-namespace {
-
-// Limit of context chain length to which inline check is possible.
-const int kMaxCheckDepth = 30;
-
-// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
-const uint32_t kFullCheckRequired = -1;
-
-} // namespace
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
- DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
- uint32_t check_depths = 0;
- for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (!s->calls_sloppy_eval()) continue;
- int depth = current_scope()->ContextChainLength(s);
- if (depth > kMaxCheckDepth) return kFullCheckRequired;
- check_depths |= 1 << depth;
- }
- return check_depths;
-}
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
- DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
- uint32_t check_depths = 0;
- for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
- int depth = current_scope()->ContextChainLength(s);
- if (depth > kMaxCheckDepth) return kFullCheckRequired;
- check_depths |= 1 << depth;
- if (s == variable->scope()) break;
- }
- return check_depths;
-}
-
float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
if (slot.IsInvalid()) return 0.0f;
Handle<TypeFeedbackVector> feedback_vector(
@@ -3147,7 +2405,8 @@ Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
Variable* variable = scope->receiver();
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, local_context, receiver);
+ Node* node = NewNode(op, receiver);
+ NodeProperties::ReplaceContextInput(node, local_context);
}
// Copy parameters into context if necessary.
@@ -3159,7 +2418,8 @@ Node* AstGraphBuilder::BuildLocalActivationContext(Node* context) {
// Context variable (at bottom of the context chain).
DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
const Operator* op = javascript()->StoreContext(0, variable->index());
- NewNode(op, local_context, parameter);
+ Node* node = NewNode(op, parameter);
+ NodeProperties::ReplaceContextInput(node, local_context);
}
return local_context;
@@ -3171,7 +2431,8 @@ Node* AstGraphBuilder::BuildLocalFunctionContext(Scope* scope) {
// Allocate a new local context.
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- const Operator* op = javascript()->CreateFunctionContext(slot_count);
+ const Operator* op =
+ javascript()->CreateFunctionContext(slot_count, scope->scope_type());
Node* local_context = NewNode(op, GetFunctionClosure());
return local_context;
@@ -3224,52 +2485,6 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
return object;
}
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
- if (rest == nullptr) return nullptr;
-
- // Allocate and initialize a new arguments object.
- CreateArgumentsType type = CreateArgumentsType::kRestParameter;
- const Operator* op = javascript()->CreateArguments(type);
- Node* object = NewNode(op, GetFunctionClosure());
- PrepareFrameState(object, BailoutId::None());
-
- // Assign the object to the {rest} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None());
- return object;
-}
-
-
-Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
- if (this_function_var == nullptr) return nullptr;
-
- // Retrieve the closure we were called with.
- Node* this_function = GetFunctionClosure();
-
- // Assign the object to the {.this_function} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- BuildVariableAssignment(this_function_var, this_function, Token::INIT,
- VectorSlotPair(), BailoutId::None());
- return this_function;
-}
-
-
-Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
- if (new_target_var == nullptr) return nullptr;
-
- // Retrieve the new target we were called with.
- Node* object = GetNewTarget();
-
- // Assign the object to the {new.target} variable. This should never lazy
- // deopt, so it is fine to send invalid bailout id.
- BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
- BailoutId::None());
- return object;
-}
-
-
Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
Node* not_hole,
BailoutId bailout_id) {
@@ -3305,25 +2520,6 @@ Node* AstGraphBuilder::BuildHoleCheckElseThrow(Node* value, Variable* variable,
return environment()->Pop();
}
-
-Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
- BailoutId bailout_id) {
- IfBuilder prototype_check(this);
- Node* prototype_string =
- jsgraph()->Constant(isolate()->factory()->prototype_string());
- Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
- name, prototype_string);
- prototype_check.If(check);
- prototype_check.Then();
- Node* error = BuildThrowStaticPrototypeError(bailout_id);
- environment()->Push(error);
- prototype_check.Else();
- environment()->Push(name);
- prototype_check.End();
- return environment()->Pop();
-}
-
-
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
const VectorSlotPair& feedback,
@@ -3363,7 +2559,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
info()->is_function_context_specializing();
const Operator* op =
javascript()->LoadContext(depth, variable->index(), immutable);
- Node* value = NewNode(op, current_context());
+ Node* value = NewNode(op);
// TODO(titzer): initialization checks are redundant for already
// initialized immutable context loads, but only specialization knows.
// Maybe specializer should be a parameter to the graph builder?
@@ -3373,17 +2569,7 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
}
return value;
}
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Handle<String> name = variable->name();
- if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
- feedback, combine, typeof_mode)) {
- return node;
- }
- Node* value = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(value, bailout_id, combine);
- return value;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3411,15 +2597,7 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
// Local var, const, or let variable or context variable.
return jsgraph()->BooleanConstant(variable->is_this());
}
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Node* name = jsgraph()->Constant(variable->name());
- const Operator* op =
- javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
- Node* result = NewNode(op, name);
- PrepareFrameState(result, bailout_id, combine);
- return result;
- }
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3498,7 +2676,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
// Perform an initialization check for let declared variables.
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
} else if (mode == CONST && op == Token::INIT) {
// Perform an initialization check for const {this} variables.
@@ -3507,7 +2685,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (variable->is_this()) {
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT &&
@@ -3524,22 +2702,16 @@ Node* AstGraphBuilder::BuildVariableAssignment(
if (variable->binding_needs_init()) {
const Operator* op =
javascript()->LoadContext(depth, variable->index(), false);
- Node* current = NewNode(op, current_context());
+ Node* current = NewNode(op);
BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
// Assignment to const is exception in all modes.
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
- return NewNode(op, current_context(), value);
- }
- case VariableLocation::LOOKUP: {
- // Dynamic lookup of context variable (anywhere in the chain).
- Handle<Name> name = variable->name();
- Node* store = BuildDynamicStore(name, value);
- PrepareFrameState(store, bailout_id, combine);
- return store;
+ return NewNode(op, value);
}
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -3551,7 +2723,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ Node* node = NewNode(op, object, key);
return node;
}
@@ -3559,7 +2731,7 @@ Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
+ Node* node = NewNode(op, object);
return node;
}
@@ -3567,7 +2739,7 @@ Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
const VectorSlotPair& feedback) {
const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
- Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, key, value);
return node;
}
@@ -3577,49 +2749,7 @@ Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreNamed(language_mode(), name, feedback);
- Node* node = NewNode(op, object, value, GetFunctionClosure());
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
- Handle<Name> name,
- const VectorSlotPair& feedback) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
- Node* node = NewNode(op, receiver, home_object, name_node);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
- Node* key,
- const VectorSlotPair& feedback) {
- const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
- Node* node = NewNode(op, receiver, home_object, key);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
- Node* key, Node* value) {
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy;
- const Operator* op = javascript()->CallRuntime(function_id, 4);
- Node* node = NewNode(op, receiver, home_object, key, value);
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value) {
- Node* name_node = jsgraph()->Constant(name);
- Runtime::FunctionId function_id = is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy;
- const Operator* op = javascript()->CallRuntime(function_id, 4);
- Node* node = NewNode(op, receiver, home_object, name_node, value);
+ Node* node = NewNode(op, object, value);
return node;
}
@@ -3628,7 +2758,7 @@ Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode) {
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- Node* node = NewNode(op, GetFunctionClosure());
+ Node* node = NewNode(op);
return node;
}
@@ -3637,33 +2767,10 @@ Node* AstGraphBuilder::BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback) {
const Operator* op =
javascript()->StoreGlobal(language_mode(), name, feedback);
- Node* node = NewNode(op, value, GetFunctionClosure());
- return node;
-}
-
-
-Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
- TypeofMode typeof_mode) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op =
- javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof);
- Node* node = NewNode(op, name_node);
+ Node* node = NewNode(op, value);
return node;
}
-
-Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
- Node* name_node = jsgraph()->Constant(name);
- const Operator* op = javascript()->CallRuntime(
- is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- Node* node = NewNode(op, name_node, value);
- return node;
-}
-
-
Node* AstGraphBuilder::BuildLoadGlobalObject() {
return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
}
@@ -3672,30 +2779,20 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
const Operator* op =
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
- Node* native_context = NewNode(op, current_context());
- return NewNode(javascript()->LoadContext(0, index, true), native_context);
+ Node* native_context = NewNode(op);
+ Node* result = NewNode(javascript()->LoadContext(0, index, true));
+ NodeProperties::ReplaceContextInput(result, native_context);
+ return result;
}
Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
if (Node* node = TryFastToBoolean(input)) return node;
- ToBooleanHints hints;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
- hints = ToBooleanHint::kAny;
- }
+ ToBooleanHints hints = ToBooleanHint::kAny;
return NewNode(javascript()->ToBoolean(hints), input);
}
-Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
- if (Node* node = TryFastToName(input)) return node;
- Node* name = NewNode(javascript()->ToName(), input);
- PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
- return name;
-}
-
-
Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
Node* object = NewNode(javascript()->ToObject(), input);
PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
@@ -3750,28 +2847,6 @@ Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
}
-Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
- Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
- UpdateControlDependencyToLeaveFunction(control);
- return call;
-}
-
-
-Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
- const Operator* op =
- javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
- Node* call = NewNode(op);
- PrepareFrameState(call, bailout_id);
- Node* control = NewNode(common()->Throw(), call);
- UpdateControlDependencyToLeaveFunction(control);
- return call;
-}
-
-
Node* AstGraphBuilder::BuildReturn(Node* return_value) {
// Emit tracing call if requested to do so.
if (FLAG_trace) {
@@ -3796,11 +2871,7 @@ Node* AstGraphBuilder::BuildThrow(Node* exception_value) {
Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
TypeFeedbackId feedback_id) {
const Operator* js_op;
- BinaryOperationHint hint;
- if (!type_hint_analysis_ ||
- !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
- hint = BinaryOperationHint::kAny;
- }
+ BinaryOperationHint hint = BinaryOperationHint::kAny;
switch (op) {
case Token::BIT_OR:
js_op = javascript()->BitwiseOr(hint);
@@ -3850,109 +2921,6 @@ Node* AstGraphBuilder::TryLoadGlobalConstant(Handle<Name> name) {
return nullptr;
}
-Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
- Handle<String> name,
- BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
- TypeofMode typeof_mode) {
- VariableMode mode = variable->mode();
-
- if (mode == DYNAMIC_GLOBAL) {
- uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
- if (bitset == kFullCheckRequired) return nullptr;
-
- // We are using two blocks to model fast and slow cases.
- BlockBuilder fast_block(this);
- BlockBuilder slow_block(this);
- environment()->Push(jsgraph()->TheHoleConstant());
- slow_block.BeginBlock();
- environment()->Pop();
- fast_block.BeginBlock();
-
- // Perform checks whether the fast mode applies, by looking for any
- // extension object which might shadow the optimistic declaration.
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- current_context());
- Node* check =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
- jsgraph()->TheHoleConstant());
- fast_block.BreakUnless(check, BranchHint::kTrue);
- }
-
- // Fast case, because variable is not shadowed.
- if (Node* constant = TryLoadGlobalConstant(name)) {
- environment()->Push(constant);
- } else {
- // Perform global slot load.
- Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
- PrepareFrameState(fast, bailout_id, combine);
- environment()->Push(fast);
- }
- slow_block.Break();
- environment()->Pop();
- fast_block.EndBlock();
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- Node* slow = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(slow, bailout_id, combine);
- environment()->Push(slow);
- slow_block.EndBlock();
-
- return environment()->Pop();
- }
-
- if (mode == DYNAMIC_LOCAL) {
- uint32_t bitset = ComputeBitsetForDynamicContext(variable);
- if (bitset == kFullCheckRequired) return nullptr;
-
- // We are using two blocks to model fast and slow cases.
- BlockBuilder fast_block(this);
- BlockBuilder slow_block(this);
- environment()->Push(jsgraph()->TheHoleConstant());
- slow_block.BeginBlock();
- environment()->Pop();
- fast_block.BeginBlock();
-
- // Perform checks whether the fast mode applies, by looking for any
- // extension object which might shadow the optimistic declaration.
- for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
- if ((bitset & 1) == 0) continue;
- Node* load = NewNode(
- javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- current_context());
- Node* check =
- NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
- jsgraph()->TheHoleConstant());
- fast_block.BreakUnless(check, BranchHint::kTrue);
- }
-
- // Fast case, because variable is not shadowed. Perform context slot load.
- Variable* local = variable->local_if_not_shadowed();
- DCHECK(local->location() == VariableLocation::CONTEXT); // Must be context.
- Node* fast =
- BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
- environment()->Push(fast);
- slow_block.Break();
- environment()->Pop();
- fast_block.EndBlock();
-
- // Slow case, because variable potentially shadowed. Perform dynamic lookup.
- Node* slow = BuildDynamicLoad(name, typeof_mode);
- PrepareFrameState(slow, bailout_id, combine);
- environment()->Push(slow);
- slow_block.EndBlock();
-
- return environment()->Pop();
- }
-
- return nullptr;
-}
-
-
Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
switch (input->opcode()) {
case IrOpcode::kNumberConstant: {
@@ -3983,24 +2951,6 @@ Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
}
-Node* AstGraphBuilder::TryFastToName(Node* input) {
- switch (input->opcode()) {
- case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = HeapObjectMatcher(input).Value();
- if (object->IsName()) return input;
- break;
- }
- case IrOpcode::kJSToString:
- case IrOpcode::kJSToName:
- case IrOpcode::kJSTypeOf:
- return input;
- default:
- break;
- }
- return nullptr;
-}
-
-
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
DCHECK_EQ(-1, info()->osr_expr_stack_height());
@@ -4073,7 +3023,6 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (!has_context && !has_frame_state && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
- bool inside_try_scope = try_nesting_level_ > 0;
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
if (has_frame_state) ++input_count_with_deps;
@@ -4107,18 +3056,6 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (result->op()->EffectOutputCount() > 0) {
environment_->UpdateEffectDependency(result);
}
- // Add implicit exception continuation for throwing nodes.
- if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
- // Copy the environment for the success continuation.
- Environment* success_env = environment()->CopyForConditional();
- const Operator* op = common()->IfException();
- Node* effect = environment()->GetEffectDependency();
- Node* on_exception = graph()->NewNode(op, effect, result);
- environment_->UpdateControlDependency(on_exception);
- environment_->UpdateEffectDependency(on_exception);
- execution_control()->ThrowValue(on_exception);
- set_environment(success_env);
- }
// Add implicit success continuation for throwing nodes.
if (!result->op()->HasProperty(Operator::kNoThrow)) {
const Operator* op = common()->IfSuccess();
@@ -4244,8 +3181,7 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
Node* osr_context = effect = contexts()->back();
int last = static_cast<int>(contexts()->size() - 1);
for (int i = last - 1; i >= 0; i--) {
- osr_context = effect =
- graph->NewNode(load_op, osr_context, osr_context, effect);
+ osr_context = effect = graph->NewNode(load_op, osr_context, effect);
contexts()->at(i) = osr_context;
}
UpdateEffectDependency(effect);
@@ -4364,10 +3300,9 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
- TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
- int inlining_id)
+ SourcePositionTable* source_positions, int inlining_id)
: AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
- loop_assignment, type_hint_analysis),
+ loop_assignment),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position(), inlining_id) {}
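
The hunks above repeatedly drop the explicit context operand from NewNode calls: the builder now appends its current context to JS nodes implicitly, and callers that need a different context patch it in afterwards via NodeProperties::ReplaceContextInput. A minimal sketch of that pattern, using a hypothetical simplified Node type rather than V8's real classes:

    #include <cassert>
    #include <vector>

    // Hypothetical stand-in for a graph node: value inputs followed by a
    // trailing context input.
    struct Node {
      std::vector<Node*> inputs;
    };

    // NewNode appends the builder's current context automatically...
    Node* NewNode(Node* value, Node* current_context) {
      return new Node{{value, current_context}};
    }

    // ...and callers needing another context overwrite it afterwards,
    // mirroring NodeProperties::ReplaceContextInput.
    void ReplaceContextInput(Node* node, Node* context) {
      node->inputs.back() = context;
    }

    int main() {
      Node receiver, current_ctx, local_ctx;
      Node* store = NewNode(&receiver, &current_ctx);
      ReplaceContextInput(store, &local_ctx);
      assert(store->inputs.back() == &local_ctx);
      delete store;
    }
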
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 2013f5053b..975e08094c 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -26,7 +26,6 @@ class Graph;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
-class TypeHintAnalysis;
// The AstGraphBuilder produces a high-level IR graph, based on an
@@ -39,8 +38,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
float invocation_frequency,
- LoopAssignmentAnalysis* loop_assignment = nullptr,
- TypeHintAnalysis* type_hint_analysis = nullptr);
+ LoopAssignmentAnalysis* loop_assignment = nullptr);
virtual ~AstGraphBuilder() {}
// Creates a graph by visiting the entire AST.
@@ -73,8 +71,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
class ControlScope;
class ControlScopeForBreakable;
class ControlScopeForIteration;
- class ControlScopeForCatch;
- class ControlScopeForFinally;
class Environment;
friend class ControlBuilder;
@@ -98,10 +94,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Nodes representing values in the activation record.
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
- SetOncePointer<Node> new_target_;
-
- // Tracks how many try-blocks are currently entered.
- int try_nesting_level_;
// Temporary storage for building node input lists.
int input_buffer_size_;
@@ -119,9 +111,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
- // Result of type hint analysis performed before graph creation.
- TypeHintAnalysis* type_hint_analysis_;
-
// Cache for StateValues nodes for frame states.
StateValuesCache state_values_cache_;
@@ -171,9 +160,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Get or create the node that represents the incoming function context.
Node* GetFunctionContext();
- // Get or create the node that represents the incoming new target value.
- Node* GetNewTarget();
-
// Get or create the node that represents the empty frame state.
Node* GetEmptyFrameState();
@@ -262,11 +248,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Named and keyed loads require a VectorSlotPair for successful lowering.
VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
- // Determine which contexts need to be checked for extension objects that
- // might shadow the optimistic declaration of dynamic lookup variables.
- uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
- uint32_t ComputeBitsetForDynamicContext(Variable* variable);
-
// Computes the frequency for JSCallFunction and JSCallConstruct nodes.
float ComputeCallFrequency(FeedbackVectorSlot slot) const;
@@ -284,15 +265,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
- // Builder to create an array of rest parameters if used.
- Node* BuildRestArgumentsArray(Variable* rest);
-
- // Builder that assigns to the {.this_function} internal variable if needed.
- Node* BuildThisFunctionVariable(Variable* this_function_var);
-
- // Builder that assigns to the {new.target} internal variable if needed.
- Node* BuildNewTargetVariable(Variable* new_target_var);
-
// Builders for variable load and assignment.
Node* BuildVariableAssignment(Variable* variable, Node* value,
Token::Value op, const VectorSlotPair& slot,
@@ -316,33 +288,18 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
- // Builders for super property loads and stores.
- Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
- Node* value);
- Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
- Handle<Name> name, Node* value);
- Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
- Handle<Name> name, const VectorSlotPair& feedback);
- Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
- const VectorSlotPair& feedback);
-
// Builders for global variable loads and stores.
Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
TypeofMode typeof_mode);
Node* BuildGlobalStore(Handle<Name> name, Node* value,
const VectorSlotPair& feedback);
- // Builders for dynamic variable loads and stores.
- Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
- Node* BuildDynamicStore(Handle<Name> name, Node* value);
-
// Builders for accessing the function context.
Node* BuildLoadGlobalObject();
Node* BuildLoadNativeContextField(int index);
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
- Node* BuildToName(Node* input, BailoutId bailout_id);
Node* BuildToObject(Node* input, BailoutId bailout_id);
// Builder for adding the [[HomeObject]] to a value if the value came from a
@@ -354,8 +311,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildThrowError(Node* exception, BailoutId bailout_id);
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
Node* BuildThrowConstAssignError(BailoutId bailout_id);
- Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
- Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
// Builders for dynamic hole-checks at runtime.
Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
@@ -363,9 +318,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
BailoutId bailout_id);
- // Builders for conditional errors.
- Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
-
// Builders for non-local control flow.
Node* BuildReturn(Node* return_value);
Node* BuildThrow(Node* exception_value);
@@ -387,17 +339,8 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Optimization for variable load from global object.
Node* TryLoadGlobalConstant(Handle<Name> name);
- // Optimization for variable load of dynamic lookup slot that is most likely
- // to resolve to a global slot or context slot (inferred from scope chain).
- Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
- BailoutId bailout_id,
- const VectorSlotPair& feedback,
- OutputFrameStateCombine combine,
- TypeofMode typeof_mode);
-
// Optimizations for automatic type conversion.
Node* TryFastToBoolean(Node* input);
- Node* TryFastToName(Node* input);
// ===========================================================================
// The following visitation methods all recursively visit a subtree of the
@@ -408,7 +351,6 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
// Visit statements.
void VisitIfNotNull(Statement* stmt);
- void VisitInScope(Statement* stmt, Scope* scope, Node* context);
// Visit expressions.
void Visit(Expression* expr);
@@ -622,7 +564,6 @@ class AstGraphBuilderWithPositions final : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph, float invocation_frequency,
LoopAssignmentAnalysis* loop_assignment,
- TypeHintAnalysis* type_hint_analysis,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 82eaeb28a4..8239e3a058 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -5,6 +5,7 @@
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -201,6 +202,7 @@ void ALAA::VisitSpread(Spread* e) { UNREACHABLE(); }
void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
+void ALAA::VisitGetIterator(GetIterator* e) { UNREACHABLE(); }
void ALAA::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index 9b36eb1068..0b7ad19af7 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -18,7 +18,9 @@ BranchElimination::BranchElimination(Editor* editor, JSGraph* js_graph,
jsgraph_(js_graph),
node_conditions_(zone, js_graph->graph()->NodeCount()),
zone_(zone),
- dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+ dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
BranchElimination::~BranchElimination() {}
@@ -143,20 +145,27 @@ Reduction BranchElimination::ReduceLoop(Node* node) {
Reduction BranchElimination::ReduceMerge(Node* node) {
// Shortcut for the case when we do not know anything about some
// input.
- for (int i = 0; i < node->InputCount(); i++) {
- if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+ Node::Inputs inputs = node->inputs();
+ for (Node* input : inputs) {
+ if (node_conditions_.Get(input) == nullptr) {
return UpdateConditions(node, nullptr);
}
}
- const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+ auto input_it = inputs.begin();
+
+ DCHECK_GT(inputs.count(), 0);
+
+ const ControlPathConditions* first = node_conditions_.Get(*input_it);
+ ++input_it;
// Make a copy of the first input's conditions and merge with the conditions
// from other inputs.
ControlPathConditions* conditions =
new (zone_->New(sizeof(ControlPathConditions)))
ControlPathConditions(*first);
- for (int i = 1; i < node->InputCount(); i++) {
- conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+ auto input_end = inputs.end();
+ for (; input_it != input_end; ++input_it) {
+ conditions->Merge(*(node_conditions_.Get(*input_it)));
}
return UpdateConditions(node, conditions);
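
Merge here folds each remaining input's conditions into a copy of the first input's; semantically, only the branch conditions that hold with the same value on every incoming control path survive. A rough sketch of that intersection, with std::map standing in for V8's internal condition list (names hypothetical):

    #include <cassert>
    #include <map>
    #include <string>

    using Conditions = std::map<std::string, bool>;

    // Keep only entries present in both maps with an agreeing value.
    void Merge(Conditions& into, const Conditions& other) {
      for (auto it = into.begin(); it != into.end();) {
        auto found = other.find(it->first);
        if (found == other.end() || found->second != it->second) {
          it = into.erase(it);
        } else {
          ++it;
        }
      }
    }

    int main() {
      Conditions a{{"x", true}, {"y", false}};
      Conditions b{{"x", true}, {"y", true}};
      Merge(a, b);
      assert(a.size() == 1 && a.at("x"));
    }
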
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
new file mode 100644
index 0000000000..f0e870739b
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -0,0 +1,622 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using namespace interpreter;
+
+BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
+ int register_count, Zone* zone)
+ : parameter_count_(parameter_count),
+ bit_vector_(new (zone)
+ BitVector(parameter_count + register_count, zone)) {}
+
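+// The assignment bit vector is laid out with the parameters first, followed
+// by the locals, so plain register indices are offset by parameter_count_.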
+void BytecodeLoopAssignments::Add(interpreter::Register r) {
+ if (r.is_parameter()) {
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ } else {
+ bit_vector_->Add(parameter_count_ + r.index());
+ }
+}
+
+void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+ if (r.is_parameter()) {
+ DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+ } else {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index());
+ bit_vector_->Add(parameter_count_ + r.index() + 1);
+ }
+}
+
+void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
+ if (r.is_parameter()) {
+ DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(interpreter::Register(r.index() + 2).is_parameter());
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+ bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+ } else {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+ bit_vector_->Add(parameter_count_ + r.index());
+ bit_vector_->Add(parameter_count_ + r.index() + 1);
+ bit_vector_->Add(parameter_count_ + r.index() + 2);
+ }
+}
+
+void BytecodeLoopAssignments::AddAll() { bit_vector_->AddAll(); }
+
+void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) {
+ bit_vector_->Union(*other.bit_vector_);
+}
+
+bool BytecodeLoopAssignments::ContainsParameter(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, parameter_count());
+ return bit_vector_->Contains(index);
+}
+
+bool BytecodeLoopAssignments::ContainsLocal(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, local_count());
+ return bit_vector_->Contains(parameter_count_ + index);
+}
+
+bool BytecodeLoopAssignments::ContainsAccumulator() const {
+ // TODO(leszeks): This assumes the accumulator is always assigned. This is
+ // probably correct, but that assignment is also probably dead, so we should
+ // check liveness.
+ return true;
+}
+
+BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
+ Zone* zone, bool do_liveness_analysis)
+ : bytecode_array_(bytecode_array),
+ do_liveness_analysis_(do_liveness_analysis),
+ zone_(zone),
+ loop_stack_(zone),
+ loop_end_index_queue_(zone),
+ end_to_header_(zone),
+ header_to_info_(zone),
+ liveness_map_(bytecode_array->length(), zone) {}
+
+namespace {
+
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
+ const BytecodeArrayAccessor& accessor) {
+ int num_operands = Bytecodes::NumberOfOperands(bytecode);
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+ AccumulatorUse accumulator_use = Bytecodes::GetAccumulatorUse(bytecode);
+
+ if (accumulator_use == AccumulatorUse::kWrite) {
+ in_liveness.MarkAccumulatorDead();
+ }
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kRegOut: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness.MarkRegisterDead(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegOutPair: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ in_liveness.MarkRegisterDead(r.index());
+ in_liveness.MarkRegisterDead(r.index() + 1);
+ }
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+ in_liveness.MarkRegisterDead(r.index());
+ in_liveness.MarkRegisterDead(r.index() + 1);
+ in_liveness.MarkRegisterDead(r.index() + 2);
+ }
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+ break;
+ }
+ }
+
+ if (accumulator_use == AccumulatorUse::kRead) {
+ in_liveness.MarkAccumulatorLive();
+ }
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kReg: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ in_liveness.MarkRegisterLive(r.index());
+ }
+ break;
+ }
+ case OperandType::kRegPair: {
+ interpreter::Register r = accessor.GetRegisterOperand(i);
+ if (!r.is_parameter()) {
+ DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+ in_liveness.MarkRegisterLive(r.index());
+ in_liveness.MarkRegisterLive(r.index() + 1);
+ }
+ break;
+ }
+ case OperandType::kRegList: {
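+        // A register list is encoded as a register operand followed by a
+        // register count operand; consume the register here and read the
+        // count from the next operand.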
+ interpreter::Register r = accessor.GetRegisterOperand(i++);
+ uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+ if (!r.is_parameter()) {
+ for (uint32_t j = 0; j < reg_count; ++j) {
+ DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+ in_liveness.MarkRegisterLive(r.index() + j);
+ }
+ }
+        break;
+      }
+ default:
+ DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
+ break;
+ }
+ }
+}
+
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
+ BytecodeLivenessState* next_bytecode_in_liveness,
+ const BytecodeArrayAccessor& accessor,
+ const BytecodeLivenessMap& liveness_map) {
+ int current_offset = accessor.current_offset();
+ const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+
+  // Update from jump target (if any). Skip loops; we update these manually in
+ // the liveness iterations.
+ if (Bytecodes::IsForwardJump(bytecode)) {
+ int target_offset = accessor.GetJumpTargetOffset();
+ out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+ }
+
+ // Update from next bytecode (unless there isn't one or this is an
+ // unconditional jump).
+ if (next_bytecode_in_liveness != nullptr &&
+ !Bytecodes::IsUnconditionalJump(bytecode)) {
+ out_liveness.Union(*next_bytecode_in_liveness);
+ }
+
+ // Update from exception handler (if any).
+ if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+ int handler_context;
+ // TODO(leszeks): We should look up this range only once per entry.
+ HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+ int handler_offset =
+ table->LookupRange(current_offset, &handler_context, nullptr);
+
+ if (handler_offset != -1) {
+ out_liveness.Union(*liveness_map.GetInLiveness(handler_offset));
+ out_liveness.MarkRegisterLive(handler_context);
+ }
+ }
+}
+
+void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
+ const BytecodeArrayAccessor& accessor) {
+ int num_operands = Bytecodes::NumberOfOperands(bytecode);
+ const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+ for (int i = 0; i < num_operands; ++i) {
+ switch (operand_types[i]) {
+ case OperandType::kRegOut: {
+ assignments.Add(accessor.GetRegisterOperand(i));
+ break;
+ }
+ case OperandType::kRegOutPair: {
+ assignments.AddPair(accessor.GetRegisterOperand(i));
+ break;
+ }
+ case OperandType::kRegOutTriple: {
+ assignments.AddTriple(accessor.GetRegisterOperand(i));
+ break;
+ }
+ default:
+ DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+ break;
+ }
+ }
+}
+
+} // namespace
+
+void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
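+  // Start with a sentinel entry (header offset -1) so the stack is never
+  // empty; loop_stack_.size() > 1 below means "currently inside a loop".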
+ loop_stack_.push({-1, nullptr});
+
+ BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+ int osr_loop_end_offset =
+ osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+
+ BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+ for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+ int current_offset = iterator.current_offset();
+
+ if (bytecode == Bytecode::kJumpLoop) {
+ // Every byte up to and including the last byte within the backwards jump
+      // instruction is considered part of the loop; set loop end accordingly.
+ int loop_end = current_offset + iterator.current_bytecode_size();
+ PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+
+ // Normally prefixed bytecodes are treated as if the prefix's offset was
+ // the actual bytecode's offset. However, the OSR id is the offset of the
+ // actual JumpLoop bytecode, so we need to find the location of that
+ // bytecode ignoring the prefix.
+ int jump_loop_offset = current_offset + iterator.current_prefix_offset();
+ bool is_osr_loop = (jump_loop_offset == osr_loop_end_offset);
+
+      // Check that, if is_osr_loop is set, the osr_loop_end_offset falls
+      // within this bytecode.
+ DCHECK(!is_osr_loop ||
+ iterator.OffsetWithinBytecode(osr_loop_end_offset));
+
+ // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
+      // need to make sure to consider everything to be assigned.
+ if (is_osr_loop) {
+ loop_stack_.top().loop_info->assignments().AddAll();
+ }
+
+ // Save the index so that we can do another pass later.
+ if (do_liveness_analysis_) {
+ loop_end_index_queue_.push_back(iterator.current_index());
+ }
+ } else if (loop_stack_.size() > 1) {
+ LoopStackEntry& current_loop = loop_stack_.top();
+ LoopInfo* current_loop_info = current_loop.loop_info;
+
+ // TODO(leszeks): Ideally, we'd only set values that were assigned in
+ // the loop *and* are live when the loop exits. However, this requires
+ // tracking the out-liveness of *all* loop exits, which is not
+ // information we currently have.
+ UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+
+ if (current_offset == current_loop.header_offset) {
+ loop_stack_.pop();
+ if (loop_stack_.size() > 1) {
+ // Propagate inner loop assignments to outer loop.
+ loop_stack_.top().loop_info->assignments().Union(
+ current_loop_info->assignments());
+ }
+ }
+ }
+
+ if (do_liveness_analysis_) {
+ BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
+ current_offset, bytecode_array()->register_count(), zone());
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+ }
+
+ DCHECK_EQ(loop_stack_.size(), 1u);
+ DCHECK_EQ(loop_stack_.top().header_offset, -1);
+
+ if (!do_liveness_analysis_) return;
+
+ // At this point, every bytecode has a valid in and out liveness, except for
+ // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
+ // analysis iterations can only add additional liveness bits that are pulled
+ // across these back edges.
+ //
+ // Furthermore, a loop header's in-liveness can only change based on any
+ // bytecodes *after* the loop end -- it cannot change as a result of the
+  // JumpLoop liveness being updated, as the only liveness bits that can be
+ // added to the loop body are those of the loop header.
+ //
+ // So, if we know that the liveness of bytecodes after a loop header won't
+ // change (e.g. because there are no loops in them, or we have already ensured
+ // those loops are valid), we can safely update the loop end and pass over the
+ // loop body, and then never have to pass over that loop end again, because we
+ // have shown that its target, the loop header, can't change from the entries
+ // after the loop, and can't change from any loop body pass.
+ //
+ // This means that in a pass, we can iterate backwards over the bytecode
+ // array, process any loops that we encounter, and on subsequent passes we can
+ // skip processing those loops (though we still have to process inner loops).
+ //
+ // Equivalently, we can queue up loop ends from back to front, and pass over
+ // the loops in that order, as this preserves both the bottom-to-top and
+ // outer-to-inner requirements.
+
+ for (int loop_end_index : loop_end_index_queue_) {
+ iterator.GoToIndex(loop_end_index);
+
+ DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+
+ int header_offset = iterator.GetJumpTargetOffset();
+ int end_offset = iterator.current_offset();
+
+ BytecodeLiveness& header_liveness =
+ liveness_map_.GetLiveness(header_offset);
+ BytecodeLiveness& end_liveness = liveness_map_.GetLiveness(end_offset);
+
+ if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) {
+ // Only update the loop body if the loop end liveness changed.
+ continue;
+ }
+ end_liveness.in->CopyFrom(*end_liveness.out);
+ next_bytecode_in_liveness = end_liveness.in;
+
+ // Advance into the loop body.
+ --iterator;
+ for (; iterator.current_offset() > header_offset; --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+
+ int current_offset = iterator.current_offset();
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+ // Now we are at the loop header. Since the in-liveness of the header
+ // can't change, we need only to update the out-liveness.
+ UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
+ next_bytecode_in_liveness, iterator, liveness_map_);
+ }
+
+ DCHECK(LivenessIsValid());
+}
+
+void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
+ DCHECK(loop_header < loop_end);
+ DCHECK(loop_stack_.top().header_offset < loop_header);
+ DCHECK(end_to_header_.find(loop_end) == end_to_header_.end());
+ DCHECK(header_to_info_.find(loop_header) == header_to_info_.end());
+
+ int parent_offset = loop_stack_.top().header_offset;
+
+ end_to_header_.insert({loop_end, loop_header});
+ auto it = header_to_info_.insert(
+ {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(),
+ bytecode_array_->register_count(), zone_)});
+ // Get the loop info pointer from the output of insert.
+ LoopInfo* loop_info = &it.first->second;
+
+ loop_stack_.push({loop_header, loop_info});
+}
+
+bool BytecodeAnalysis::IsLoopHeader(int offset) const {
+ return header_to_info_.find(offset) != header_to_info_.end();
+}
+
+int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
+ auto loop_end_to_header = end_to_header_.upper_bound(offset);
+ // If there is no next end => offset is not in a loop.
+ if (loop_end_to_header == end_to_header_.end()) {
+ return -1;
+ }
+  // If the header precedes the offset, this is the loop
+ //
+ // .> header <--loop_end_to_header
+ // |
+ // | <--offset
+ // |
+ // `- end
+ if (loop_end_to_header->second <= offset) {
+ return loop_end_to_header->second;
+ }
+ // Otherwise there is a (potentially nested) loop after this offset.
+ //
+ // <--offset
+ //
+ // .> header
+ // |
+ // | .> header <--loop_end_to_header
+ // | |
+ // | `- end
+ // |
+ // `- end
+ // We just return the parent of the next loop (might be -1).
+ DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end());
+
+ return header_to_info_.upper_bound(offset)->second.parent_offset();
+}
+
+const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
+ DCHECK(IsLoopHeader(header_offset));
+
+ return header_to_info_.find(header_offset)->second;
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
+ int offset) const {
+ if (!do_liveness_analysis_) return nullptr;
+
+ return liveness_map_.GetInLiveness(offset);
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
+ int offset) const {
+ if (!do_liveness_analysis_) return nullptr;
+
+ return liveness_map_.GetOutLiveness(offset);
+}
+
+std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+
+ for (; !iterator.done(); iterator.Advance()) {
+ int current_offset = iterator.current_offset();
+
+ const BitVector& in_liveness =
+ GetInLivenessFor(current_offset)->bit_vector();
+ const BitVector& out_liveness =
+ GetOutLivenessFor(current_offset)->bit_vector();
+
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ os << (in_liveness.Contains(i) ? "L" : ".");
+ }
+ os << " -> ";
+
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ os << (out_liveness.Contains(i) ? "L" : ".");
+ }
+
+ os << " | " << current_offset << ": ";
+ iterator.PrintTo(os) << std::endl;
+ }
+
+ return os;
+}
+
+#if DEBUG
+bool BytecodeAnalysis::LivenessIsValid() {
+ BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+
+ BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
+ zone());
+
+ int invalid_offset = -1;
+ int which_invalid = -1;
+
+ BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+ // Ensure that there are no liveness changes if we iterate one more time.
+ for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+ Bytecode bytecode = iterator.current_bytecode();
+
+ int current_offset = iterator.current_offset();
+
+ BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+ previous_liveness.CopyFrom(*liveness.out);
+
+ UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+ iterator, liveness_map_);
+ // UpdateOutLiveness skips kJumpLoop, so we update it manually.
+ if (bytecode == Bytecode::kJumpLoop) {
+ int target_offset = iterator.GetJumpTargetOffset();
+ liveness.out->Union(*liveness_map_.GetInLiveness(target_offset));
+ }
+
+ if (!liveness.out->Equals(previous_liveness)) {
+ // Reset the invalid liveness.
+ liveness.out->CopyFrom(previous_liveness);
+ invalid_offset = current_offset;
+ which_invalid = 1;
+ break;
+ }
+
+ previous_liveness.CopyFrom(*liveness.in);
+
+ liveness.in->CopyFrom(*liveness.out);
+ UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+ if (!liveness.in->Equals(previous_liveness)) {
+ // Reset the invalid liveness.
+ liveness.in->CopyFrom(previous_liveness);
+ invalid_offset = current_offset;
+ which_invalid = 0;
+ break;
+ }
+
+ next_bytecode_in_liveness = liveness.in;
+ }
+
+ if (invalid_offset != -1) {
+ OFStream of(stderr);
+ of << "Invalid liveness:" << std::endl;
+
+ // Dump the bytecode, annotated with the liveness and marking loops.
+
+ int loop_indent = 0;
+
+ BytecodeArrayIterator forward_iterator(bytecode_array());
+ for (; !forward_iterator.done(); forward_iterator.Advance()) {
+ int current_offset = forward_iterator.current_offset();
+ const BitVector& in_liveness =
+ GetInLivenessFor(current_offset)->bit_vector();
+ const BitVector& out_liveness =
+ GetOutLivenessFor(current_offset)->bit_vector();
+
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ of << (in_liveness.Contains(i) ? 'L' : '.');
+ }
+
+ of << " | ";
+
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ of << (out_liveness.Contains(i) ? 'L' : '.');
+ }
+
+ of << " : " << current_offset << " : ";
+
+      // Draw loop back edges by indenting everything between loop headers and
+ // jump loop instructions.
+ if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+ loop_indent--;
+ }
+ for (int i = 0; i < loop_indent; ++i) {
+ of << " | ";
+ }
+ if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+ of << " `-" << current_offset;
+ } else if (IsLoopHeader(current_offset)) {
+ of << " .>" << current_offset;
+ loop_indent++;
+ }
+ forward_iterator.PrintTo(of) << std::endl;
+
+ if (current_offset == invalid_offset) {
+ // Underline the invalid liveness.
+ if (which_invalid == 0) {
+ for (int i = 0; i < in_liveness.length(); ++i) {
+ of << '^';
+ }
+ } else {
+ for (int i = 0; i < in_liveness.length() + 3; ++i) {
+ of << ' ';
+ }
+ for (int i = 0; i < out_liveness.length(); ++i) {
+ of << '^';
+ }
+ }
+
+ // Make sure to draw the loop indentation marks on this additional line.
+ of << " : " << current_offset << " : ";
+ for (int i = 0; i < loop_indent; ++i) {
+ of << " | ";
+ }
+
+ of << std::endl;
+ }
+ }
+ }
+
+ return invalid_offset == -1;
+}
+#endif
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
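
The analysis above is a standard backward liveness dataflow over the bytecode: one back-to-front pass, then extra passes over loop bodies (driven by loop_end_index_queue_) until the unions across back edges stop changing. A compact sketch of the underlying recurrence, on a hypothetical three-register mini-ISA rather than V8 bytecode:

    #include <bitset>
    #include <cassert>
    #include <vector>

    // Each instruction reads some registers (use) and writes some (def);
    // jump_target is an optional forward-jump successor.
    struct Insn {
      std::bitset<3> use, def;
      int jump_target = -1;
    };

    // Classic backward recurrence, as in UpdateOutLiveness/UpdateInLiveness:
    //   out[i] = in[i + 1] | in[jump_target(i)]
    //   in[i]  = (out[i] & ~def[i]) | use[i]
    // Backward branches would need iterating to a fixpoint, which is what the
    // loop_end_index_queue_ passes above do.
    std::vector<std::bitset<3>> ComputeInLiveness(
        const std::vector<Insn>& code) {
      std::vector<std::bitset<3>> in(code.size());
      for (int i = static_cast<int>(code.size()) - 1; i >= 0; --i) {
        std::bitset<3> out;
        if (i + 1 < static_cast<int>(code.size())) out |= in[i + 1];
        if (code[i].jump_target >= 0) out |= in[code[i].jump_target];
        in[i] = (out & ~code[i].def) | code[i].use;
      }
      return in;
    }

    int main() {
      // r0 = ...; r1 = r0; use r1: r0 is live into the copy, dead after it.
      std::vector<Insn> code = {{/*use=*/0b000, /*def=*/0b001},
                                {/*use=*/0b001, /*def=*/0b010},
                                {/*use=*/0b010, /*def=*/0b000}};
      auto in = ComputeInLiveness(code);
      assert(in[1] == std::bitset<3>(0b001));
      assert(in[2] == std::bitset<3>(0b010));
    }
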
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
new file mode 100644
index 0000000000..ad93f8a652
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_ANALYSIS_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
+ public:
+ BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
+
+ void Add(interpreter::Register r);
+ void AddPair(interpreter::Register r);
+ void AddTriple(interpreter::Register r);
+ void AddAll();
+ void Union(const BytecodeLoopAssignments& other);
+
+ bool ContainsParameter(int index) const;
+ bool ContainsLocal(int index) const;
+ bool ContainsAccumulator() const;
+
+ int parameter_count() const { return parameter_count_; }
+ int local_count() const { return bit_vector_->length() - parameter_count_; }
+
+ private:
+ int parameter_count_;
+ BitVector* bit_vector_;
+};
+
+struct V8_EXPORT_PRIVATE LoopInfo {
+ public:
+ LoopInfo(int parent_offset, int parameter_count, int register_count,
+ Zone* zone)
+ : parent_offset_(parent_offset),
+ assignments_(parameter_count, register_count, zone) {}
+
+ int parent_offset() const { return parent_offset_; }
+
+ BytecodeLoopAssignments& assignments() { return assignments_; }
+ const BytecodeLoopAssignments& assignments() const { return assignments_; }
+
+ private:
+ // The offset to the parent loop, or -1 if there is no parent.
+ int parent_offset_;
+ BytecodeLoopAssignments assignments_;
+};
+
+class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
+ public:
+ BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
+ bool do_liveness_analysis);
+
+ // Analyze the bytecodes to find the loop ranges, loop nesting, loop
+ // assignments and liveness, under the assumption that there is an OSR bailout
+ // at {osr_bailout_id}.
+ //
+ // No other methods in this class return valid information until this has been
+ // called.
+ void Analyze(BailoutId osr_bailout_id);
+
+ // Returns true if the given offset is a loop header.
+ bool IsLoopHeader(int offset) const;
+ // Gets the loop header offset of the containing loop for an arbitrary
+ // {offset}, or -1 if the {offset} is not inside any loop.
+ int GetLoopOffsetFor(int offset) const;
+ // Gets the loop info of the loop header at {header_offset}.
+ const LoopInfo& GetLoopInfoFor(int header_offset) const;
+
+ // Gets the in-liveness for the bytecode at {offset}.
+ const BytecodeLivenessState* GetInLivenessFor(int offset) const;
+
+ // Gets the out-liveness for the bytecode at {offset}.
+ const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+
+ std::ostream& PrintLivenessTo(std::ostream& os) const;
+
+ private:
+ struct LoopStackEntry {
+ int header_offset;
+ LoopInfo* loop_info;
+ };
+
+ void PushLoop(int loop_header, int loop_end);
+
+#if DEBUG
+ bool LivenessIsValid();
+#endif
+
+ Zone* zone() const { return zone_; }
+ Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ private:
+ Handle<BytecodeArray> bytecode_array_;
+ bool do_liveness_analysis_;
+ Zone* zone_;
+
+ ZoneStack<LoopStackEntry> loop_stack_;
+ ZoneVector<int> loop_end_index_queue_;
+
+ ZoneMap<int, int> end_to_header_;
+ ZoneMap<int, LoopInfo> header_to_info_;
+
+ BytecodeLivenessMap liveness_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_ANALYSIS_H_
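
Note: a minimal sketch of the intended call pattern for this new class,
mirroring how BytecodeGraphBuilder::VisitBytecodes (below) drives it; the
surrounding state (bytecode_array, zone, osr_ast_id, iterator) is assumed:

    // Run the analysis once up front, then query it per bytecode offset.
    BytecodeAnalysis analysis(bytecode_array, zone,
                              FLAG_analyze_environment_liveness);
    analysis.Analyze(osr_ast_id);  // BailoutId::None() when not entering via OSR.

    int offset = iterator.current_offset();
    if (analysis.IsLoopHeader(offset)) {
      const LoopInfo& info = analysis.GetLoopInfoFor(offset);
      // Only the values in info.assignments() need loop phis and
      // loop-exit renames (see PrepareForLoop/PrepareForLoopExit below).
    }
    // May be null when liveness analysis is disabled.
    const BytecodeLivenessState* in_liveness = analysis.GetInLivenessFor(offset);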
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.cc b/deps/v8/src/compiler/bytecode-branch-analysis.cc
deleted file mode 100644
index 4e96a53aeb..0000000000
--- a/deps/v8/src/compiler/bytecode-branch-analysis.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-branch-analysis.h"
-
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeBranchAnalysis::BytecodeBranchAnalysis(
- Handle<BytecodeArray> bytecode_array, Zone* zone)
- : bytecode_array_(bytecode_array),
- is_backward_target_(bytecode_array->length(), zone),
- is_forward_target_(bytecode_array->length(), zone),
- zone_(zone) {}
-
-void BytecodeBranchAnalysis::Analyze() {
- interpreter::BytecodeArrayIterator iterator(bytecode_array());
- while (!iterator.done()) {
- interpreter::Bytecode bytecode = iterator.current_bytecode();
- int current_offset = iterator.current_offset();
- if (interpreter::Bytecodes::IsJump(bytecode)) {
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- }
- iterator.Advance();
- }
-}
-
-void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
- if (source_offset < target_offset) {
- is_forward_target_.Add(target_offset);
- } else {
- is_backward_target_.Add(target_offset);
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-branch-analysis.h b/deps/v8/src/compiler/bytecode-branch-analysis.h
deleted file mode 100644
index 7d32da8281..0000000000
--- a/deps/v8/src/compiler/bytecode-branch-analysis.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-
-#include "src/bit-vector.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-// A class for identifying branch targets within a bytecode array.
-// This information can be used to construct the local control flow
-// logic for high-level IR graphs built from bytecode.
-//
-// N.B. If this class is used to determine loop headers, then such a
-// usage relies on the only backwards branches in bytecode being jumps
-// back to loop headers.
-class BytecodeBranchAnalysis BASE_EMBEDDED {
- public:
- BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
-
- // Analyze the bytecodes to find the branch sites and their
- // targets. No other methods in this class return valid information
- // until this has been called.
- void Analyze();
-
- // Returns true if there are any forward branches to the bytecode at
- // |offset|.
- bool forward_branches_target(int offset) const {
- return is_forward_target_.Contains(offset);
- }
-
- // Returns true if there are any backward branches to the bytecode
- // at |offset|.
- bool backward_branches_target(int offset) const {
- return is_backward_target_.Contains(offset);
- }
-
- private:
- void AddBranch(int origin_offset, int target_offset);
-
- Zone* zone() const { return zone_; }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- Handle<BytecodeArray> bytecode_array_;
- BitVector is_backward_target_;
- BitVector is_forward_target_;
- Zone* zone_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 34b50df308..d22746d9ec 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -7,10 +7,10 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compilation-info.h"
-#include "src/compiler/bytecode-branch-analysis.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/linkage.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects-inl.h"
@@ -36,7 +36,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* LookupAccumulator() const;
Node* LookupRegister(interpreter::Register the_register) const;
- void MarkAllRegistersLive();
void BindAccumulator(Node* node,
FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -57,7 +56,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
- bool owner_has_exception);
+ bool owner_has_exception,
+ const BytecodeLivenessState* liveness);
// Control dependency tracked by this environment.
Node* GetControlDependency() const { return control_dependency_; }
@@ -68,30 +68,28 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* Context() const { return context_; }
void SetContext(Node* new_context) { context_ = new_context; }
- Environment* CopyForConditional();
- Environment* CopyForLoop();
- Environment* CopyForOsrEntry();
+ Environment* Copy();
void Merge(Environment* other);
- void PrepareForOsrEntry();
- void PrepareForLoopExit(Node* loop);
+ void PrepareForOsrEntry();
+ void PrepareForLoop(const BytecodeLoopAssignments& assignments);
+ void PrepareForLoopExit(Node* loop,
+ const BytecodeLoopAssignments& assignments);
private:
- Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
- void PrepareForLoop();
+ explicit Environment(const Environment* copy);
- bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
- void UpdateStateValues(Node** state_values, int offset, int count);
+ bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
+ void UpdateStateValues(Node** state_values, Node** values, int count);
+ void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
+ const BitVector* liveness);
int RegisterToValuesIndex(interpreter::Register the_register) const;
- bool IsLivenessBlockConsistent() const;
-
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
CommonOperatorBuilder* common() const { return builder_->common(); }
BytecodeGraphBuilder* builder() const { return builder_; }
- LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
const NodeVector* values() const { return &values_; }
NodeVector* values() { return &values_; }
int register_base() const { return register_base_; }
@@ -100,7 +98,6 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
BytecodeGraphBuilder* builder_;
int register_count_;
int parameter_count_;
- LivenessAnalyzerBlock* liveness_block_;
Node* context_;
Node* control_dependency_;
Node* effect_dependency_;
@@ -124,9 +121,6 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
: builder_(builder),
register_count_(register_count),
parameter_count_(parameter_count),
- liveness_block_(builder->is_liveness_analysis_enabled_
- ? builder_->liveness_analyzer()->NewBlock()
- : nullptr),
context_(context),
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
@@ -161,12 +155,10 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
}
BytecodeGraphBuilder::Environment::Environment(
- const BytecodeGraphBuilder::Environment* other,
- LivenessAnalyzerBlock* liveness_block)
+ const BytecodeGraphBuilder::Environment* other)
: builder_(other->builder_),
register_count_(other->register_count_),
parameter_count_(other->parameter_count_),
- liveness_block_(liveness_block),
context_(other->context_),
control_dependency_(other->control_dependency_),
effect_dependency_(other->effect_dependency_),
@@ -189,16 +181,7 @@ int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
}
}
-bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
- return !builder_->IsLivenessAnalysisEnabled() ==
- (liveness_block() == nullptr);
-}
-
Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->LookupAccumulator();
- }
return values()->at(accumulator_base_);
}
@@ -213,32 +196,15 @@ Node* BytecodeGraphBuilder::Environment::LookupRegister(
return builder()->GetNewTarget();
} else {
int values_index = RegisterToValuesIndex(the_register);
- if (liveness_block() != nullptr && !the_register.is_parameter()) {
- DCHECK(IsLivenessBlockConsistent());
- liveness_block()->Lookup(the_register.index());
- }
return values()->at(values_index);
}
}
-void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- for (int i = 0; i < register_count(); ++i) {
- liveness_block()->Lookup(i);
- }
- }
-}
-
void BytecodeGraphBuilder::Environment::BindAccumulator(
Node* node, FrameStateAttachmentMode mode) {
if (mode == FrameStateAttachmentMode::kAttachFrameState) {
builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
}
- DCHECK(IsLivenessBlockConsistent());
- if (liveness_block() != nullptr) {
- liveness_block()->BindAccumulator();
- }
values()->at(accumulator_base_) = node;
}
@@ -251,10 +217,6 @@ void BytecodeGraphBuilder::Environment::BindRegister(
accumulator_base_ - values_index));
}
values()->at(values_index) = node;
- if (liveness_block() != nullptr && !the_register.is_parameter()) {
- DCHECK(IsLivenessBlockConsistent());
- liveness_block()->Bind(the_register.index());
- }
}
void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
@@ -278,45 +240,13 @@ void BytecodeGraphBuilder::Environment::RecordAfterState(
}
}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForLoop() {
- PrepareForLoop();
- if (liveness_block() != nullptr) {
- // Finish the current block before copying.
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, liveness_block());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
- return new (zone())
- Environment(this, builder_->liveness_analyzer()->NewBlock());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() {
- LivenessAnalyzerBlock* copy_liveness_block = nullptr;
- if (liveness_block() != nullptr) {
- copy_liveness_block =
- builder_->liveness_analyzer()->NewBlock(liveness_block());
- liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
- }
- return new (zone()) Environment(this, copy_liveness_block);
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::Environment::Copy() {
+ return new (zone()) Environment(this);
}
void BytecodeGraphBuilder::Environment::Merge(
BytecodeGraphBuilder::Environment* other) {
- if (builder_->is_liveness_analysis_enabled_) {
- if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
- liveness_block_ =
- builder()->liveness_analyzer()->NewBlock(liveness_block());
- }
- liveness_block()->AddPredecessor(other->liveness_block());
- }
-
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder()->MergeControl(GetControlDependency(),
@@ -337,8 +267,8 @@ void BytecodeGraphBuilder::Environment::Merge(
}
}
-
-void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+void BytecodeGraphBuilder::Environment::PrepareForLoop(
+ const BytecodeLoopAssignments& assignments) {
// Create a control node for the loop header.
Node* control = builder()->NewLoop();
@@ -346,11 +276,23 @@ void BytecodeGraphBuilder::Environment::PrepareForLoop() {
Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
UpdateEffectDependency(effect);
- // Assume everything in the loop is updated.
+ // Create Phis for any values that may be updated by the end of the loop.
context_ = builder()->NewPhi(1, context_, control);
- int size = static_cast<int>(values()->size());
- for (int i = 0; i < size; i++) {
- values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+ for (int i = 0; i < parameter_count(); i++) {
+ if (assignments.ContainsParameter(i)) {
+ values_[i] = builder()->NewPhi(1, values_[i], control);
+ }
+ }
+ for (int i = 0; i < register_count(); i++) {
+ if (assignments.ContainsLocal(i)) {
+ int index = register_base() + i;
+ values_[index] = builder()->NewPhi(1, values_[index], control);
+ }
+ }
+
+ if (assignments.ContainsAccumulator()) {
+ values_[accumulator_base()] =
+ builder()->NewPhi(1, values_[accumulator_base()], control);
}
// Connect to the loop end.
@@ -384,7 +326,7 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
BailoutId loop_id(builder_->bytecode_iterator().current_offset());
Node* frame_state =
- Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+ Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
Node* checkpoint =
graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
UpdateEffectDependency(checkpoint);
@@ -402,22 +344,22 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
}
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
- Node** state_values, int offset, int count) {
+ Node** state_values, Node** values, int count) {
if (*state_values == nullptr) {
return true;
}
- DCHECK_EQ((*state_values)->InputCount(), count);
- DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
- Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ Node::Inputs inputs = (*state_values)->inputs();
+ DCHECK_EQ(inputs.count(), count);
for (int i = 0; i < count; i++) {
- if ((*state_values)->InputAt(i) != env_values[i]) {
+ if (inputs[i] != values[i]) {
return true;
}
}
return false;
}
-void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
+ Node* loop, const BytecodeLoopAssignments& assignments) {
DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
Node* control = GetControlDependency();
@@ -431,34 +373,65 @@ void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
GetEffectDependency(), loop_exit);
UpdateEffectDependency(effect_rename);
- // TODO(jarin) We should also rename context here. However, uncoditional
+ // TODO(jarin) We should also rename context here. However, unconditional
// renaming confuses global object and native context specialization.
// We should only rename if the context is assigned in the loop.
- // Rename the environmnent values.
- for (size_t i = 0; i < values_.size(); i++) {
- Node* rename =
- graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
- values_[i] = rename;
+ // Rename the environment values if they were assigned in the loop.
+ for (int i = 0; i < parameter_count(); i++) {
+ if (assignments.ContainsParameter(i)) {
+ Node* rename =
+ graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+ values_[i] = rename;
+ }
+ }
+ for (int i = 0; i < register_count(); i++) {
+ if (assignments.ContainsLocal(i)) {
+ Node* rename = graph()->NewNode(common()->LoopExitValue(),
+ values_[register_base() + i], loop_exit);
+ values_[register_base() + i] = rename;
+ }
+ }
+
+ if (assignments.ContainsAccumulator()) {
+ Node* rename = graph()->NewNode(common()->LoopExitValue(),
+ values_[accumulator_base()], loop_exit);
+ values_[accumulator_base()] = rename;
}
}
void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
- int offset,
+ Node** values,
int count) {
- if (StateValuesRequireUpdate(state_values, offset, count)) {
- const Operator* op = common()->StateValues(count);
- (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+ if (StateValuesRequireUpdate(state_values, values, count)) {
+ const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
+ (*state_values) = graph()->NewNode(op, count, values);
}
}
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+ Node** state_values, Node** values, int count, const BitVector* liveness) {
+ *state_values = builder_->state_values_cache_.GetNodeForValues(
+ values, static_cast<size_t>(count), liveness);
+}
+
Node* BytecodeGraphBuilder::Environment::Checkpoint(
BailoutId bailout_id, OutputFrameStateCombine combine,
- bool owner_has_exception) {
- UpdateStateValues(&parameters_state_values_, 0, parameter_count());
- UpdateStateValues(&registers_state_values_, register_base(),
- register_count());
- UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+ bool owner_has_exception, const BytecodeLivenessState* liveness) {
+ UpdateStateValues(&parameters_state_values_, &values()->at(0),
+ parameter_count());
+
+ // TODO(leszeks): We should pass a view of the liveness bitvector here, with
+ // offset and count, rather than passing the entire bitvector and assuming
+ // that register liveness starts at offset 0.
+ UpdateStateValuesWithCache(&registers_state_values_,
+ &values()->at(register_base()), register_count(),
+ liveness ? &liveness->bit_vector() : nullptr);
+
+ Node* accumulator_value = liveness == nullptr || liveness->AccumulatorIsLive()
+ ? values()->at(accumulator_base())
+ : builder()->jsgraph()->OptimizedOutConstant();
+ UpdateStateValues(&accumulator_state_values_, &accumulator_value, 1);
const Operator* op = common()->FrameState(
bailout_id, combine, builder()->frame_state_function_info());
@@ -467,51 +440,40 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
builder()->graph()->start());
- if (liveness_block() != nullptr) {
- // If the owning node has an exception, register the checkpoint to the
- // predecessor so that the checkpoint is used for both the normal and the
- // exceptional paths. Yes, this is a terrible hack and we might want
- // to use an explicit frame state for the exceptional path.
- if (owner_has_exception) {
- liveness_block()->GetPredecessor()->Checkpoint(result);
- } else {
- liveness_block()->Checkpoint(result);
- }
- }
-
return result;
}
BytecodeGraphBuilder::BytecodeGraphBuilder(
- Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency, SourcePositionTable* source_positions,
- int inlining_id)
+ Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+ Handle<TypeFeedbackVector> feedback_vector, BailoutId osr_ast_id,
+ JSGraph* jsgraph, float invocation_frequency,
+ SourcePositionTable* source_positions, int inlining_id)
: local_zone_(local_zone),
jsgraph_(jsgraph),
invocation_frequency_(invocation_frequency),
- bytecode_array_(handle(info->shared_info()->bytecode_array())),
+ bytecode_array_(handle(shared_info->bytecode_array())),
exception_handler_table_(
handle(HandlerTable::cast(bytecode_array()->handler_table()))),
- feedback_vector_(handle(info->closure()->feedback_vector())),
+ feedback_vector_(feedback_vector),
frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
FrameStateType::kInterpretedFunction,
bytecode_array()->parameter_count(),
- bytecode_array()->register_count(), info->shared_info())),
- osr_ast_id_(info->osr_ast_id()),
+ bytecode_array()->register_count(), shared_info)),
+ bytecode_iterator_(nullptr),
+ bytecode_analysis_(nullptr),
+ environment_(nullptr),
+ osr_ast_id_(osr_ast_id),
+ osr_loop_offset_(-1),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_controls_(local_zone),
- is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
- info->is_deoptimization_enabled()),
+ is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness),
state_values_cache_(jsgraph),
- liveness_analyzer_(
- static_cast<size_t>(bytecode_array()->register_count()), true,
- local_zone),
source_positions_(source_positions),
- start_position_(info->shared_info()->start_position(), inlining_id) {}
+ start_position_(shared_info->start_position(), inlining_id) {}
Node* BytecodeGraphBuilder::GetNewTarget() {
if (!new_target_.is_set()) {
@@ -551,8 +513,10 @@ Node* BytecodeGraphBuilder::GetFunctionClosure() {
Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
const Operator* op =
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
- Node* native_context = NewNode(op, environment()->Context());
- return NewNode(javascript()->LoadContext(0, index, true), native_context);
+ Node* native_context = NewNode(op);
+ Node* result = NewNode(javascript()->LoadContext(0, index, true));
+ NodeProperties::ReplaceContextInput(result, native_context);
+ return result;
}
@@ -587,8 +551,6 @@ bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
graph()->SetEnd(end);
- ClearNonLiveSlotsInFrameStates();
-
return true;
}
@@ -601,8 +563,13 @@ void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
DCHECK_EQ(IrOpcode::kDead,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
+
+ const BytecodeLivenessState* liveness_before =
+ bytecode_analysis()->GetInLivenessFor(
+ bytecode_iterator().current_offset());
+
Node* frame_state_before = environment()->Checkpoint(
- bailout_id, OutputFrameStateCombine::Ignore(), false);
+ bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
}
}
@@ -617,40 +584,36 @@ void BytecodeGraphBuilder::PrepareFrameState(Node* node,
NodeProperties::GetFrameStateInput(node)->opcode());
BailoutId bailout_id(bytecode_iterator().current_offset());
bool has_exception = NodeProperties::IsExceptionalCall(node);
- Node* frame_state_after =
- environment()->Checkpoint(bailout_id, combine, has_exception);
- NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
- }
-}
-void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
- if (!IsLivenessAnalysisEnabled()) {
- return;
- }
- NonLiveFrameStateSlotReplacer replacer(
- &state_values_cache_, jsgraph()->OptimizedOutConstant(),
- liveness_analyzer()->local_count(), true, local_zone());
- liveness_analyzer()->Run(&replacer);
- if (FLAG_trace_environment_liveness) {
- OFStream os(stdout);
- liveness_analyzer()->Print(os);
+ const BytecodeLivenessState* liveness_after =
+ bytecode_analysis()->GetOutLivenessFor(
+ bytecode_iterator().current_offset());
+
+ Node* frame_state_after = environment()->Checkpoint(
+ bailout_id, combine, has_exception, liveness_after);
+ NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
}
}
void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
- BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
- BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
- analysis.Analyze();
- loop_analysis.Analyze();
- set_branch_analysis(&analysis);
- set_loop_analysis(&loop_analysis);
+ BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
+ FLAG_analyze_environment_liveness);
+ bytecode_analysis.Analyze(osr_ast_id_);
+ set_bytecode_analysis(&bytecode_analysis);
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
SourcePositionTableIterator source_position_iterator(
bytecode_array()->source_position_table());
+ if (FLAG_trace_environment_liveness) {
+ OFStream of(stdout);
+
+ bytecode_analysis.PrintLivenessTo(of);
+ }
+
BuildOSRNormalEntryPoint();
+
for (; !iterator.done(); iterator.Advance()) {
int current_offset = iterator.current_offset();
UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
@@ -658,7 +621,6 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
SwitchToMergeEnvironment(current_offset);
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
- BuildOSRLoopEntryPoint(current_offset);
// Skip the first stack check if stack_check is false
if (!stack_check &&
@@ -677,8 +639,7 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
}
}
}
-
- set_branch_analysis(nullptr);
+ set_bytecode_analysis(nullptr);
set_bytecode_iterator(nullptr);
DCHECK(exception_handlers_.empty());
}
@@ -741,27 +702,33 @@ void BytecodeGraphBuilder::VisitMov() {
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
}
-Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
+ uint32_t feedback_slot_index,
TypeofMode typeof_mode) {
VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
feedback_vector()->GetKind(feedback.slot()));
- Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
- return NewNode(op, GetFunctionClosure());
+ return NewNode(op);
}
void BytecodeGraphBuilder::VisitLdaGlobal() {
PrepareEagerCheckpoint();
- Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
- TypeofMode::NOT_INSIDE_TYPEOF);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node =
+ BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
PrepareEagerCheckpoint();
- Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
- TypeofMode::INSIDE_TYPEOF);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node =
+ BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -774,7 +741,7 @@ void BytecodeGraphBuilder::BuildStoreGlobal(LanguageMode language_mode) {
Node* value = environment()->LookupAccumulator();
const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
- Node* node = NewNode(op, value, GetFunctionClosure());
+ Node* node = NewNode(op, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -786,6 +753,23 @@ void BytecodeGraphBuilder::VisitStaGlobalStrict() {
BuildStoreGlobal(LanguageMode::STRICT);
}
+void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
+ PrepareEagerCheckpoint();
+
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* name =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+ Node* value = environment()->LookupAccumulator();
+ int flags = bytecode_iterator().GetFlagOperand(2);
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+
+ const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
+ Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+ environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitLdaContextSlot() {
// TODO(mythria): immutable flag is also set to false. This information is not
// available in the bytecode array. Update this code when the implementation
@@ -793,9 +777,10 @@ void BytecodeGraphBuilder::VisitLdaContextSlot() {
const Operator* op = javascript()->LoadContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
bytecode_iterator().GetIndexOperand(1), false);
+ Node* node = NewNode(op);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node = NewNode(op, context);
+ NodeProperties::ReplaceContextInput(node, context);
environment()->BindAccumulator(node);
}
@@ -805,8 +790,7 @@ void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
// changes.
const Operator* op = javascript()->LoadContext(
0, bytecode_iterator().GetIndexOperand(0), false);
- Node* context = environment()->Context();
- Node* node = NewNode(op, context);
+ Node* node = NewNode(op);
environment()->BindAccumulator(node);
}
@@ -814,18 +798,18 @@ void BytecodeGraphBuilder::VisitStaContextSlot() {
const Operator* op = javascript()->StoreContext(
bytecode_iterator().GetUnsignedImmediateOperand(2),
bytecode_iterator().GetIndexOperand(1));
+ Node* value = environment()->LookupAccumulator();
+ Node* node = NewNode(op, value);
Node* context =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* value = environment()->LookupAccumulator();
- NewNode(op, context, value);
+ NodeProperties::ReplaceContextInput(node, context);
}
void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
const Operator* op =
javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
- Node* context = environment()->Context();
Node* value = environment()->LookupAccumulator();
- NewNode(op, context, value);
+ NewNode(op, value);
}
void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
@@ -857,15 +841,14 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
// the same scope as the variable itself has no way of shadowing it.
for (uint32_t d = 0; d < depth; d++) {
Node* extension_slot =
- NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false));
Node* check_no_extension =
NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
extension_slot, jsgraph()->TheHoleConstant());
NewBranch(check_no_extension);
- Environment* true_environment = environment()->CopyForConditional();
+ Environment* true_environment = environment()->Copy();
{
NewIfFalse();
@@ -904,8 +887,7 @@ void BytecodeGraphBuilder::BuildLdaLookupContextSlot(TypeofMode typeof_mode) {
uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
const Operator* op = javascript()->LoadContext(depth, slot_index, false);
- Node* context = environment()->Context();
- environment()->BindAccumulator(NewNode(op, context));
+ environment()->BindAccumulator(NewNode(op));
}
// Only build the slow path if there were any slow-path checks.
@@ -950,8 +932,10 @@ void BytecodeGraphBuilder::BuildLdaLookupGlobalSlot(TypeofMode typeof_mode) {
// Fast path, do a global load.
{
PrepareEagerCheckpoint();
- Node* node =
- BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+ Handle<Name> name =
+ Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+ uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+ Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1018,7 +1002,7 @@ void BytecodeGraphBuilder::VisitLdaNamedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->LoadNamed(name, feedback);
- Node* node = NewNode(op, object, GetFunctionClosure());
+ Node* node = NewNode(op, object);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1031,7 +1015,7 @@ void BytecodeGraphBuilder::VisitLdaKeyedProperty() {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
const Operator* op = javascript()->LoadProperty(feedback);
- Node* node = NewNode(op, object, key, GetFunctionClosure());
+ Node* node = NewNode(op, object, key);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -1046,7 +1030,7 @@ void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
- Node* node = NewNode(op, object, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1069,7 +1053,7 @@ void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+ Node* node = NewNode(op, object, key, value);
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
@@ -1084,9 +1068,8 @@ void BytecodeGraphBuilder::VisitStaKeyedPropertyStrict() {
void BytecodeGraphBuilder::VisitLdaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module =
- NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ Node* module = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
Node* value = NewNode(javascript()->LoadModule(cell_index), module);
environment()->BindAccumulator(value);
}
@@ -1094,9 +1077,8 @@ void BytecodeGraphBuilder::VisitLdaModuleVariable() {
void BytecodeGraphBuilder::VisitStaModuleVariable() {
int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
- Node* module =
- NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
- environment()->Context());
+ Node* module = NewNode(
+ javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false));
Node* value = environment()->LookupAccumulator();
NewNode(javascript()->StoreModule(cell_index), module, value);
}
@@ -1117,12 +1099,14 @@ void BytecodeGraphBuilder::VisitPopContext() {
void BytecodeGraphBuilder::VisitCreateClosure() {
Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
bytecode_iterator().GetConstantForIndexOperand(0));
+ int const slot_id = bytecode_iterator().GetIndexOperand(1);
+ VectorSlotPair pair = CreateVectorSlotPair(slot_id);
PretenureFlag tenured =
interpreter::CreateClosureFlags::PretenuredBit::decode(
- bytecode_iterator().GetFlagOperand(1))
+ bytecode_iterator().GetFlagOperand(2))
? TENURED
: NOT_TENURED;
- const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+ const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
Node* closure = NewNode(op);
environment()->BindAccumulator(closure);
}
@@ -1138,7 +1122,15 @@ void BytecodeGraphBuilder::VisitCreateBlockContext() {
void BytecodeGraphBuilder::VisitCreateFunctionContext() {
uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
- const Operator* op = javascript()->CreateFunctionContext(slots);
+ const Operator* op =
+ javascript()->CreateFunctionContext(slots, FUNCTION_SCOPE);
+ Node* context = NewNode(op, GetFunctionClosure());
+ environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateEvalContext() {
+ uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
+ const Operator* op = javascript()->CreateFunctionContext(slots, EVAL_SCOPE);
Node* context = NewNode(op, GetFunctionClosure());
environment()->BindAccumulator(context);
}
@@ -1198,16 +1190,21 @@ void BytecodeGraphBuilder::VisitCreateRegExpLiteral() {
}
void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
- Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
- bytecode_iterator().GetConstantForIndexOperand(0));
+ Handle<ConstantElementsPair> constant_elements =
+ Handle<ConstantElementsPair>::cast(
+ bytecode_iterator().GetConstantForIndexOperand(0));
int literal_index = bytecode_iterator().GetIndexOperand(1);
- int literal_flags = bytecode_iterator().GetFlagOperand(2);
+ int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+ int literal_flags =
+ interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
// Disable allocation site mementos. Only unoptimized code will collect
// feedback about allocation sites. Once the code is optimized, we expect the
// data to converge, so we disable allocation site mementos in optimized
// code. We can revisit this when we have data to the contrary.
literal_flags |= ArrayLiteral::kDisableMementos;
- int number_of_elements = constant_elements->length();
+ // TODO(mstarzinger): Thread through number of elements. The below number is
+ // only an estimate and does not match {ArrayLiteral::values::length}.
+ int number_of_elements = constant_elements->constant_values()->length();
Node* literal = NewNode(
javascript()->CreateLiteralArray(constant_elements, literal_flags,
literal_index, number_of_elements),
@@ -1223,7 +1220,8 @@ void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
int literal_flags =
interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
- // TODO(mstarzinger): Thread through number of properties.
+ // TODO(mstarzinger): Thread through number of properties. The below number is
+ // only an estimate and does not match {ObjectLiteral::properties_count}.
int number_of_properties = constant_properties->length() / 2;
Node* literal = NewNode(
javascript()->CreateLiteralObject(constant_properties, literal_flags,
@@ -1340,6 +1338,17 @@ void BytecodeGraphBuilder::VisitCallRuntimeForPair() {
Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitNewWithSpread() {
+ PrepareEagerCheckpoint();
+ interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(0);
+ size_t arg_count = bytecode_iterator().GetRegisterCountOperand(1);
+
+ const Operator* op =
+ javascript()->CallConstructWithSpread(static_cast<int>(arg_count));
+ Node* value = ProcessCallRuntimeArguments(op, first_arg, arg_count);
+ environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
PrepareEagerCheckpoint();
Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
@@ -1607,6 +1616,13 @@ void BytecodeGraphBuilder::VisitDeletePropertySloppy() {
BuildDelete(LanguageMode::SLOPPY);
}
+void BytecodeGraphBuilder::VisitGetSuperConstructor() {
+ Node* node = NewNode(javascript()->GetSuperConstructor(),
+ environment()->LookupAccumulator());
+ environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+ Environment::kAttachFrameState);
+}
+
void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
PrepareEagerCheckpoint();
Node* left =
@@ -1652,8 +1668,30 @@ void BytecodeGraphBuilder::VisitTestInstanceOf() {
BuildCompareOp(javascript()->InstanceOf());
}
+void BytecodeGraphBuilder::VisitTestUndetectable() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* node = NewNode(jsgraph()->simplified()->ObjectIsUndetectable(), object);
+ environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitTestNull() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ object, jsgraph()->NullConstant());
+ environment()->BindAccumulator(result);
+}
+
+void BytecodeGraphBuilder::VisitTestUndefined() {
+ Node* object =
+ environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+ Node* result = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
+ object, jsgraph()->UndefinedConstant());
+ environment()->BindAccumulator(result);
+}
+
void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
- PrepareEagerCheckpoint();
Node* value = NewNode(js_op, environment()->LookupAccumulator());
environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
Environment::kAttachFrameState);
@@ -1705,6 +1743,12 @@ void BytecodeGraphBuilder::VisitJumpIfNotHoleConstant() {
BuildJumpIfNotHole();
}
+void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
+
+void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
+ BuildJumpIfJSReceiver();
+}
+
void BytecodeGraphBuilder::VisitJumpIfNull() {
BuildJumpIfEqual(jsgraph()->NullConstant());
}
@@ -1729,6 +1773,12 @@ void BytecodeGraphBuilder::VisitStackCheck() {
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}
+void BytecodeGraphBuilder::VisitSetPendingMessage() {
+ Node* previous_message = NewNode(javascript()->LoadMessage());
+ NewNode(javascript()->StoreMessage(), environment()->LookupAccumulator());
+ environment()->BindAccumulator(previous_message);
+}
+
void BytecodeGraphBuilder::VisitReturn() {
BuildLoopExitsForFunctionExit();
Node* pop_node = jsgraph()->ZeroConstant();
@@ -1742,7 +1792,6 @@ void BytecodeGraphBuilder::VisitDebugger() {
Node* call =
NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
environment()->BindAccumulator(call, Environment::kAttachFrameState);
- environment()->MarkAllRegistersLive();
}
// We cannot create a graph from the debugger copy of the bytecode array.
@@ -1866,33 +1915,43 @@ void BytecodeGraphBuilder::VisitIllegal() {
void BytecodeGraphBuilder::VisitNop() {}
void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
- if (merge_environments_[current_offset] != nullptr) {
+ auto it = merge_environments_.find(current_offset);
+ if (it != merge_environments_.end()) {
if (environment() != nullptr) {
- merge_environments_[current_offset]->Merge(environment());
+ it->second->Merge(environment());
}
- set_environment(merge_environments_[current_offset]);
+ set_environment(it->second);
}
}
void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
- if (branch_analysis()->backward_branches_target(current_offset)) {
- // Add loop header and store a copy so we can connect merged back
- // edge inputs to the loop header.
- merge_environments_[current_offset] = environment()->CopyForLoop();
+ if (bytecode_analysis()->IsLoopHeader(current_offset)) {
+ const LoopInfo& loop_info =
+ bytecode_analysis()->GetLoopInfoFor(current_offset);
+
+ // Add loop header.
+ environment()->PrepareForLoop(loop_info.assignments());
+
+ BuildOSRLoopEntryPoint(current_offset);
+
+ // Store a copy of the environment so we can connect merged back edge inputs
+ // to the loop header.
+ merge_environments_[current_offset] = environment()->Copy();
}
}
void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
BuildLoopExitsForBranch(target_offset);
- if (merge_environments_[target_offset] == nullptr) {
+ Environment*& merge_environment = merge_environments_[target_offset];
+ if (merge_environment == nullptr) {
// Append merge nodes to the environment. We may merge here with another
// environment, so add a placeholder for merge nodes. Redundant merge nodes
// may be added, but they will be eliminated in a later pass.
// TODO(mstarzinger): Be smarter about this!
NewMerge();
- merge_environments_[target_offset] = environment();
+ merge_environment = environment();
} else {
- merge_environments_[target_offset]->Merge(environment());
+ merge_environment->Merge(environment());
}
set_environment(nullptr);
}
@@ -1903,13 +1962,14 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
}
void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
- if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+ DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
+
+ if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
// For OSR add a special {OsrLoopEntry} node into the current loop header.
// It will be turned into a usable entry by the OSR deconstruction.
- Environment* loop_env = merge_environments_[current_offset];
- Environment* osr_env = loop_env->CopyForOsrEntry();
+ Environment* osr_env = environment()->Copy();
osr_env->PrepareForOsrEntry();
- loop_env->Merge(osr_env);
+ environment()->Merge(osr_env);
}
}
@@ -1918,9 +1978,11 @@ void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
// For OSR add an {OsrNormalEntry} as the top-level environment start.
// It will be replaced with {Dead} by the OSR deconstruction.
NewNode(common()->OsrNormalEntry());
- // Note that the requested OSR entry point must be the target of a backward
- // branch, otherwise there will not be a proper loop header available.
- DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+ // Translate the offset of the jump instruction to the jump target offset of
+ // that instruction so that the derived BailoutId points to the loop header.
+ osr_loop_offset_ =
+ bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
+ DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
}
}
@@ -1928,17 +1990,20 @@ void BytecodeGraphBuilder::BuildLoopExitsForBranch(int target_offset) {
int origin_offset = bytecode_iterator().current_offset();
// Only build loop exits for forward edges.
if (target_offset > origin_offset) {
- BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+ BuildLoopExitsUntilLoop(
+ bytecode_analysis()->GetLoopOffsetFor(target_offset));
}
}
void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
int origin_offset = bytecode_iterator().current_offset();
- int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
+ int current_loop = bytecode_analysis()->GetLoopOffsetFor(origin_offset);
while (loop_offset < current_loop) {
Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
- environment()->PrepareForLoopExit(loop_node);
- current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+ const LoopInfo& loop_info =
+ bytecode_analysis()->GetLoopInfoFor(current_loop);
+ environment()->PrepareForLoopExit(loop_node, loop_info.assignments());
+ current_loop = loop_info.parent_offset();
}
}
@@ -1952,7 +2017,7 @@ void BytecodeGraphBuilder::BuildJump() {
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition);
- Environment* if_false_environment = environment()->CopyForConditional();
+ Environment* if_false_environment = environment()->Copy();
NewIfTrue();
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
set_environment(if_false_environment);
@@ -1961,7 +2026,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
NewBranch(condition);
- Environment* if_true_environment = environment()->CopyForConditional();
+ Environment* if_true_environment = environment()->Copy();
NewIfFalse();
MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
set_environment(if_true_environment);
@@ -2006,6 +2071,12 @@ void BytecodeGraphBuilder::BuildJumpIfNotHole() {
BuildJumpIfNot(condition);
}
+void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
+ Node* accumulator = environment()->LookupAccumulator();
+ Node* condition = NewNode(simplified()->ObjectIsReceiver(), accumulator);
+ BuildJumpIf(condition);
+}
+
Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
if (size > input_buffer_size_) {
size = size + kInputBufferSizeIncrement + input_buffer_size_;
@@ -2093,7 +2164,7 @@ Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
int handler_offset = exception_handlers_.top().handler_offset_;
int context_index = exception_handlers_.top().context_register_;
interpreter::Register context_register(context_index);
- Environment* success_env = environment()->CopyForConditional();
+ Environment* success_env = environment()->Copy();
const Operator* op = common()->IfException();
Node* effect = environment()->GetEffectDependency();
Node* on_exception = graph()->NewNode(op, effect, result);
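
Note: PrepareForLoop and PrepareForLoopExit above repeat the same guarded
walk over the loop's assignment set. A hypothetical helper (not part of this
patch) that captures the shared pattern:

    // Illustrative only: visit exactly the values() indices the loop may
    // mutate, i.e. the ones needing a Phi on entry or a LoopExitValue rename
    // on exit.
    template <typename F>
    void ForEachAssignedValue(const BytecodeLoopAssignments& assignments,
                              int register_base, int accumulator_base, F f) {
      for (int i = 0; i < assignments.parameter_count(); i++) {
        if (assignments.ContainsParameter(i)) f(i);
      }
      for (int i = 0; i < assignments.local_count(); i++) {
        if (assignments.ContainsLocal(i)) f(register_base + i);
      }
      if (assignments.ContainsAccumulator()) f(accumulator_base);
    }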
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 6994226dc3..6ca7d29152 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -5,12 +5,10 @@
#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/compiler/bytecode-loop-analysis.h"
+#include "src/compiler/bytecode-analysis.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecodes.h"
@@ -18,9 +16,6 @@
namespace v8 {
namespace internal {
-
-class CompilationInfo;
-
namespace compiler {
class SourcePositionTable;
@@ -29,8 +24,10 @@ class SourcePositionTable;
// interpreter bytecodes.
class BytecodeGraphBuilder {
public:
- BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ BytecodeGraphBuilder(Zone* local_zone, Handle<SharedFunctionInfo> shared,
+ Handle<TypeFeedbackVector> feedback_vector,
+ BailoutId osr_ast_id, JSGraph* jsgraph,
+ float invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
@@ -131,12 +128,9 @@ class BytecodeGraphBuilder {
// Conceptually this frame state is "after" a given operation.
void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
- // Computes register liveness and replaces dead ones in frame states with the
- // undefined values.
- void ClearNonLiveSlotsInFrameStates();
-
void BuildCreateArguments(CreateArgumentsType type);
- Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
+ Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+ TypeofMode typeof_mode);
void BuildStoreGlobal(LanguageMode language_mode);
void BuildNamedStore(LanguageMode language_mode);
void BuildKeyedStore(LanguageMode language_mode);
@@ -181,6 +175,7 @@ class BytecodeGraphBuilder {
void BuildJumpIfToBooleanTrue();
void BuildJumpIfToBooleanFalse();
void BuildJumpIfNotHole();
+ void BuildJumpIfJSReceiver();
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
@@ -203,6 +198,10 @@ class BytecodeGraphBuilder {
// Simulates entry and exit of exception handlers.
void EnterAndExitExceptionHandlers(int current_offset);
+ // Update the current position of the {SourcePositionTable} to that of the
+ // bytecode at {offset}, if any.
+ void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -224,6 +223,9 @@ class BytecodeGraphBuilder {
Zone* graph_zone() const { return graph()->zone(); }
JSGraph* jsgraph() const { return jsgraph_; }
JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph_->simplified();
+ }
Zone* local_zone() const { return local_zone_; }
const Handle<BytecodeArray>& bytecode_array() const {
return bytecode_array_;
@@ -247,22 +249,14 @@ class BytecodeGraphBuilder {
bytecode_iterator_ = bytecode_iterator;
}
- const BytecodeBranchAnalysis* branch_analysis() const {
- return branch_analysis_;
+ const BytecodeAnalysis* bytecode_analysis() const {
+ return bytecode_analysis_;
}
- void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
- branch_analysis_ = branch_analysis;
+ void set_bytecode_analysis(const BytecodeAnalysis* bytecode_analysis) {
+ bytecode_analysis_ = bytecode_analysis;
}
- const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
-
- void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
- loop_analysis_ = loop_analysis;
- }
-
- LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
-
bool IsLivenessAnalysisEnabled() const {
return this->is_liveness_analysis_enabled_;
}
@@ -279,10 +273,10 @@ class BytecodeGraphBuilder {
Handle<TypeFeedbackVector> feedback_vector_;
const FrameStateFunctionInfo* frame_state_function_info_;
const interpreter::BytecodeArrayIterator* bytecode_iterator_;
- const BytecodeBranchAnalysis* branch_analysis_;
- const BytecodeLoopAnalysis* loop_analysis_;
+ const BytecodeAnalysis* bytecode_analysis_;
Environment* environment_;
BailoutId osr_ast_id_;
+ int osr_loop_offset_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
@@ -309,18 +303,11 @@ class BytecodeGraphBuilder {
StateValuesCache state_values_cache_;
- // Analyzer of register liveness.
- LivenessAnalyzer liveness_analyzer_;
-
- // The Turbofan source position table, to be populated.
+ // The source position table, to be populated.
SourcePositionTable* source_positions_;
SourcePosition const start_position_;
- // Update [source_positions_]'s current position to that of the bytecode at
- // [offset], if any.
- void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
-
static int const kBinaryOperationHintIndex = 1;
static int const kCountOperationHintIndex = 0;
static int const kBinaryOperationSmiHintIndex = 2;
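
Note: with CompilationInfo dropped from the constructor, callers now pass the
pieces explicitly. A hedged sketch of a post-change call site ({info},
{local_zone}, {jsgraph}, {invocation_frequency} and {source_positions} are
assumed surrounding state, not shown in this patch):

    // Previously only a CompilationInfo* was passed here.
    BytecodeGraphBuilder builder(
        local_zone, info->shared_info(),
        handle(info->closure()->feedback_vector()), info->osr_ast_id(),
        jsgraph, invocation_frequency, source_positions);
    builder.CreateGraph(stack_check);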
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.cc b/deps/v8/src/compiler/bytecode-liveness-map.cc
new file mode 100644
index 0000000000..ba98dec6e5
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-liveness-map.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-liveness-map.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLiveness::BytecodeLiveness(int register_count, Zone* zone)
+ : in(new (zone) BytecodeLivenessState(register_count, zone)),
+ out(new (zone) BytecodeLivenessState(register_count, zone)) {}
+
+BytecodeLivenessMap::BytecodeLivenessMap(int bytecode_size, Zone* zone)
+ : liveness_map_(base::bits::RoundUpToPowerOfTwo32(bytecode_size / 4 + 1),
+ base::KeyEqualityMatcher<int>(),
+ ZoneAllocationPolicy(zone)) {}
+
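+// Bytecode offsets serve directly as hash values; an identity hash is
+// sufficient since each offset is a distinct key.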
+uint32_t OffsetHash(int offset) { return offset; }
+
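+// Finds or creates the liveness entry for {offset}; the construction lambda
+// runs only when the offset is not yet present in the map.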
+BytecodeLiveness& BytecodeLivenessMap::InitializeLiveness(int offset,
+ int register_count,
+ Zone* zone) {
+ return liveness_map_
+ .LookupOrInsert(offset, OffsetHash(offset),
+ [&]() { return BytecodeLiveness(register_count, zone); },
+ ZoneAllocationPolicy(zone))
+ ->value;
+}
+
+BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) {
+ return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+const BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) const {
+ return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-liveness-map.h b/deps/v8/src/compiler/bytecode-liveness-map.h
new file mode 100644
index 0000000000..03251f1367
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-liveness-map.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class BytecodeLivenessState : public ZoneObject {
+ public:
+ BytecodeLivenessState(int register_count, Zone* zone)
+ : bit_vector_(register_count + 1, zone) {}
+
+ const BitVector& bit_vector() const { return bit_vector_; }
+
+ BitVector& bit_vector() { return bit_vector_; }
+
+ bool RegisterIsLive(int index) const {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ return bit_vector_.Contains(index);
+ }
+
+ bool AccumulatorIsLive() const {
+ return bit_vector_.Contains(bit_vector_.length() - 1);
+ }
+
+ bool Equals(const BytecodeLivenessState& other) const {
+ return bit_vector_.Equals(other.bit_vector_);
+ }
+
+ void MarkRegisterLive(int index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ bit_vector_.Add(index);
+ }
+
+ void MarkRegisterDead(int index) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, bit_vector_.length() - 1);
+ bit_vector_.Remove(index);
+ }
+
+ void MarkAccumulatorLive() { bit_vector_.Add(bit_vector_.length() - 1); }
+
+ void MarkAccumulatorDead() { bit_vector_.Remove(bit_vector_.length() - 1); }
+
+ void MarkAllLive() { bit_vector_.AddAll(); }
+
+ void Union(const BytecodeLivenessState& other) {
+ bit_vector_.Union(other.bit_vector_);
+ }
+
+ bool UnionIsChanged(const BytecodeLivenessState& other) {
+ return bit_vector_.UnionIsChanged(other.bit_vector_);
+ }
+
+ void CopyFrom(const BytecodeLivenessState& other) {
+ bit_vector_.CopyFrom(other.bit_vector_);
+ }
+
+ private:
+ BitVector bit_vector_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
+};
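+
+// Usage sketch: with three registers the bit vector holds r0..r2 plus one
+// trailing bit for the accumulator:
+//   BytecodeLivenessState state(3, zone);
+//   state.MarkRegisterLive(0);
+//   state.MarkAccumulatorLive();
+//   DCHECK(state.RegisterIsLive(0) && state.AccumulatorIsLive());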
+
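+// Liveness immediately before ("in") and immediately after ("out") the
+// bytecode at a given offset, in the usual dataflow sense.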
+struct BytecodeLiveness {
+ BytecodeLivenessState* in;
+ BytecodeLivenessState* out;
+
+ BytecodeLiveness(int register_count, Zone* zone);
+};
+
+class V8_EXPORT_PRIVATE BytecodeLivenessMap {
+ public:
+ BytecodeLivenessMap(int size, Zone* zone);
+
+ BytecodeLiveness& InitializeLiveness(int offset, int register_count,
+ Zone* zone);
+
+ BytecodeLiveness& GetLiveness(int offset);
+ const BytecodeLiveness& GetLiveness(int offset) const;
+
+ BytecodeLivenessState* GetInLiveness(int offset) {
+ return GetLiveness(offset).in;
+ }
+ const BytecodeLivenessState* GetInLiveness(int offset) const {
+ return GetLiveness(offset).in;
+ }
+
+ BytecodeLivenessState* GetOutLiveness(int offset) {
+ return GetLiveness(offset).out;
+ }
+ const BytecodeLivenessState* GetOutLiveness(int offset) const {
+ return GetLiveness(offset).out;
+ }
+
+ private:
+ base::TemplateHashMapImpl<int, BytecodeLiveness,
+ base::KeyEqualityMatcher<int>, ZoneAllocationPolicy>
+ liveness_map_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.cc b/deps/v8/src/compiler/bytecode-loop-analysis.cc
deleted file mode 100644
index 03c11f7196..0000000000
--- a/deps/v8/src/compiler/bytecode-loop-analysis.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-loop-analysis.h"
-
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeLoopAnalysis::BytecodeLoopAnalysis(
- Handle<BytecodeArray> bytecode_array,
- const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
- : bytecode_array_(bytecode_array),
- branch_analysis_(branch_analysis),
- zone_(zone),
- current_loop_offset_(-1),
- found_current_backedge_(false),
- backedge_to_header_(zone),
- loop_header_to_parent_(zone) {}
-
-void BytecodeLoopAnalysis::Analyze() {
- current_loop_offset_ = -1;
- found_current_backedge_ = false;
- interpreter::BytecodeArrayIterator iterator(bytecode_array());
- while (!iterator.done()) {
- interpreter::Bytecode bytecode = iterator.current_bytecode();
- int current_offset = iterator.current_offset();
- if (branch_analysis_->backward_branches_target(current_offset)) {
- AddLoopEntry(current_offset);
- } else if (interpreter::Bytecodes::IsJump(bytecode)) {
- AddBranch(current_offset, iterator.GetJumpTargetOffset());
- }
- iterator.Advance();
- }
-}
-
-void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
- if (found_current_backedge_) {
- // We assume that all backedges of a loop must occur together and before
- // another loop entry or an outer loop backedge.
- // This is guaranteed by the invariants from AddBranch, such that every
- // backedge must either go to the current loop or be the first of the
- // backedges to the parent loop.
-      // Thus the current loop actually ended earlier, and this new loop
-      // entry belongs to a loop with the same parent.
- current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
- found_current_backedge_ = false;
- }
- loop_header_to_parent_[entry_offset] = current_loop_offset_;
- current_loop_offset_ = entry_offset;
-}
-
-void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
- // If this is a backedge, record it.
- if (target_offset < origin_offset) {
- backedge_to_header_[origin_offset] = target_offset;
- // Check whether this is actually a backedge of the outer loop and we have
- // already finished the current loop.
- if (target_offset < current_loop_offset_) {
- DCHECK(found_current_backedge_);
- int parent_offset = loop_header_to_parent_[current_loop_offset_];
- DCHECK_EQ(target_offset, parent_offset);
- current_loop_offset_ = parent_offset;
- } else {
- DCHECK_EQ(target_offset, current_loop_offset_);
- found_current_backedge_ = true;
- }
- }
-}
-
-int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
- auto next_backedge = backedge_to_header_.lower_bound(offset);
- // If there is no next backedge => offset is not in a loop.
- if (next_backedge == backedge_to_header_.end()) {
- return -1;
- }
-  // If the header precedes the offset, it is the header of the containing
-  // loop.
- if (next_backedge->second <= offset) {
- return next_backedge->second;
- }
- // Otherwise there is a nested loop after this offset. We just return the
- // parent of the next nested loop.
- return loop_header_to_parent_.upper_bound(offset)->second;
-}
-
-int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
- auto parent = loop_header_to_parent_.find(header_offset);
- DCHECK(parent != loop_header_to_parent_.end());
- return parent->second;
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-loop-analysis.h b/deps/v8/src/compiler/bytecode-loop-analysis.h
deleted file mode 100644
index 1a86d7b81f..0000000000
--- a/deps/v8/src/compiler/bytecode-loop-analysis.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-
-#include "src/handles.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-class BytecodeBranchAnalysis;
-
-class BytecodeLoopAnalysis BASE_EMBEDDED {
- public:
- BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
- const BytecodeBranchAnalysis* branch_analysis,
- Zone* zone);
-
-  // Analyze the bytecodes to find loop entries and their nesting. No
-  // other methods in this class return valid information until this has
-  // been called.
- void Analyze();
-
- // Get the loop header offset of the containing loop for arbitrary
- // {offset}, or -1 if the {offset} is not inside any loop.
- int GetLoopOffsetFor(int offset) const;
- // Gets the loop header offset of the parent loop of the loop header
- // at {header_offset}, or -1 for outer-most loops.
- int GetParentLoopFor(int header_offset) const;
-
- private:
- void AddLoopEntry(int entry_offset);
- void AddBranch(int origin_offset, int target_offset);
-
- Zone* zone() const { return zone_; }
- Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
- Handle<BytecodeArray> bytecode_array_;
- const BytecodeBranchAnalysis* branch_analysis_;
- Zone* zone_;
-
- int current_loop_offset_;
- bool found_current_backedge_;
-
- // Map from the offset of a backedge jump to the offset of the corresponding
- // loop header. There might be multiple backedges for do-while loops.
- ZoneMap<int, int> backedge_to_header_;
- // Map from the offset of a loop header to the offset of its parent's loop
- // header. This map will have as many entries as there are loops in the
- // function.
- ZoneMap<int, int> loop_header_to_parent_;
-
- DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 3431098446..991ae3699d 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -22,15 +22,23 @@
#include "src/utils.h"
#include "src/zone/zone.h"
+#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
+#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
+#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
+#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
+#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
+#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
+#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
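+// For example, REPEAT_1_TO_3(V, T) expands to V(T) V(T, T) V(T, T, T); the
+// larger variants follow the same pattern.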
+
namespace v8 {
namespace internal {
namespace compiler {
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor,
- Code::Flags flags, const char* name,
- size_t result_size)
- : CodeAssembler(
+CodeAssemblerState::CodeAssemblerState(
+ Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name, size_t result_size)
+ : CodeAssemblerState(
isolate, zone,
Linkage::GetStubCallDescriptor(
isolate, zone, descriptor, descriptor.GetStackParameterCount(),
@@ -38,19 +46,20 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
MachineType::AnyTagged(), result_size),
flags, name) {}
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name)
- : CodeAssembler(isolate, zone,
- Linkage::GetJSCallDescriptor(
- zone, false, parameter_count,
- Code::ExtractKindFromFlags(flags) == Code::BUILTIN
- ? CallDescriptor::kPushArgumentCount
- : CallDescriptor::kNoFlags),
- flags, name) {}
-
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
- CallDescriptor* call_descriptor, Code::Flags flags,
- const char* name)
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+ int parameter_count, Code::Flags flags,
+ const char* name)
+ : CodeAssemblerState(isolate, zone,
+ Linkage::GetJSCallDescriptor(
+ zone, false, parameter_count,
+ Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+ ? CallDescriptor::kPushArgumentCount
+ : CallDescriptor::kNoFlags),
+ flags, name) {}
+
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor,
+ Code::Flags flags, const char* name)
: raw_assembler_(new RawMachineAssembler(
isolate, new (zone) Graph(zone), call_descriptor,
MachineType::PointerRepresentation(),
@@ -61,56 +70,109 @@ CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
code_generated_(false),
variables_(zone) {}
+CodeAssemblerState::~CodeAssemblerState() {}
+
+int CodeAssemblerState::parameter_count() const {
+ return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
+}
+
CodeAssembler::~CodeAssembler() {}
-void CodeAssembler::CallPrologue() {}
+class BreakOnNodeDecorator final : public GraphDecorator {
+ public:
+ explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
+
+ void Decorate(Node* node) final {
+ if (node->id() == node_id_) {
+ base::OS::DebugBreak();
+ }
+ }
+
+ private:
+ NodeId node_id_;
+};
+
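+// Debugging aid: installs a graph decorator that hits base::OS::DebugBreak()
+// as soon as a node with the given id is created.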
+void CodeAssembler::BreakOnNode(int node_id) {
+ Graph* graph = raw_assembler()->graph();
+ Zone* zone = graph->zone();
+ GraphDecorator* decorator =
+ new (zone) BreakOnNodeDecorator(static_cast<NodeId>(node_id));
+ graph->AddDecorator(decorator);
+}
-void CodeAssembler::CallEpilogue() {}
+void CodeAssembler::RegisterCallGenerationCallbacks(
+ const CodeAssemblerCallback& call_prologue,
+ const CodeAssemblerCallback& call_epilogue) {
+  // The callbacks can be registered only once.
+ DCHECK(!state_->call_prologue_);
+ DCHECK(!state_->call_epilogue_);
+ state_->call_prologue_ = call_prologue;
+ state_->call_epilogue_ = call_epilogue;
+}
-Handle<Code> CodeAssembler::GenerateCode() {
- DCHECK(!code_generated_);
+void CodeAssembler::UnregisterCallGenerationCallbacks() {
+ state_->call_prologue_ = nullptr;
+ state_->call_epilogue_ = nullptr;
+}
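+
+// When set, these hooks bracket every call emitted below: CallPrologue()
+// runs before and CallEpilogue() after each call node is created.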
- Schedule* schedule = raw_assembler_->Export();
+void CodeAssembler::CallPrologue() {
+ if (state_->call_prologue_) {
+ state_->call_prologue_();
+ }
+}
+
+void CodeAssembler::CallEpilogue() {
+ if (state_->call_epilogue_) {
+ state_->call_epilogue_();
+ }
+}
+
+// static
+Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
+ DCHECK(!state->code_generated_);
+
+ RawMachineAssembler* rasm = state->raw_assembler_.get();
+ Schedule* schedule = rasm->Export();
Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
- isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
- schedule, flags_, name_);
+ rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+ state->flags_, state->name_);
- code_generated_ = true;
+ state->code_generated_ = true;
return code;
}
-bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
bool CodeAssembler::IsFloat64RoundUpSupported() const {
- return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+ return raw_assembler()->machine()->Float64RoundUp().IsSupported();
}
bool CodeAssembler::IsFloat64RoundDownSupported() const {
- return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+ return raw_assembler()->machine()->Float64RoundDown().IsSupported();
}
bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
- return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+ return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
}
bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
- return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+ return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
}
Node* CodeAssembler::Int32Constant(int32_t value) {
- return raw_assembler_->Int32Constant(value);
+ return raw_assembler()->Int32Constant(value);
}
Node* CodeAssembler::Int64Constant(int64_t value) {
- return raw_assembler_->Int64Constant(value);
+ return raw_assembler()->Int64Constant(value);
}
Node* CodeAssembler::IntPtrConstant(intptr_t value) {
- return raw_assembler_->IntPtrConstant(value);
+ return raw_assembler()->IntPtrConstant(value);
}
Node* CodeAssembler::NumberConstant(double value) {
- return raw_assembler_->NumberConstant(value);
+ return raw_assembler()->NumberConstant(value);
}
Node* CodeAssembler::SmiConstant(Smi* value) {
@@ -122,19 +184,19 @@ Node* CodeAssembler::SmiConstant(int value) {
}
Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
- return raw_assembler_->HeapConstant(object);
+ return raw_assembler()->HeapConstant(object);
}
Node* CodeAssembler::BooleanConstant(bool value) {
- return raw_assembler_->BooleanConstant(value);
+ return raw_assembler()->BooleanConstant(value);
}
Node* CodeAssembler::ExternalConstant(ExternalReference address) {
- return raw_assembler_->ExternalConstant(address);
+ return raw_assembler()->ExternalConstant(address);
}
Node* CodeAssembler::Float64Constant(double value) {
- return raw_assembler_->Float64Constant(value);
+ return raw_assembler()->Float64Constant(value);
}
Node* CodeAssembler::NaNConstant() {
@@ -174,24 +236,28 @@ bool CodeAssembler::ToSmiConstant(Node* node, Smi*& out_value) {
}
bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
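+  // Look through tagged<->word bitcasts so that constants wrapped by
+  // BitcastWordToTagged(Signed) still match as IntPtr constants.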
+ if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
+ node->opcode() == IrOpcode::kBitcastWordToTagged) {
+ node = node->InputAt(0);
+ }
IntPtrMatcher m(node);
if (m.HasValue()) out_value = m.Value();
return m.HasValue();
}
Node* CodeAssembler::Parameter(int value) {
- return raw_assembler_->Parameter(value);
+ return raw_assembler()->Parameter(value);
}
void CodeAssembler::Return(Node* value) {
- return raw_assembler_->Return(value);
+ return raw_assembler()->Return(value);
}
void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
- return raw_assembler_->PopAndReturn(pop, value);
+ return raw_assembler()->PopAndReturn(pop, value);
}
-void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
void CodeAssembler::Comment(const char* format, ...) {
if (!FLAG_code_comments) return;
@@ -210,81 +276,118 @@ void CodeAssembler::Comment(const char* format, ...) {
MemCopy(copy + prefix_len, builder.Finalize(), length);
copy[0] = ';';
copy[1] = ' ';
- raw_assembler_->Comment(copy);
+ raw_assembler()->Comment(copy);
}
-void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+void CodeAssembler::Bind(Label* label) { return label->Bind(); }
Node* CodeAssembler::LoadFramePointer() {
- return raw_assembler_->LoadFramePointer();
+ return raw_assembler()->LoadFramePointer();
}
Node* CodeAssembler::LoadParentFramePointer() {
- return raw_assembler_->LoadParentFramePointer();
+ return raw_assembler()->LoadParentFramePointer();
}
Node* CodeAssembler::LoadStackPointer() {
- return raw_assembler_->LoadStackPointer();
+ return raw_assembler()->LoadStackPointer();
}
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name) \
Node* CodeAssembler::name(Node* a, Node* b) { \
- return raw_assembler_->name(a, b); \
+ return raw_assembler()->name(a, b); \
}
CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
#undef DEFINE_CODE_ASSEMBLER_BINARY_OP
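+// IntPtrAdd/IntPtrSub below fold constant operands at graph-building time:
+// two constants combine into one, adding zero on either side returns the
+// other operand, and subtracting zero returns the left operand.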
+Node* CodeAssembler::IntPtrAdd(Node* left, Node* right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant + right_constant);
+ }
+ if (left_constant == 0) {
+ return right;
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return raw_assembler()->IntPtrAdd(left, right);
+}
+
+Node* CodeAssembler::IntPtrSub(Node* left, Node* right) {
+ intptr_t left_constant;
+ bool is_left_constant = ToIntPtrConstant(left, left_constant);
+ intptr_t right_constant;
+ bool is_right_constant = ToIntPtrConstant(right, right_constant);
+ if (is_left_constant) {
+ if (is_right_constant) {
+ return IntPtrConstant(left_constant - right_constant);
+ }
+ } else if (is_right_constant) {
+ if (right_constant == 0) {
+ return left;
+ }
+ }
+ return raw_assembler()->IntPtrSub(left, right);
+}
+
Node* CodeAssembler::WordShl(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+ return (shift != 0) ? raw_assembler()->WordShl(value, IntPtrConstant(shift))
: value;
}
Node* CodeAssembler::WordShr(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+ return (shift != 0) ? raw_assembler()->WordShr(value, IntPtrConstant(shift))
: value;
}
Node* CodeAssembler::Word32Shr(Node* value, int shift) {
- return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
+ return (shift != 0) ? raw_assembler()->Word32Shr(value, Int32Constant(shift))
: value;
}
Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- value = raw_assembler_->ChangeUint32ToUint64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ value = raw_assembler()->ChangeUint32ToUint64(value);
}
return value;
}
Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- value = raw_assembler_->ChangeInt32ToInt64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ value = raw_assembler()->ChangeInt32ToInt64(value);
}
return value;
}
Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
- if (raw_assembler_->machine()->Is64()) {
- return raw_assembler_->RoundInt64ToFloat64(value);
+ if (raw_assembler()->machine()->Is64()) {
+ return raw_assembler()->RoundInt64ToFloat64(value);
}
- return raw_assembler_->ChangeInt32ToFloat64(value);
+ return raw_assembler()->ChangeInt32ToFloat64(value);
}
#define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
- Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+ Node* CodeAssembler::name(Node* a) { return raw_assembler()->name(a); }
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
Node* CodeAssembler::Load(MachineType rep, Node* base) {
- return raw_assembler_->Load(rep, base);
+ return raw_assembler()->Load(rep, base);
}
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->Load(rep, base, index);
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
+ return raw_assembler()->Load(rep, base, offset);
}
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
- return raw_assembler_->AtomicLoad(rep, base, index);
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
+ return raw_assembler()->AtomicLoad(rep, base, offset);
}
Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
@@ -303,28 +406,35 @@ Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
IntPtrConstant(root_index * kPointerSize));
}
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
- return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
+ kFullWriteBarrier);
+}
+
+Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+ value, kFullWriteBarrier);
}
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
- Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
+ Node* value) {
+ return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+ value, kMapWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
Node* value) {
- return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+ return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
}
Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+ Node* offset, Node* value) {
+ return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
}
Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
- Node* index, Node* value) {
- return raw_assembler_->AtomicStore(rep, base, index, value);
+ Node* offset, Node* value) {
+ return raw_assembler()->AtomicStore(rep, base, offset, value);
}
Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
@@ -336,11 +446,11 @@ Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
}
Node* CodeAssembler::Retain(Node* value) {
- return raw_assembler_->Retain(value);
+ return raw_assembler()->Retain(value);
}
Node* CodeAssembler::Projection(int index, Node* value) {
- return raw_assembler_->Projection(index, value);
+ return raw_assembler()->Projection(index, value);
}
void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
@@ -350,11 +460,11 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
exception.MergeVariables();
DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
- raw_assembler_->Continuations(node, success.label_, exception.label_);
+ raw_assembler()->Continuations(node, success.label_, exception.label_);
Bind(&exception);
- const Operator* op = raw_assembler_->common()->IfException();
- Node* exception_value = raw_assembler_->AddNode(op, node, node);
+ const Operator* op = raw_assembler()->common()->IfException();
+ Node* exception_value = raw_assembler()->AddNode(op, node, node);
if (exception_var != nullptr) {
exception_var->Bind(exception_value);
}
@@ -363,627 +473,155 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
Bind(&success);
}
-Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
- CallEpilogue();
- return return_value;
-}
+template <class... TArgs>
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
+ TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, argc, Operator::kNoProperties,
+ CallDescriptor::kNoFlags);
+ int return_count = static_cast<int>(desc->ReturnCount());
-Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
- Node** args) {
- return raw_assembler_->TailCallN(descriptor, code_target, args);
-}
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+ Node* arity = Int32Constant(argc);
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
- CallEpilogue();
- return return_value;
-}
+ Node* nodes[] = {centry, args..., ref, arity, context};
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1) {
CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
+ Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
CallEpilogue();
return return_value;
}
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
- CallEpilogue();
- return return_value;
-}
+// Instantiate CallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
+ Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
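+
+// Example (sketch): a single-argument call passes the context first, e.g.
+//   Node* result = CallRuntime(Runtime::kToNumber, context, value);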
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3) {
- CallPrologue();
- Node* return_value =
- raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
- CallEpilogue();
- return return_value;
-}
+template <class... TArgs>
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
+ Node* context, TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, argc, Operator::kNoProperties,
+ CallDescriptor::kSupportsTailCalls);
+ int return_count = static_cast<int>(desc->ReturnCount());
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
- arg3, arg4, context);
- CallEpilogue();
- return return_value;
-}
+ Node* centry =
+ HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+ Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+ Node* arity = Int32Constant(argc);
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5) {
- CallPrologue();
- Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
- arg3, arg4, arg5, context);
- CallEpilogue();
- return return_value;
-}
+ Node* nodes[] = {centry, args..., ref, arity, context};
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context) {
- return raw_assembler_->TailCallRuntime0(function_id, context);
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1) {
- return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
+// Instantiate TailCallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
+ Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2) {
- return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
+template <class... TArgs>
+Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
+ size_t result_size, Node* target, Node* context,
+ TArgs... args) {
+ Node* nodes[] = {target, args..., context};
+ return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3) {
- return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
- context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4) {
- return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
- context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5) {
- return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
- arg5, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
- Node* context, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5,
- Node* arg6) {
- return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
- arg5, arg6, context);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, result_size);
-}
-
-Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return CallStubN(callable.descriptor(), target, args, result_size);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(1);
- args[0] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 3;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 4;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3, const Arg& arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 5;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3, const Arg& arg4,
- const Arg& arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kNoFlags, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 6;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[arg5.index] = arg5.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return CallN(call_descriptor, target, args);
-}
+// Instantiate CallStubR() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
+ const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
- int js_parameter_count, Node* target,
- Node** args, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor,
- descriptor.GetStackParameterCount() + js_parameter_count,
+ size_t result_size, int input_count,
+ Node* const* inputs) {
+ // 2 is for target and context.
+ DCHECK_LE(2, input_count);
+ int argc = input_count - 2;
+ DCHECK_LE(descriptor.GetParameterCount(), argc);
+ // Extra arguments not mentioned in the descriptor are passed on the stack.
+ int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
+ DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, stack_parameter_count,
CallDescriptor::kNoFlags, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
- return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, size_t result_size) {
- Node* target = HeapConstant(callable.code());
- return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
- arg4, arg5, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(2);
- args[0] = arg1;
- args[1] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(3);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(4);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(5);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(6);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, Node* arg6, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- Node** args = zone()->NewArray<Node*>(7);
- args[0] = arg1;
- args[1] = arg2;
- args[2] = arg3;
- args[3] = arg4;
- args[4] = arg5;
- args[5] = arg6;
- args[6] = context;
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
+ CallPrologue();
+ Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+ CallEpilogue();
+ return return_value;
}
+template <class... TArgs>
Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- const Arg& arg4, size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ Node* target, Node* context, TArgs... args) {
+ DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+ size_t result_size = 1;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
MachineType::AnyTagged(), result_size);
- const int kArgsCount = 5;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
+ Node* nodes[] = {target, args..., context};
- return raw_assembler_->TailCallN(call_descriptor, target, args);
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
- Node* target, Node* context, const Arg& arg1,
- const Arg& arg2, const Arg& arg3,
- const Arg& arg4, const Arg& arg5,
- size_t result_size) {
- CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
- CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
- MachineType::AnyTagged(), result_size);
-
- const int kArgsCount = 6;
- Node** args = zone()->NewArray<Node*>(kArgsCount);
- DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
- args[arg1.index] = arg1.value;
- args[arg2.index] = arg2.value;
- args[arg3.index] = arg3.value;
- args[arg4.index] = arg4.value;
- args[arg5.index] = arg5.value;
- args[kArgsCount - 1] = context;
- DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
- return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
+// Instantiate TailCallStub() with up to 6 arguments.
+#define INSTANTIATE(...) \
+ template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
+ const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
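+
+// Note: TailCallStub packs {target, args..., context} into one node array and
+// emits a tail call, so the caller's frame is replaced rather than extended.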
+template <class... TArgs>
Node* CodeAssembler::TailCallBytecodeDispatch(
- const CallInterfaceDescriptor& interface_descriptor,
- Node* code_target_address, Node** args) {
- CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
- isolate(), zone(), interface_descriptor,
- interface_descriptor.GetStackParameterCount());
- return raw_assembler_->TailCallN(descriptor, code_target_address, args);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver,
- size_t result_size) {
- const int argc = 0;
- Node* target = HeapConstant(callable.code());
+ const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+ DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+ CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+ isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+ Node* nodes[] = {target, args...};
+ return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
}
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- size_t result_size) {
- const int argc = 1;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- Node* arg2, size_t result_size) {
- const int argc = 2;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = arg2;
- args[5] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
- Node* function, Node* receiver, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size) {
- const int argc = 3;
- Node* target = HeapConstant(callable.code());
-
- Node** args = zone()->NewArray<Node*>(argc + 4);
- args[0] = function;
- args[1] = Int32Constant(argc);
- args[2] = receiver;
- args[3] = arg1;
- args[4] = arg2;
- args[5] = arg3;
- args[6] = context;
-
- return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
+// Instantiate TailCallBytecodeDispatch() with 4 arguments.
+template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
+ const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
+ Node*, Node*);
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
Node* arg0, Node* arg1) {
- return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
- function, arg0, arg1);
+ return raw_assembler()->CallCFunction2(return_type, arg0_type, arg1_type,
+ function, arg0, arg1);
+}
+
+Node* CodeAssembler::CallCFunction3(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type,
+ MachineType arg2_type, Node* function,
+ Node* arg0, Node* arg1, Node* arg2) {
+ return raw_assembler()->CallCFunction3(return_type, arg0_type, arg1_type,
+ arg2_type, function, arg0, arg1, arg2);
}
-void CodeAssembler::Goto(CodeAssembler::Label* label) {
+void CodeAssembler::Goto(Label* label) {
label->MergeVariables();
- raw_assembler_->Goto(label->label_);
+ raw_assembler()->Goto(label->label_);
}
void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
@@ -998,12 +636,12 @@ void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
Bind(&true_label);
}
-void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
- CodeAssembler::Label* false_label) {
+void CodeAssembler::Branch(Node* condition, Label* true_label,
+ Label* false_label) {
true_label->MergeVariables();
false_label->MergeVariables();
- return raw_assembler_->Branch(condition, true_label->label_,
- false_label->label_);
+ return raw_assembler()->Branch(condition, true_label->label_,
+ false_label->label_);
}
void CodeAssembler::Switch(Node* index, Label* default_label,
@@ -1017,75 +655,61 @@ void CodeAssembler::Switch(Node* index, Label* default_label,
case_labels[i]->MergeVariables();
default_label->MergeVariables();
}
- return raw_assembler_->Switch(index, default_label->label_, case_values,
- labels, case_count);
-}
-
-Node* CodeAssembler::Select(Node* condition, Node* true_value,
- Node* false_value, MachineRepresentation rep) {
- Variable value(this, rep);
- Label vtrue(this), vfalse(this), end(this);
- Branch(condition, &vtrue, &vfalse);
-
- Bind(&vtrue);
- {
- value.Bind(true_value);
- Goto(&end);
- }
- Bind(&vfalse);
- {
- value.Bind(false_value);
- Goto(&end);
- }
-
- Bind(&end);
- return value.value();
+ return raw_assembler()->Switch(index, default_label->label_, case_values,
+ labels, case_count);
}
// RawMachineAssembler delegate helpers:
-Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }
Factory* CodeAssembler::factory() const { return isolate()->factory(); }
-Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }
+
+RawMachineAssembler* CodeAssembler::raw_assembler() const {
+ return state_->raw_assembler_.get();
+}
// The core implementation of Variable is stored through an indirection so
// that it can outlive the often block-scoped Variable declarations. This is
// needed to ensure that variable binding and merging through phis can
// properly be verified.
-class CodeAssembler::Variable::Impl : public ZoneObject {
+class CodeAssemblerVariable::Impl : public ZoneObject {
public:
explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
Node* value_;
MachineRepresentation rep_;
};
-CodeAssembler::Variable::Variable(CodeAssembler* assembler,
- MachineRepresentation rep)
- : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
- assembler->variables_.insert(impl_);
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+ MachineRepresentation rep)
+ : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+ state_->variables_.insert(impl_);
}
-CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+CodeAssemblerVariable::~CodeAssemblerVariable() {
+ state_->variables_.erase(impl_);
+}
-void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
-Node* CodeAssembler::Variable::value() const {
+Node* CodeAssemblerVariable::value() const {
DCHECK_NOT_NULL(impl_->value_);
return impl_->value_;
}
-MachineRepresentation CodeAssembler::Variable::rep() const {
- return impl_->rep_;
-}
+MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }
-bool CodeAssembler::Variable::IsBound() const {
- return impl_->value_ != nullptr;
-}
+bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
-CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
- Variable** vars, CodeAssembler::Label::Type type)
- : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
+ size_t vars_count,
+ CodeAssemblerVariable** vars,
+ CodeAssemblerLabel::Type type)
+ : bound_(false),
+ merge_count_(0),
+ state_(assembler->state()),
+ label_(nullptr) {
void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
label_ = new (buffer)
RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
@@ -1095,9 +719,9 @@ CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
}
}
-void CodeAssembler::Label::MergeVariables() {
+void CodeAssemblerLabel::MergeVariables() {
++merge_count_;
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
size_t count = 0;
Node* node = var->value_;
if (node != nullptr) {
@@ -1122,7 +746,7 @@ void CodeAssembler::Label::MergeVariables() {
auto phi = variable_phis_.find(var);
if (phi != variable_phis_.end()) {
DCHECK_NOT_NULL(phi->second);
- assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+ state_->raw_assembler_->AppendPhiInput(phi->second, node);
} else {
auto i = variable_merges_.find(var);
if (i != variable_merges_.end()) {
@@ -1141,13 +765,13 @@ void CodeAssembler::Label::MergeVariables() {
}
}
-void CodeAssembler::Label::Bind() {
+void CodeAssemblerLabel::Bind() {
DCHECK(!bound_);
- assembler_->raw_assembler_->Bind(label_);
+ state_->raw_assembler_->Bind(label_);
// Make sure that all variables that have changed along any path up to this
// point are marked as merge variables.
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
Node* shared_value = nullptr;
auto i = variable_merges_.find(var);
if (i != variable_merges_.end()) {
@@ -1165,22 +789,23 @@ void CodeAssembler::Label::Bind() {
}
for (auto var : variable_phis_) {
- CodeAssembler::Variable::Impl* var_impl = var.first;
+ CodeAssemblerVariable::Impl* var_impl = var.first;
auto i = variable_merges_.find(var_impl);
- // If the following assert fires, then a variable that has been marked as
+ // If the following asserts fire, then a variable that has been marked as
// being merged at the label--either by explicitly marking it so in the
// label constructor or by having seen different bound values at branches
// into the label--doesn't have a bound value along all of the paths that
// have been merged into the label up to this point.
- DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
- Node* phi = assembler_->raw_assembler_->Phi(
+ DCHECK(i != variable_merges_.end());
+ DCHECK_EQ(i->second.size(), merge_count_);
+ Node* phi = state_->raw_assembler_->Phi(
var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
variable_phis_[var_impl] = phi;
}
// Bind all variables to a merge phi, the common value along all paths or
// null.
- for (auto var : assembler_->variables_) {
+ for (auto var : state_->variables_) {
auto i = variable_phis_.find(var);
if (i != variable_phis_.end()) {
var->value_ = i->second;
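
Putting the pieces together, the bookkeeping above supports the usual divergent-binding pattern; a sketch from inside a CodeAssembler subclass (constants illustrative):

    CodeAssemblerVariable var(this, MachineRepresentation::kWord32);
    CodeAssemblerLabel if_true(this), if_false(this), merge(this, &var);
    Branch(condition, &if_true, &if_false);
    Bind(&if_true);
    var.Bind(Int32Constant(1));
    Goto(&merge);  // MergeVariables() records 1 for var on this path.
    Bind(&if_false);
    var.Bind(Int32Constant(2));
    Goto(&merge);  // Records 2; the values differ, so a phi is needed.
    Bind(&merge);  // Emits Phi(kWord32, 1, 2) and rebinds var to it.
    Node* merged = var.value();
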
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 1f364d99e3..25b1fab4a7 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/machine-type.h"
@@ -30,10 +31,17 @@ class Zone;
namespace compiler {
class CallDescriptor;
+class CodeAssemblerLabel;
+class CodeAssemblerVariable;
+class CodeAssemblerState;
class Node;
class RawMachineAssembler;
class RawMachineLabel;
+typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+
+typedef std::function<void()> CodeAssemblerCallback;
+
#define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
V(Float32Equal) \
V(Float32LessThan) \
@@ -79,9 +87,7 @@ class RawMachineLabel;
V(Float64Pow) \
V(Float64InsertLowWord32) \
V(Float64InsertHighWord32) \
- V(IntPtrAdd) \
V(IntPtrAddWithOverflow) \
- V(IntPtrSub) \
V(IntPtrSubWithOverflow) \
V(IntPtrMul) \
V(Int32Add) \
@@ -157,6 +163,7 @@ class RawMachineLabel;
V(Float64RoundTiesEven) \
V(Float64RoundTruncate) \
V(Word32Clz) \
+ V(Word32Not) \
V(Word32BinaryNot)
// A "public" interface used by components outside of compiler directory to
@@ -175,22 +182,16 @@ class RawMachineLabel;
// clients, CodeAssembler also provides an abstraction for creating variables
// and enhanced Label functionality to merge variable values along paths where
// they have differing values, including loops.
+//
+// The CodeAssembler itself is stateless (and instances are expected to be
+// temporary-scoped and short-lived); all its state is encapsulated into
+// a CodeAssemblerState instance.
class V8_EXPORT_PRIVATE CodeAssembler {
public:
- // Create with CallStub linkage.
- // |result_size| specifies the number of results returned by the stub.
- // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
- CodeAssembler(Isolate* isolate, Zone* zone,
- const CallInterfaceDescriptor& descriptor, Code::Flags flags,
- const char* name, size_t result_size = 1);
-
- // Create with JSCall linkage.
- CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
- Code::Flags flags, const char* name);
+ explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
+ ~CodeAssembler();
- virtual ~CodeAssembler();
-
- Handle<Code> GenerateCode();
+ static Handle<Code> GenerateCode(CodeAssemblerState* state);
bool Is64() const;
bool IsFloat64RoundUpSupported() const;
@@ -198,24 +199,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
bool IsFloat64RoundTiesEvenSupported() const;
bool IsFloat64RoundTruncateSupported() const;
- class Label;
- class Variable {
- public:
- explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
- ~Variable();
- void Bind(Node* value);
- Node* value() const;
- MachineRepresentation rep() const;
- bool IsBound() const;
-
- private:
- friend class CodeAssembler;
- class Impl;
- Impl* impl_;
- CodeAssembler* assembler_;
- };
-
- typedef ZoneList<Variable*> VariableList;
+ // Shortened aliases for use in CodeAssembler subclasses.
+ typedef CodeAssemblerLabel Label;
+ typedef CodeAssemblerVariable Variable;
+ typedef CodeAssemblerVariableList VariableList;
// ===========================================================================
// Base Assembler
@@ -255,9 +242,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void Switch(Node* index, Label* default_label, const int32_t* case_values,
Label** case_labels, size_t case_count);
- Node* Select(Node* condition, Node* true_value, Node* false_value,
- MachineRepresentation rep = MachineRepresentation::kTagged);
-
// Access to the frame pointer
Node* LoadFramePointer();
Node* LoadParentFramePointer();
@@ -267,19 +251,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Load raw memory location.
Node* Load(MachineType rep, Node* base);
- Node* Load(MachineType rep, Node* base, Node* index);
- Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+ Node* Load(MachineType rep, Node* base, Node* offset);
+ Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
// Load a value from the root array.
Node* LoadRoot(Heap::RootListIndex root_index);
// Store value to raw memory location.
- Node* Store(MachineRepresentation rep, Node* base, Node* value);
- Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+ Node* Store(Node* base, Node* value);
+ Node* Store(Node* base, Node* offset, Node* value);
+ Node* StoreWithMapWriteBarrier(Node* base, Node* offset, Node* value);
Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
- Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+ Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
Node* value);
// Store a value to the root array.
@@ -290,6 +275,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
#undef DECLARE_CODE_ASSEMBLER_BINARY_OP
+ Node* IntPtrAdd(Node* left, Node* right);
+ Node* IntPtrSub(Node* left, Node* right);
+
Node* WordShl(Node* value, int shift);
Node* WordShr(Node* value, int shift);
Node* Word32Shr(Node* value, int shift);
@@ -316,149 +304,76 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* Projection(int index, Node* value);
// Calls
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4);
- Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5);
-
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5);
- Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
- Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, Node* arg6);
-
- // A pair of a zero-based argument index and a value.
- // It helps writing arguments order independent code.
- struct Arg {
- Arg(int index, Node* value) : index(index), value(value) {}
-
- int const index;
- Node* const value;
- };
-
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size = 1);
- Node* CallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
- Node* CallStubN(Callable const& callable, Node** args,
- size_t result_size = 1);
+ template <class... TArgs>
+ Node* CallRuntime(Runtime::FunctionId function, Node* context, TArgs... args);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
- Node* arg5, size_t result_size = 1);
+ template <class... TArgs>
+ Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
+ TArgs... args);
+ template <class... TArgs>
+ Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
+ Node* target = HeapConstant(callable.code());
+ return CallStub(callable.descriptor(), target, context, args...);
+ }
+
+ template <class... TArgs>
Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, size_t result_size = 1);
- Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, const Arg& arg5,
- size_t result_size = 1);
-
- Node* CallStubN(const CallInterfaceDescriptor& descriptor,
- int js_parameter_count, Node* target, Node** args,
- size_t result_size = 1);
- Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
- Node** args, size_t result_size = 1) {
- return CallStubN(descriptor, 0, target, args, result_size);
+ Node* context, TArgs... args) {
+ return CallStubR(descriptor, 1, target, context, args...);
}
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4,
- size_t result_size = 1);
- Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
- Node* arg2, Node* arg3, Node* arg4, Node* arg5,
- size_t result_size = 1);
+ template <class... TArgs>
+ Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
+ Node* target, Node* context, TArgs... args);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2,
- size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* arg6,
- size_t result_size = 1);
+ Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
+ int input_count, Node* const* inputs);
+ template <class... TArgs>
+ Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
+ Node* target = HeapConstant(callable.code());
+ return TailCallStub(callable.descriptor(), target, context, args...);
+ }
+
+ template <class... TArgs>
Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, size_t result_size = 1);
- Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
- Node* context, const Arg& arg1, const Arg& arg2,
- const Arg& arg3, const Arg& arg4, const Arg& arg5,
- size_t result_size = 1);
+ Node* context, TArgs... args);
+ template <class... TArgs>
Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
- Node* code_target_address, Node** args);
+ Node* target, TArgs... args);
+ template <class... TArgs>
Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
- Node* CallJS(Callable const& callable, Node* context, Node* function,
- Node* receiver, Node* arg1, Node* arg2, Node* arg3,
- size_t result_size = 1);
+ Node* receiver, TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ Node* arity = Int32Constant(argc);
+ return CallStub(callable, context, function, arity, receiver, args...);
+ }
+
+ template <class... TArgs>
+ Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
+ TArgs... args) {
+ int argc = static_cast<int>(sizeof...(args));
+ Node* arity = Int32Constant(argc);
+ Node* receiver = LoadRoot(Heap::kUndefinedValueRootIndex);
+
+ // Construct(target, new_target, arity, receiver, arguments...)
+ return CallStub(callable, context, new_target, new_target, arity, receiver,
+ args...);
+ }
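
Under the variadic scheme a call site simply lists its arguments and the template machinery forwards them; a sketch (runtime function and callable chosen purely for illustration):

    Node* result =
        CallRuntime(Runtime::kSetProperty, context, receiver, key, value);
    Node* sum = CallStub(CodeFactory::Add(isolate()), context, lhs, rhs);
    // ConstructJS prepends new_target twice (as target and new_target), the
    // deduced arity, and an undefined receiver before the listed arguments.
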
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
Node* arg1);
+ // Call to a C function with three arguments.
+ Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2);
+
// Exception handling support.
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
@@ -468,45 +383,68 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Isolate* isolate() const;
Zone* zone() const;
+ CodeAssemblerState* state() { return state_; }
+
+ void BreakOnNode(int node_id);
+
protected:
- // Enables subclasses to perform operations before and after a call.
- virtual void CallPrologue();
- virtual void CallEpilogue();
+ void RegisterCallGenerationCallbacks(
+ const CodeAssemblerCallback& call_prologue,
+ const CodeAssemblerCallback& call_epilogue);
+ void UnregisterCallGenerationCallbacks();
private:
- CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
- Code::Flags flags, const char* name);
+ RawMachineAssembler* raw_assembler() const;
- Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
- Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+ // Calls respective callback registered in the state.
+ void CallPrologue();
+ void CallEpilogue();
- std::unique_ptr<RawMachineAssembler> raw_assembler_;
- Code::Flags flags_;
- const char* name_;
- bool code_generated_;
- ZoneSet<Variable::Impl*> variables_;
+ CodeAssemblerState* state_;
DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
};
-class CodeAssembler::Label {
+class CodeAssemblerVariable {
+ public:
+ explicit CodeAssemblerVariable(CodeAssembler* assembler,
+ MachineRepresentation rep);
+ ~CodeAssemblerVariable();
+ void Bind(Node* value);
+ Node* value() const;
+ MachineRepresentation rep() const;
+ bool IsBound() const;
+
+ private:
+ friend class CodeAssemblerLabel;
+ friend class CodeAssemblerState;
+ class Impl;
+ Impl* impl_;
+ CodeAssemblerState* state_;
+};
+
+class CodeAssemblerLabel {
public:
enum Type { kDeferred, kNonDeferred };
- explicit Label(
+ explicit CodeAssemblerLabel(
+ CodeAssembler* assembler,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, 0, nullptr, type) {}
+ CodeAssemblerLabel(
CodeAssembler* assembler,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : CodeAssembler::Label(assembler, 0, nullptr, type) {}
- Label(CodeAssembler* assembler, const VariableList& merged_variables,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : CodeAssembler::Label(assembler, merged_variables.length(),
- &(merged_variables[0]), type) {}
- Label(CodeAssembler* assembler, size_t count, Variable** vars,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
- Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
- CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
- : Label(assembler, 1, &merged_variable, type) {}
- ~Label() {}
+ const CodeAssemblerVariableList& merged_variables,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, merged_variables.length(),
+ &(merged_variables[0]), type) {}
+ CodeAssemblerLabel(
+ CodeAssembler* assembler, size_t count, CodeAssemblerVariable** vars,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred);
+ CodeAssemblerLabel(
+ CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
+ CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+ : CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
+ ~CodeAssemblerLabel() {}
private:
friend class CodeAssembler;
@@ -516,14 +454,53 @@ class CodeAssembler::Label {
bool bound_;
size_t merge_count_;
- CodeAssembler* assembler_;
+ CodeAssemblerState* state_;
RawMachineLabel* label_;
// Map of variables that need to be merged to their phi nodes (or placeholders
// for those phis).
- std::map<Variable::Impl*, Node*> variable_phis_;
+ std::map<CodeAssemblerVariable::Impl*, Node*> variable_phis_;
// Map of variables to the list of value nodes that have been added from each
// merge path in their order of merging.
- std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+ std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+class V8_EXPORT_PRIVATE CodeAssemblerState {
+ public:
+ // Create with CallStub linkage.
+ // |result_size| specifies the number of results returned by the stub.
+ // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+ CodeAssemblerState(Isolate* isolate, Zone* zone,
+ const CallInterfaceDescriptor& descriptor,
+ Code::Flags flags, const char* name,
+ size_t result_size = 1);
+
+ // Create with JSCall linkage.
+ CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
+ Code::Flags flags, const char* name);
+
+ ~CodeAssemblerState();
+
+ const char* name() const { return name_; }
+ int parameter_count() const;
+
+ private:
+ friend class CodeAssembler;
+ friend class CodeAssemblerLabel;
+ friend class CodeAssemblerVariable;
+
+ CodeAssemblerState(Isolate* isolate, Zone* zone,
+ CallDescriptor* call_descriptor, Code::Flags flags,
+ const char* name);
+
+ std::unique_ptr<RawMachineAssembler> raw_assembler_;
+ Code::Flags flags_;
+ const char* name_;
+ bool code_generated_;
+ ZoneSet<CodeAssemblerVariable::Impl*> variables_;
+ CodeAssemblerCallback call_prologue_;
+ CodeAssemblerCallback call_epilogue_;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index c69e86e0a5..7863476871 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -33,8 +33,10 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info)
+CodeGenerator::CodeGenerator(
+ Frame* frame, Linkage* linkage, InstructionSequence* code,
+ CompilationInfo* info,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions)
: frame_access_state_(nullptr),
linkage_(linkage),
code_(code),
@@ -56,8 +58,10 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
jump_tables_(nullptr),
ools_(nullptr),
osr_pc_offset_(-1),
+ optimized_out_literal_id_(-1),
source_position_table_builder_(code->zone(),
- info->SourcePositionRecordingMode()) {
+ info->SourcePositionRecordingMode()),
+ protected_instructions_(protected_instructions) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -71,6 +75,15 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
+void CodeGenerator::AddProtectedInstruction(int instr_offset,
+ int landing_offset) {
+ if (protected_instructions_ != nullptr) {
+ trap_handler::ProtectedInstructionData data = {instr_offset,
+ landing_offset};
+ protected_instructions_->emplace_back(data);
+ }
+}
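
Architecture backends are expected to call this while emitting code that may fault; a hedged sketch of a call site inside an instruction emitter (offsets illustrative):

    // Record the potentially-faulting pc and its landing pad so the wasm
    // trap handler can map a hardware fault back to out-of-line code.
    int faulting_offset = masm()->pc_offset();
    // ... emit the guarded memory access ...
    AddProtectedInstruction(faulting_offset, landing_pad_offset);
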
+
Handle<Code> CodeGenerator::GenerateCode() {
CompilationInfo* info = this->info();
@@ -79,6 +92,11 @@ Handle<Code> CodeGenerator::GenerateCode() {
// the frame (that is done in AssemblePrologue).
FrameScope frame_scope(masm(), StackFrame::MANUAL);
+ if (info->is_source_positions_enabled()) {
+ SourcePosition source_position(info->shared_info()->start_position());
+ AssembleSourcePosition(source_position);
+ }
+
// Place function entry hook if requested to do so.
if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -392,6 +410,10 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
Instruction* instr, const InstructionBlock* block) {
int first_unused_stack_slot;
+ FlagsMode mode = FlagsModeField::decode(instr->opcode());
+ if (mode != kFlags_trap) {
+ AssembleSourcePosition(instr);
+ }
bool adjust_stack =
GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
@@ -404,12 +426,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
if (instr->IsJump() && block->must_deconstruct_frame()) {
AssembleDeconstructFrame();
}
- AssembleSourcePosition(instr);
// Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr);
if (result != kSuccess) return result;
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
case kFlags_branch: {
@@ -461,6 +481,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBoolean(instr, condition);
break;
}
+ case kFlags_trap: {
+ AssembleArchTrap(instr, condition);
+ break;
+ }
case kFlags_none: {
break;
}
@@ -468,10 +492,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
return kSuccess;
}
-
void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
SourcePosition source_position = SourcePosition::Unknown();
+ if (instr->IsNop() && instr->AreMovesRedundant()) return;
if (!code()->GetSourcePosition(instr, &source_position)) return;
+ AssembleSourcePosition(source_position);
+}
+
+void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
if (source_position == current_source_position_) return;
current_source_position_ = source_position;
if (!source_position.IsKnown()) return;
@@ -481,7 +509,13 @@ void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
CompilationInfo* info = this->info();
if (!info->parse_info()) return;
std::ostringstream buffer;
- buffer << "-- " << source_position.InliningStack(info) << " --";
+ buffer << "-- ";
+ if (FLAG_trace_turbo) {
+ buffer << source_position;
+ } else {
+ buffer << source_position.InliningStack(info);
+ }
+ buffer << " --";
masm()->RecordComment(StrDup(buffer.str().c_str()));
}
}
@@ -628,15 +662,6 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
OutputFrameStateCombine::Ignore());
}
-#if DEBUG
- // Make sure all the values live in stack slots or they are immediates.
- // (The values should not live in register because registers are clobbered
- // by calls.)
- for (size_t i = 0; i < descriptor->GetSize(); i++) {
- InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
- CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
- }
-#endif
safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
}
}
@@ -666,19 +691,37 @@ DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
}
void CodeGenerator::TranslateStateValueDescriptor(
- StateValueDescriptor* desc, Translation* translation,
- InstructionOperandIterator* iter) {
+ StateValueDescriptor* desc, StateValueList* nested,
+ Translation* translation, InstructionOperandIterator* iter) {
+ // Note:
+ // If translation is null, we just skip the relevant instruction operands.
if (desc->IsNested()) {
- translation->BeginCapturedObject(static_cast<int>(desc->size()));
- for (size_t index = 0; index < desc->fields().size(); index++) {
- TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+ if (translation != nullptr) {
+ translation->BeginCapturedObject(static_cast<int>(nested->size()));
+ }
+ for (auto field : *nested) {
+ TranslateStateValueDescriptor(field.desc, field.nested, translation,
+ iter);
}
} else if (desc->IsDuplicate()) {
- translation->DuplicateObject(static_cast<int>(desc->id()));
+ if (translation != nullptr) {
+ translation->DuplicateObject(static_cast<int>(desc->id()));
+ }
+ } else if (desc->IsPlain()) {
+ InstructionOperand* op = iter->Advance();
+ if (translation != nullptr) {
+ AddTranslationForOperand(translation, iter->instruction(), op,
+ desc->type());
+ }
} else {
- DCHECK(desc->IsPlain());
- AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
- desc->type());
+ DCHECK(desc->IsOptimizedOut());
+ if (translation != nullptr) {
+ if (optimized_out_literal_id_ == -1) {
+ optimized_out_literal_id_ =
+ DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+ }
+ translation->StoreLiteral(optimized_out_literal_id_);
+ }
}
}
@@ -686,44 +729,41 @@ void CodeGenerator::TranslateStateValueDescriptor(
void CodeGenerator::TranslateFrameStateDescriptorOperands(
FrameStateDescriptor* desc, InstructionOperandIterator* iter,
OutputFrameStateCombine combine, Translation* translation) {
- for (size_t index = 0; index < desc->GetSize(combine); index++) {
- switch (combine.kind()) {
- case OutputFrameStateCombine::kPushOutput: {
- DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
- size_t size_without_output =
- desc->GetSize(OutputFrameStateCombine::Ignore());
- // If the index is past the existing stack items in values_.
- if (index >= size_without_output) {
- // Materialize the result of the call instruction in this slot.
- AddTranslationForOperand(
- translation, iter->instruction(),
- iter->instruction()->OutputAt(index - size_without_output),
- MachineType::AnyTagged());
- continue;
- }
- break;
+ size_t index = 0;
+ StateValueList* values = desc->GetStateValueDescriptors();
+ for (StateValueList::iterator it = values->begin(); it != values->end();
+ ++it, ++index) {
+ StateValueDescriptor* value_desc = (*it).desc;
+ if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+ // The result of the call should be placed at position
+ // [index_from_top] in the stack (overwriting whatever was
+ // previously there).
+ size_t index_from_top =
+ desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+ if (index >= index_from_top &&
+ index < index_from_top + iter->instruction()->OutputCount()) {
+ DCHECK_NOT_NULL(translation);
+ AddTranslationForOperand(
+ translation, iter->instruction(),
+ iter->instruction()->OutputAt(index - index_from_top),
+ MachineType::AnyTagged());
+ // Skip the instruction operands.
+ TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
+ continue;
}
- case OutputFrameStateCombine::kPokeAt:
- // The result of the call should be placed at position
- // [index_from_top] in the stack (overwriting whatever was
- // previously there).
- size_t index_from_top =
- desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
- if (index >= index_from_top &&
- index < index_from_top + iter->instruction()->OutputCount()) {
- AddTranslationForOperand(
- translation, iter->instruction(),
- iter->instruction()->OutputAt(index - index_from_top),
- MachineType::AnyTagged());
- iter->Advance(); // We do not use this input, but we need to
- // advance, as the input got replaced.
- continue;
- }
- break;
}
- StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
- TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
- iter);
+ TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
+ }
+ DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
+
+ if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
+ DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+ for (size_t output = 0; output < combine.GetPushCount(); output++) {
+ // Materialize the result of the call instruction in this slot.
+ AddTranslationForOperand(translation, iter->instruction(),
+ iter->instruction()->OutputAt(output),
+ MachineType::AnyTagged());
+ }
}
}
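
To make the kPokeAt arm above concrete with illustrative numbers: for a frame state of size 4, one call output, and GetOffsetToPokeAt() == 1, index_from_top is 4 - 1 - 1 == 2, so the translation stores the call's output in place of state slot 2 and skips that slot's original operands, while every other slot is translated normally.
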
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 7aed85a37f..e20a8be774 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -12,6 +12,7 @@
#include "src/macro-assembler.h"
#include "src/safepoint-table.h"
#include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
namespace v8 {
namespace internal {
@@ -52,7 +53,9 @@ class InstructionOperandIterator {
class CodeGenerator final : public GapResolver::Assembler {
public:
explicit CodeGenerator(Frame* frame, Linkage* linkage,
- InstructionSequence* code, CompilationInfo* info);
+ InstructionSequence* code, CompilationInfo* info,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions = nullptr);
// Generate native code.
Handle<Code> GenerateCode();
@@ -65,6 +68,16 @@ class CodeGenerator final : public GapResolver::Assembler {
Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
+ void AddProtectedInstruction(int instr_offset, int landing_offset);
+
+ void AssembleSourcePosition(Instruction* instr);
+
+ void AssembleSourcePosition(SourcePosition source_position);
+
+ // Record a safepoint with the given pointer map.
+ void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
+ int arguments, Safepoint::DeoptMode deopt_mode);
+
private:
MacroAssembler* masm() { return &masm_; }
GapResolver* resolver() { return &resolver_; }
@@ -82,10 +95,6 @@ class CodeGenerator final : public GapResolver::Assembler {
// assembling code, in which case, a fall-through can be used.
bool IsNextInAssemblyOrder(RpoNumber block) const;
- // Record a safepoint with the given pointer map.
- void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
- int arguments, Safepoint::DeoptMode deopt_mode);
-
// Check if a heap object can be materialized by loading from a heap root,
// which is cheaper on some platforms than materializing the actual heap
// object constant.
@@ -100,7 +109,6 @@ class CodeGenerator final : public GapResolver::Assembler {
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(Instruction* instr,
const InstructionBlock* block);
- void AssembleSourcePosition(Instruction* instr);
void AssembleGaps(Instruction* instr);
// Returns true if an instruction is a tail call that needs to adjust the stack
@@ -116,6 +124,7 @@ class CodeGenerator final : public GapResolver::Assembler {
void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+ void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
@@ -213,6 +222,7 @@ class CodeGenerator final : public GapResolver::Assembler {
FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
Translation* translation, OutputFrameStateCombine state_combine);
void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+ StateValueList* nested,
Translation* translation,
InstructionOperandIterator* iter);
void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
@@ -279,7 +289,9 @@ class CodeGenerator final : public GapResolver::Assembler {
JumpTable* jump_tables_;
OutOfLineCode* ools_;
int osr_pc_offset_;
+ int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index 9a368162ef..85d49b7ae6 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -36,7 +36,6 @@ Decision DecideCondition(Node* const cond) {
} // namespace
-
CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
CommonOperatorBuilder* common,
MachineOperatorBuilder* machine)
@@ -44,8 +43,9 @@ CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
graph_(graph),
common_(common),
machine_(machine),
- dead_(graph->NewNode(common->Dead())) {}
-
+ dead_(graph->NewNode(common->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
@@ -195,15 +195,16 @@ Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
- int const input_count = node->InputCount() - 1;
- DCHECK_LE(1, input_count);
- Node* const merge = node->InputAt(input_count);
+ Node::Inputs inputs = node->inputs();
+ int const effect_input_count = inputs.count() - 1;
+ DCHECK_LE(1, effect_input_count);
+ Node* const merge = inputs[effect_input_count];
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
- DCHECK_EQ(input_count, merge->InputCount());
- Node* const effect = node->InputAt(0);
+ DCHECK_EQ(effect_input_count, merge->InputCount());
+ Node* const effect = inputs[0];
DCHECK_NE(node, effect);
- for (int i = 1; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 1; i < effect_input_count; ++i) {
+ Node* const input = inputs[i];
if (input == node) {
// Ignore redundant inputs.
DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -219,16 +220,18 @@ Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
Reduction CommonOperatorReducer::ReducePhi(Node* node) {
DCHECK_EQ(IrOpcode::kPhi, node->opcode());
- int const input_count = node->InputCount() - 1;
- DCHECK_LE(1, input_count);
- Node* const merge = node->InputAt(input_count);
+ Node::Inputs inputs = node->inputs();
+ int const value_input_count = inputs.count() - 1;
+ DCHECK_LE(1, value_input_count);
+ Node* const merge = inputs[value_input_count];
DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
- DCHECK_EQ(input_count, merge->InputCount());
- if (input_count == 2) {
- Node* vtrue = node->InputAt(0);
- Node* vfalse = node->InputAt(1);
- Node* if_true = merge->InputAt(0);
- Node* if_false = merge->InputAt(1);
+ DCHECK_EQ(value_input_count, merge->InputCount());
+ if (value_input_count == 2) {
+ Node* vtrue = inputs[0];
+ Node* vfalse = inputs[1];
+ Node::Inputs merge_inputs = merge->inputs();
+ Node* if_true = merge_inputs[0];
+ Node* if_false = merge_inputs[1];
if (if_true->opcode() != IrOpcode::kIfTrue) {
std::swap(if_true, if_false);
std::swap(vtrue, vfalse);
@@ -265,10 +268,10 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
}
}
}
- Node* const value = node->InputAt(0);
+ Node* const value = inputs[0];
DCHECK_NE(node, value);
- for (int i = 1; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 1; i < value_input_count; ++i) {
+ Node* const input = inputs[i];
if (input == node) {
// Ignore redundant inputs.
DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -284,7 +287,6 @@ Reduction CommonOperatorReducer::ReducePhi(Node* node) {
Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_EQ(IrOpcode::kReturn, node->opcode());
- Node* const value = node->InputAt(1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
bool changed = false;
@@ -295,25 +297,32 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
NodeProperties::ReplaceEffectInput(node, effect);
changed = true;
}
+ // TODO(ahaas): Extend the reduction below to multiple return values.
+ if (ValueInputCountOfReturn(node->op()) != 1) {
+ return NoChange();
+ }
+ Node* const value = node->InputAt(1);
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control &&
effect->opcode() == IrOpcode::kEffectPhi &&
NodeProperties::GetControlInput(effect) == control &&
control->opcode() == IrOpcode::kMerge) {
- int const control_input_count = control->InputCount();
- DCHECK_NE(0, control_input_count);
- DCHECK_EQ(control_input_count, value->InputCount() - 1);
- DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+ Node::Inputs control_inputs = control->inputs();
+ Node::Inputs value_inputs = value->inputs();
+ Node::Inputs effect_inputs = effect->inputs();
+ DCHECK_NE(0, control_inputs.count());
+ DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
+ DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
DCHECK_NE(0, graph()->end()->InputCount());
- for (int i = 0; i < control_input_count; ++i) {
+ for (int i = 0; i < control_inputs.count(); ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
// previously connected to {end}, so we know for sure that at some point
// the reducer logic will visit {end} again.
Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
- value->InputAt(i), effect->InputAt(i),
- control->InputAt(i));
+ value_inputs[i], effect_inputs[i],
+ control_inputs[i]);
NodeProperties::MergeControlToEnd(graph(), common(), ret);
}
// Mark the merge {control} and return {node} as {dead}.
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 9ce6f71a0f..2cd63314cf 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -7,9 +7,11 @@
#include "src/assembler.h"
#include "src/base/lazy-instance.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/zone/zone.h"
namespace v8 {
@@ -41,6 +43,13 @@ DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
return OpParameter<DeoptimizeReason>(op);
}
+int ValueInputCountOfReturn(Operator const* const op) {
+ DCHECK(op->opcode() == IrOpcode::kReturn);
+ // Return nodes have a hidden input at index 0 which we ignore in the value
+ // input count.
+ return op->ValueInputCount() - 1;
+}
+
size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
@@ -171,6 +180,106 @@ std::ostream& operator<<(std::ostream& os,
return os << p.value() << "|" << p.rmode() << "|" << p.type();
}
+SparseInputMask::InputIterator::InputIterator(
+ SparseInputMask::BitMaskType bit_mask, Node* parent)
+ : bit_mask_(bit_mask), parent_(parent), real_index_(0) {
+#if DEBUG
+ if (bit_mask_ != SparseInputMask::kDenseBitMask) {
+ DCHECK_EQ(base::bits::CountPopulation(bit_mask_) -
+ base::bits::CountPopulation(kEndMarker),
+ parent->InputCount());
+ }
+#endif
+}
+
+void SparseInputMask::InputIterator::Advance() {
+ DCHECK(!IsEnd());
+
+ if (IsReal()) {
+ ++real_index_;
+ }
+ bit_mask_ >>= 1;
+}
+
+Node* SparseInputMask::InputIterator::GetReal() const {
+ DCHECK(IsReal());
+ return parent_->InputAt(real_index_);
+}
+
+bool SparseInputMask::InputIterator::IsReal() const {
+ return bit_mask_ == SparseInputMask::kDenseBitMask ||
+ (bit_mask_ & kEntryMask);
+}
+
+bool SparseInputMask::InputIterator::IsEnd() const {
+ return (bit_mask_ == kEndMarker) ||
+ (bit_mask_ == SparseInputMask::kDenseBitMask &&
+ real_index_ >= parent_->InputCount());
+}
+
+int SparseInputMask::CountReal() const {
+ DCHECK(!IsDense());
+ return base::bits::CountPopulation(bit_mask_) -
+ base::bits::CountPopulation(kEndMarker);
+}
+
+SparseInputMask::InputIterator SparseInputMask::IterateOverInputs(Node* node) {
+ DCHECK(IsDense() || CountReal() == node->InputCount());
+ return InputIterator(bit_mask_, node);
+}
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+ return lhs.mask() == rhs.mask();
+}
+
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(SparseInputMask const& p) {
+ return base::hash_value(p.mask());
+}
+
+std::ostream& operator<<(std::ostream& os, SparseInputMask const& p) {
+ if (p.IsDense()) {
+ return os << "dense";
+ } else {
+ SparseInputMask::BitMaskType mask = p.mask();
+ DCHECK_NE(mask, SparseInputMask::kDenseBitMask);
+
+ os << "sparse:";
+
+ while (mask != SparseInputMask::kEndMarker) {
+ if (mask & SparseInputMask::kEntryMask) {
+ os << "^";
+ } else {
+ os << ".";
+ }
+ mask >>= 1;
+ }
+ return os;
+ }
+}
+
+bool operator==(TypedStateValueInfo const& lhs,
+ TypedStateValueInfo const& rhs) {
+ return lhs.machine_types() == rhs.machine_types() &&
+ lhs.sparse_input_mask() == rhs.sparse_input_mask();
+}
+
+bool operator!=(TypedStateValueInfo const& lhs,
+ TypedStateValueInfo const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(TypedStateValueInfo const& p) {
+ return base::hash_combine(p.machine_types(), p.sparse_input_mask());
+}
+
+std::ostream& operator<<(std::ostream& os, TypedStateValueInfo const& p) {
+ return os << p.machine_types() << "|" << p.sparse_input_mask();
+}
+
size_t hash_value(RegionObservability observability) {
return static_cast<size_t>(observability);
}
@@ -235,9 +344,23 @@ OsrGuardType OsrGuardTypeOf(Operator const* op) {
return OpParameter<OsrGuardType>(op);
}
+SparseInputMask SparseInputMaskOf(Operator const* op) {
+ DCHECK(op->opcode() == IrOpcode::kStateValues ||
+ op->opcode() == IrOpcode::kTypedStateValues);
+
+ if (op->opcode() == IrOpcode::kTypedStateValues) {
+ return OpParameter<TypedStateValueInfo>(op).sparse_input_mask();
+ }
+ return OpParameter<SparseInputMask>(op);
+}
+
ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
op->opcode() == IrOpcode::kTypedStateValues);
+
+ if (op->opcode() == IrOpcode::kTypedStateValues) {
+ return OpParameter<TypedStateValueInfo>(op).machine_types();
+ }
return OpParameter<const ZoneVector<MachineType>*>(op);
}
@@ -330,6 +453,21 @@ ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
V(WrongInstanceType) \
V(WrongMap)
+#define CACHED_TRAP_IF_LIST(V) \
+ V(TrapDivUnrepresentable) \
+ V(TrapFloatUnrepresentable)
+
+// The reason for a trap.
+#define CACHED_TRAP_UNLESS_LIST(V) \
+ V(TrapUnreachable) \
+ V(TrapMemOutOfBounds) \
+ V(TrapDivByZero) \
+ V(TrapDivUnrepresentable) \
+ V(TrapRemByZero) \
+ V(TrapFloatUnrepresentable) \
+ V(TrapFuncInvalid) \
+ V(TrapFuncSigMismatch)
+
#define CACHED_PARAMETER_LIST(V) \
V(0) \
V(1) \
@@ -529,6 +667,38 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
+ template <int32_t trap_id>
+ struct TrapIfOperator final : public Operator1<int32_t> {
+ TrapIfOperator()
+ : Operator1<int32_t>( // --
+ IrOpcode::kTrapIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapIf", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id) {} // parameter
+ };
+#define CACHED_TRAP_IF(Trap) \
+ TrapIfOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+ kTrapIf##Trap##Operator;
+ CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+
+ template <int32_t trap_id>
+ struct TrapUnlessOperator final : public Operator1<int32_t> {
+ TrapUnlessOperator()
+ : Operator1<int32_t>( // --
+ IrOpcode::kTrapUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapUnless", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id) {} // parameter
+ };
+#define CACHED_TRAP_UNLESS(Trap) \
+ TrapUnlessOperator<static_cast<int32_t>(Runtime::kThrowWasm##Trap)> \
+ kTrapUnless##Trap##Operator;
+ CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+
template <MachineRepresentation kRep, int kInputCount>
struct PhiOperator final : public Operator1<MachineRepresentation> {
PhiOperator()
@@ -588,13 +758,14 @@ struct CommonOperatorGlobalCache final {
#undef CACHED_PROJECTION
template <int kInputCount>
- struct StateValuesOperator final : public Operator {
+ struct StateValuesOperator final : public Operator1<SparseInputMask> {
StateValuesOperator()
- : Operator( // --
- IrOpcode::kStateValues, // opcode
- Operator::kPure, // flags
- "StateValues", // name
- kInputCount, 0, 0, 1, 0, 0) {} // counts
+ : Operator1<SparseInputMask>( // --
+ IrOpcode::kStateValues, // opcode
+ Operator::kPure, // flags
+ "StateValues", // name
+ kInputCount, 0, 0, 1, 0, 0, // counts
+ SparseInputMask::Dense()) {} // parameter
};
#define CACHED_STATE_VALUES(input_count) \
StateValuesOperator<input_count> kStateValues##input_count##Operator;
@@ -727,6 +898,43 @@ const Operator* CommonOperatorBuilder::DeoptimizeUnless(
reason); // parameter
}
+const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
+ switch (trap_id) {
+#define CACHED_TRAP_IF(Trap) \
+ case Runtime::kThrowWasm##Trap: \
+ return &cache_.kTrapIf##Trap##Operator;
+ CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+ default:
+ break;
+ }
+ // Uncached
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kTrapIf, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapIf", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id); // parameter
+}
+
+const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
+ switch (trap_id) {
+#define CACHED_TRAP_UNLESS(Trap) \
+ case Runtime::kThrowWasm##Trap: \
+ return &cache_.kTrapUnless##Trap##Operator;
+ CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+ default:
+ break;
+ }
+ // Uncached
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kTrapUnless, // opcode
+ Operator::kFoldable | Operator::kNoThrow, // properties
+ "TrapUnless", // name
+ 1, 1, 1, 0, 0, 1, // counts
+ trap_id); // parameter
+}
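
A sketch of how a wasm lowering would use these operators (graph, effect, and control nodes hypothetical; counts per the operator definitions above are 1 value, 1 effect, 1 control input):

    // Trap via Runtime::kThrowWasmTrapDivByZero when the divisor is zero.
    Node* is_zero = graph()->NewNode(machine()->Word32Equal(), divisor,
                                     jsgraph()->Int32Constant(0));
    Node* trap = graph()->NewNode(
        common()->TrapIf(
            static_cast<int32_t>(Runtime::kThrowWasmTrapDivByZero)),
        is_zero, effect, control);
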
const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
return new (zone()) Operator( // --
@@ -1000,30 +1208,44 @@ const Operator* CommonOperatorBuilder::BeginRegion(
return nullptr;
}
-const Operator* CommonOperatorBuilder::StateValues(int arguments) {
- switch (arguments) {
+const Operator* CommonOperatorBuilder::StateValues(int arguments,
+ SparseInputMask bitmask) {
+ if (bitmask.IsDense()) {
+ switch (arguments) {
#define CACHED_STATE_VALUES(arguments) \
case arguments: \
return &cache_.kStateValues##arguments##Operator;
- CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
#undef CACHED_STATE_VALUES
- default:
- break;
+ default:
+ break;
+ }
}
+
+#if DEBUG
+ DCHECK(bitmask.IsDense() || bitmask.CountReal() == arguments);
+#endif
+
// Uncached.
- return new (zone()) Operator( // --
- IrOpcode::kStateValues, Operator::kPure, // opcode
- "StateValues", // name
- arguments, 0, 0, 1, 0, 0); // counts
+ return new (zone()) Operator1<SparseInputMask>( // --
+ IrOpcode::kStateValues, Operator::kPure, // opcode
+ "StateValues", // name
+ arguments, 0, 0, 1, 0, 0, // counts
+ bitmask); // parameter
}
const Operator* CommonOperatorBuilder::TypedStateValues(
- const ZoneVector<MachineType>* types) {
- return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
- IrOpcode::kTypedStateValues, Operator::kPure, // opcode
- "TypedStateValues", // name
- static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
- types); // parameter
+ const ZoneVector<MachineType>* types, SparseInputMask bitmask) {
+#if DEBUG
+ DCHECK(bitmask.IsDense() ||
+ bitmask.CountReal() == static_cast<int>(types->size()));
+#endif
+
+ return new (zone()) Operator1<TypedStateValueInfo>( // --
+ IrOpcode::kTypedStateValues, Operator::kPure, // opcode
+ "TypedStateValues", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, // counts
+ TypedStateValueInfo(types, bitmask)); // parameters
}
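
A sketch of constructing a sparse state-values node with the new parameter (the mask encoding is documented in common-operator.h below; identifiers illustrative):

    // Three conceptual slots, of which only slots 0 and 2 are real:
    // LSB-first bits 1,0,1 plus the end-marker bit give 0xD (binary 1101).
    SparseInputMask mask(0xD);
    const Operator* op = common()->StateValues(/* arguments= */ 2, mask);
    Node* state_values = graph()->NewNode(op, slot0_value, slot2_value);
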
const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
@@ -1131,6 +1353,43 @@ const Operator* CommonOperatorBuilder::ResizeMergeOrPhi(const Operator* op,
}
}
+const Operator* CommonOperatorBuilder::Int32x4ExtractLane(int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kInt32x4ExtractLane, Operator::kPure, // opcode
+ "Int32x4ExtractLane", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Int32x4ReplaceLane(int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kInt32x4ReplaceLane, Operator::kPure, // opcode
+ "Int32x4ReplaceLane", // name
+ 2, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Float32x4ExtractLane(
+ int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kFloat32x4ExtractLane, Operator::kPure, // opcode
+ "Float32x4ExtractLane", // name
+ 1, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
+
+const Operator* CommonOperatorBuilder::Float32x4ReplaceLane(
+ int32_t lane_number) {
+ DCHECK(0 <= lane_number && lane_number < 4);
+ return new (zone()) Operator1<int32_t>( // --
+ IrOpcode::kFloat32x4ReplaceLane, Operator::kPure, // opcode
+ "Float32x4ReplaceLane", // name
+ 2, 0, 0, 1, 0, 0, // counts
+ lane_number); // parameter
+}
const FrameStateFunctionInfo*
CommonOperatorBuilder::CreateFrameStateFunctionInfo(
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 1f258a0ec0..5d0a6df31d 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -22,6 +22,7 @@ class CallDescriptor;
struct CommonOperatorGlobalCache;
class Operator;
class Type;
+class Node;
// Prediction hint for branches.
enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
@@ -48,6 +49,9 @@ V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
+// Helper function for return nodes, because returns have a hidden value input.
+int ValueInputCountOfReturn(Operator const* const op);
+
// Deoptimize bailout kind.
enum class DeoptimizeKind : uint8_t { kEager, kSoft };
@@ -158,6 +162,123 @@ std::ostream& operator<<(std::ostream&, RelocatablePtrConstantInfo const&);
size_t hash_value(RelocatablePtrConstantInfo const& p);
+// Used to define a sparse set of inputs. This can be used to efficiently encode
+// nodes that can have a lot of inputs, but where many inputs can have the same
+// value.
+class SparseInputMask final {
+ public:
+ typedef uint32_t BitMaskType;
+
+ // The mask representing a dense input set.
+ static const BitMaskType kDenseBitMask = 0x0;
+ // The bits representing the end of a sparse input set.
+ static const BitMaskType kEndMarker = 0x1;
+ // The mask for accessing a sparse input entry in the bitmask.
+ static const BitMaskType kEntryMask = 0x1;
+
+ // The number of bits in the mask, minus one for the end marker.
+ static const int kMaxSparseInputs = (sizeof(BitMaskType) * kBitsPerByte - 1);
+
+ // An iterator over a node's sparse inputs.
+ class InputIterator final {
+ public:
+ InputIterator() {}
+ InputIterator(BitMaskType bit_mask, Node* parent);
+
+ Node* parent() const { return parent_; }
+ int real_index() const { return real_index_; }
+
+ // Advance the iterator to the next sparse input. Only valid if the iterator
+ // has not reached the end.
+ void Advance();
+
+ // Get the current sparse input's real node value. Only valid if the
+ // current sparse input is real.
+ Node* GetReal() const;
+
+ // Get the current sparse input, returning either a real input node if
+ // the current sparse input is real, or the given {empty_value} if the
+ // current sparse input is empty.
+ Node* Get(Node* empty_value) const {
+ return IsReal() ? GetReal() : empty_value;
+ }
+
+ // True if the current sparse input is a real input node.
+ bool IsReal() const;
+
+ // True if the current sparse input is an empty value.
+ bool IsEmpty() const { return !IsReal(); }
+
+ // True if the iterator has reached the end of the sparse inputs.
+ bool IsEnd() const;
+
+ private:
+ BitMaskType bit_mask_;
+ Node* parent_;
+ int real_index_;
+ };
+
+ explicit SparseInputMask(BitMaskType bit_mask) : bit_mask_(bit_mask) {}
+
+ // Provides a SparseInputMask representing a dense input set.
+ static SparseInputMask Dense() { return SparseInputMask(kDenseBitMask); }
+
+ BitMaskType mask() const { return bit_mask_; }
+
+ bool IsDense() const { return bit_mask_ == SparseInputMask::kDenseBitMask; }
+
+ // Counts how many real values are in the sparse array. Only valid for
+ // non-dense masks.
+ int CountReal() const;
+
+ // Returns an iterator over the sparse inputs of {node}.
+ InputIterator IterateOverInputs(Node* node);
+
+ private:
+ //
+ // The sparse input mask has a bitmask specifying if the node's inputs are
+ // represented sparsely. If the bitmask value is 0, then the inputs are dense;
+ // otherwise, they should be interpreted as follows:
+ //
+ // * The bitmask represents which values are real, with 1 for real values
+ // and 0 for empty values.
+ // * The inputs to the node are the real values, in the order of the 1s from
+ // least- to most-significant.
+ // * The top bit of the bitmask is a guard indicating the end of the values,
+ // whether real or empty (and is not representative of a real input
+ // itself). This is used so that we don't have to additionally store a
+ // value count.
+ //
+ // So, for N 1s in the bitmask, there are N - 1 inputs into the node.
+ BitMaskType bit_mask_;
+};
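A worked decode under the encoding documented above (illustration only): for bit_mask = 0b1101, bit 0 is a real input, bit 1 an empty slot, bit 2 a real input, and the most-significant 1 is the end marker, so three sparse entries cover two real node inputs. CountReal() therefore has to behave like this sketch:

    // Equivalent to popcount(bit_mask) - 1 (discounting the end marker).
    // Only valid for non-dense masks; a dense mask of 0 never hits the marker.
    int CountRealSketch(SparseInputMask::BitMaskType bit_mask) {
      int count = 0;
      while (bit_mask != SparseInputMask::kEndMarker) {
        count += bit_mask & SparseInputMask::kEntryMask;  // 1 bit == real input
        bit_mask >>= 1;
      }
      return count;
    }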
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs);
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs);
+
+class TypedStateValueInfo final {
+ public:
+ TypedStateValueInfo(ZoneVector<MachineType> const* machine_types,
+ SparseInputMask sparse_input_mask)
+ : machine_types_(machine_types), sparse_input_mask_(sparse_input_mask) {}
+
+ ZoneVector<MachineType> const* machine_types() const {
+ return machine_types_;
+ }
+ SparseInputMask sparse_input_mask() const { return sparse_input_mask_; }
+
+ private:
+ ZoneVector<MachineType> const* machine_types_;
+ SparseInputMask sparse_input_mask_;
+};
+
+bool operator==(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+bool operator!=(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+
+std::ostream& operator<<(std::ostream&, TypedStateValueInfo const&);
+
+size_t hash_value(TypedStateValueInfo const& p);
+
// Used to mark a region (as identified by BeginRegion/FinishRegion) as either
// JavaScript-observable or not (i.e. allocations are not JavaScript observable
// themselves, but transitioning stores are).
@@ -181,6 +302,8 @@ size_t hash_value(OsrGuardType type);
std::ostream& operator<<(std::ostream&, OsrGuardType);
OsrGuardType OsrGuardTypeOf(Operator const*);
+SparseInputMask SparseInputMaskOf(Operator const*);
+
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
WARN_UNUSED_RESULT;
@@ -205,6 +328,8 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
const Operator* DeoptimizeIf(DeoptimizeReason reason);
const Operator* DeoptimizeUnless(DeoptimizeReason reason);
+ const Operator* TrapIf(int32_t trap_id);
+ const Operator* TrapUnless(int32_t trap_id);
const Operator* Return(int value_input_count = 1);
const Operator* Terminate();
@@ -243,8 +368,9 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Checkpoint();
const Operator* BeginRegion(RegionObservability);
const Operator* FinishRegion();
- const Operator* StateValues(int arguments);
- const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+ const Operator* StateValues(int arguments, SparseInputMask bitmask);
+ const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
+ SparseInputMask bitmask);
const Operator* ObjectState(int pointer_slots);
const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
const Operator* FrameState(BailoutId bailout_id,
@@ -260,6 +386,12 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
// with {size} inputs.
const Operator* ResizeMergeOrPhi(const Operator* op, int size);
+ // Simd Operators
+ const Operator* Int32x4ExtractLane(int32_t);
+ const Operator* Int32x4ReplaceLane(int32_t);
+ const Operator* Float32x4ExtractLane(int32_t);
+ const Operator* Float32x4ReplaceLane(int32_t);
+
// Constructs function info for frame state construction.
const FrameStateFunctionInfo* CreateFrameStateFunctionInfo(
FrameStateType type, int parameter_count, int local_count,
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index b159bb2da7..a0b3ebdd77 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -4,6 +4,8 @@
#include "src/compiler/control-builders.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -180,65 +182,6 @@ void BlockBuilder::EndBlock() {
set_environment(break_environment_);
}
-
-void TryCatchBuilder::BeginTry() {
- exit_environment_ = environment()->CopyAsUnreachable();
- catch_environment_ = environment()->CopyAsUnreachable();
- catch_environment_->Push(the_hole());
-}
-
-
-void TryCatchBuilder::Throw(Node* exception) {
- environment()->Push(exception);
- catch_environment_->Merge(environment());
- environment()->Pop();
- environment()->MarkAsUnreachable();
-}
-
-
-void TryCatchBuilder::EndTry() {
- exit_environment_->Merge(environment());
- exception_node_ = catch_environment_->Pop();
- set_environment(catch_environment_);
-}
-
-
-void TryCatchBuilder::EndCatch() {
- exit_environment_->Merge(environment());
- set_environment(exit_environment_);
-}
-
-
-void TryFinallyBuilder::BeginTry() {
- finally_environment_ = environment()->CopyAsUnreachable();
- finally_environment_->Push(the_hole());
- finally_environment_->Push(the_hole());
-}
-
-
-void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
- environment()->Push(value);
- environment()->Push(token);
- finally_environment_->Merge(environment());
- environment()->Drop(2);
-}
-
-
-void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
- environment()->Push(value);
- environment()->Push(fallthrough_token);
- finally_environment_->Merge(environment());
- environment()->Drop(2);
- token_node_ = finally_environment_->Pop();
- value_node_ = finally_environment_->Pop();
- set_environment(finally_environment_);
-}
-
-
-void TryFinallyBuilder::EndFinally() {
- // Nothing to be done here.
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index a59dcb699a..88efd276ad 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -145,59 +145,6 @@ class BlockBuilder final : public ControlBuilder {
Environment* break_environment_; // Environment after the block exits.
};
-
-// Tracks control flow for a try-catch statement.
-class TryCatchBuilder final : public ControlBuilder {
- public:
- explicit TryCatchBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder),
- catch_environment_(nullptr),
- exit_environment_(nullptr),
- exception_node_(nullptr) {}
-
- // Primitive control commands.
- void BeginTry();
- void Throw(Node* exception);
- void EndTry();
- void EndCatch();
-
- // Returns the exception value inside the 'catch' body.
- Node* GetExceptionNode() const { return exception_node_; }
-
- private:
- Environment* catch_environment_; // Environment for the 'catch' body.
- Environment* exit_environment_; // Environment after the statement.
- Node* exception_node_; // Node for exception in 'catch' body.
-};
-
-
-// Tracks control flow for a try-finally statement.
-class TryFinallyBuilder final : public ControlBuilder {
- public:
- explicit TryFinallyBuilder(AstGraphBuilder* builder)
- : ControlBuilder(builder),
- finally_environment_(nullptr),
- token_node_(nullptr),
- value_node_(nullptr) {}
-
- // Primitive control commands.
- void BeginTry();
- void LeaveTry(Node* token, Node* value);
- void EndTry(Node* token, Node* value);
- void EndFinally();
-
- // Returns the dispatch token value inside the 'finally' body.
- Node* GetDispatchTokenNode() const { return token_node_; }
-
- // Returns the saved result value inside the 'finally' body.
- Node* GetResultValueNode() const { return value_node_; }
-
- private:
- Environment* finally_environment_; // Environment for the 'finally' body.
- Node* token_node_; // Node for token in 'finally' body.
- Node* value_node_; // Node for value in 'finally' body.
-};
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 81bf2997e6..d66a9c58d5 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -18,8 +18,9 @@ DeadCodeElimination::DeadCodeElimination(Editor* editor, Graph* graph,
: AdvancedReducer(editor),
graph_(graph),
common_(common),
- dead_(graph->NewNode(common->Dead())) {}
-
+ dead_(graph->NewNode(common->Dead())) {
+ NodeProperties::SetType(dead_, Type::None());
+}
Reduction DeadCodeElimination::Reduce(Node* node) {
switch (node->opcode()) {
@@ -40,11 +41,11 @@ Reduction DeadCodeElimination::Reduce(Node* node) {
Reduction DeadCodeElimination::ReduceEnd(Node* node) {
DCHECK_EQ(IrOpcode::kEnd, node->opcode());
- int const input_count = node->InputCount();
- DCHECK_LE(1, input_count);
+ Node::Inputs inputs = node->inputs();
+ DCHECK_LE(1, inputs.count());
int live_input_count = 0;
- for (int i = 0; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 0; i < inputs.count(); ++i) {
+ Node* const input = inputs[i];
// Skip dead inputs.
if (input->opcode() == IrOpcode::kDead) continue;
// Compact live inputs.
@@ -53,20 +54,20 @@ Reduction DeadCodeElimination::ReduceEnd(Node* node) {
}
if (live_input_count == 0) {
return Replace(dead());
- } else if (live_input_count < input_count) {
+ } else if (live_input_count < inputs.count()) {
node->TrimInputCount(live_input_count);
NodeProperties::ChangeOp(node, common()->End(live_input_count));
return Changed(node);
}
- DCHECK_EQ(input_count, live_input_count);
+ DCHECK_EQ(inputs.count(), live_input_count);
return NoChange();
}
Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
- int const input_count = node->InputCount();
- DCHECK_LE(1, input_count);
+ Node::Inputs inputs = node->inputs();
+ DCHECK_LE(1, inputs.count());
// Count the number of live inputs to {node} and compact them on the fly, also
// compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
// same time. We consider {Loop}s dead even if only the first control input
@@ -74,8 +75,8 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
int live_input_count = 0;
if (node->opcode() != IrOpcode::kLoop ||
node->InputAt(0)->opcode() != IrOpcode::kDead) {
- for (int i = 0; i < input_count; ++i) {
- Node* const input = node->InputAt(i);
+ for (int i = 0; i < inputs.count(); ++i) {
+ Node* const input = inputs[i];
// Skip dead inputs.
if (input->opcode() == IrOpcode::kDead) continue;
// Compact live inputs.
@@ -83,7 +84,7 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
node->ReplaceInput(live_input_count, input);
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
- DCHECK_EQ(input_count + 1, use->InputCount());
+ DCHECK_EQ(inputs.count() + 1, use->InputCount());
use->ReplaceInput(live_input_count, use->InputAt(i));
}
}
@@ -109,9 +110,9 @@ Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
return Replace(node->InputAt(0));
}
DCHECK_LE(2, live_input_count);
- DCHECK_LE(live_input_count, input_count);
+ DCHECK_LE(live_input_count, inputs.count());
// Trim input count for the {Merge} or {Loop} node.
- if (live_input_count < input_count) {
+ if (live_input_count < inputs.count()) {
// Trim input counts for all phi uses and revisit them.
for (Node* const use : node->uses()) {
if (NodeProperties::IsPhi(use)) {
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d4b0576f79..b88906cfc1 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -24,7 +24,8 @@ EffectControlLinearizer::EffectControlLinearizer(
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- source_positions_(source_positions) {}
+ source_positions_(source_positions),
+ graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -596,829 +597,690 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
Node* frame_state,
Node** effect,
Node** control) {
- ValueEffectControl state(nullptr, nullptr, nullptr);
+ gasm()->Reset(*effect, *control);
+ Node* result = nullptr;
switch (node->opcode()) {
case IrOpcode::kChangeBitToTagged:
- state = LowerChangeBitToTagged(node, *effect, *control);
+ result = LowerChangeBitToTagged(node);
break;
case IrOpcode::kChangeInt31ToTaggedSigned:
- state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+ result = LowerChangeInt31ToTaggedSigned(node);
break;
case IrOpcode::kChangeInt32ToTagged:
- state = LowerChangeInt32ToTagged(node, *effect, *control);
+ result = LowerChangeInt32ToTagged(node);
break;
case IrOpcode::kChangeUint32ToTagged:
- state = LowerChangeUint32ToTagged(node, *effect, *control);
+ result = LowerChangeUint32ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTagged:
- state = LowerChangeFloat64ToTagged(node, *effect, *control);
+ result = LowerChangeFloat64ToTagged(node);
break;
case IrOpcode::kChangeFloat64ToTaggedPointer:
- state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+ result = LowerChangeFloat64ToTaggedPointer(node);
break;
case IrOpcode::kChangeTaggedSignedToInt32:
- state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+ result = LowerChangeTaggedSignedToInt32(node);
break;
case IrOpcode::kChangeTaggedToBit:
- state = LowerChangeTaggedToBit(node, *effect, *control);
+ result = LowerChangeTaggedToBit(node);
break;
case IrOpcode::kChangeTaggedToInt32:
- state = LowerChangeTaggedToInt32(node, *effect, *control);
+ result = LowerChangeTaggedToInt32(node);
break;
case IrOpcode::kChangeTaggedToUint32:
- state = LowerChangeTaggedToUint32(node, *effect, *control);
+ result = LowerChangeTaggedToUint32(node);
break;
case IrOpcode::kChangeTaggedToFloat64:
- state = LowerChangeTaggedToFloat64(node, *effect, *control);
+ result = LowerChangeTaggedToFloat64(node);
break;
case IrOpcode::kTruncateTaggedToBit:
- state = LowerTruncateTaggedToBit(node, *effect, *control);
+ result = LowerTruncateTaggedToBit(node);
break;
case IrOpcode::kTruncateTaggedToFloat64:
- state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+ result = LowerTruncateTaggedToFloat64(node);
break;
case IrOpcode::kCheckBounds:
- state = LowerCheckBounds(node, frame_state, *effect, *control);
+ result = LowerCheckBounds(node, frame_state);
break;
case IrOpcode::kCheckMaps:
- state = LowerCheckMaps(node, frame_state, *effect, *control);
+ result = LowerCheckMaps(node, frame_state);
break;
case IrOpcode::kCheckNumber:
- state = LowerCheckNumber(node, frame_state, *effect, *control);
+ result = LowerCheckNumber(node, frame_state);
break;
case IrOpcode::kCheckString:
- state = LowerCheckString(node, frame_state, *effect, *control);
+ result = LowerCheckString(node, frame_state);
+ break;
+ case IrOpcode::kCheckInternalizedString:
+ result = LowerCheckInternalizedString(node, frame_state);
break;
case IrOpcode::kCheckIf:
- state = LowerCheckIf(node, frame_state, *effect, *control);
+ result = LowerCheckIf(node, frame_state);
break;
case IrOpcode::kCheckedInt32Add:
- state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Add(node, frame_state);
break;
case IrOpcode::kCheckedInt32Sub:
- state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Sub(node, frame_state);
break;
case IrOpcode::kCheckedInt32Div:
- state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Div(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mod:
- state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Mod(node, frame_state);
break;
case IrOpcode::kCheckedUint32Div:
- state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32Div(node, frame_state);
break;
case IrOpcode::kCheckedUint32Mod:
- state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32Mod(node, frame_state);
break;
case IrOpcode::kCheckedInt32Mul:
- state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32Mul(node, frame_state);
break;
case IrOpcode::kCheckedInt32ToTaggedSigned:
- state =
- LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+ result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToInt32:
- state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedUint32ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedUint32ToTaggedSigned:
- state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
- *control);
+ result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedFloat64ToInt32:
- state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
- state =
- LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt32:
- state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToFloat64:
- state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+ result = LowerCheckedTaggedToFloat64(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToTaggedSigned:
- state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToTaggedPointer:
- state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
break;
case IrOpcode::kTruncateTaggedToWord32:
- state = LowerTruncateTaggedToWord32(node, *effect, *control);
+ result = LowerTruncateTaggedToWord32(node);
break;
case IrOpcode::kCheckedTruncateTaggedToWord32:
- state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
- *control);
+ result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
break;
case IrOpcode::kObjectIsCallable:
- state = LowerObjectIsCallable(node, *effect, *control);
+ result = LowerObjectIsCallable(node);
break;
case IrOpcode::kObjectIsNumber:
- state = LowerObjectIsNumber(node, *effect, *control);
+ result = LowerObjectIsNumber(node);
break;
case IrOpcode::kObjectIsReceiver:
- state = LowerObjectIsReceiver(node, *effect, *control);
+ result = LowerObjectIsReceiver(node);
break;
case IrOpcode::kObjectIsSmi:
- state = LowerObjectIsSmi(node, *effect, *control);
+ result = LowerObjectIsSmi(node);
break;
case IrOpcode::kObjectIsString:
- state = LowerObjectIsString(node, *effect, *control);
+ result = LowerObjectIsString(node);
break;
case IrOpcode::kObjectIsUndetectable:
- state = LowerObjectIsUndetectable(node, *effect, *control);
+ result = LowerObjectIsUndetectable(node);
+ break;
+ case IrOpcode::kNewRestParameterElements:
+ result = LowerNewRestParameterElements(node);
+ break;
+ case IrOpcode::kNewUnmappedArgumentsElements:
+ result = LowerNewUnmappedArgumentsElements(node);
break;
case IrOpcode::kArrayBufferWasNeutered:
- state = LowerArrayBufferWasNeutered(node, *effect, *control);
+ result = LowerArrayBufferWasNeutered(node);
break;
case IrOpcode::kStringFromCharCode:
- state = LowerStringFromCharCode(node, *effect, *control);
+ result = LowerStringFromCharCode(node);
break;
case IrOpcode::kStringFromCodePoint:
- state = LowerStringFromCodePoint(node, *effect, *control);
+ result = LowerStringFromCodePoint(node);
+ break;
+ case IrOpcode::kStringCharAt:
+ result = LowerStringCharAt(node);
break;
case IrOpcode::kStringCharCodeAt:
- state = LowerStringCharCodeAt(node, *effect, *control);
+ result = LowerStringCharCodeAt(node);
break;
case IrOpcode::kStringEqual:
- state = LowerStringEqual(node, *effect, *control);
+ result = LowerStringEqual(node);
break;
case IrOpcode::kStringLessThan:
- state = LowerStringLessThan(node, *effect, *control);
+ result = LowerStringLessThan(node);
break;
case IrOpcode::kStringLessThanOrEqual:
- state = LowerStringLessThanOrEqual(node, *effect, *control);
+ result = LowerStringLessThanOrEqual(node);
break;
case IrOpcode::kCheckFloat64Hole:
- state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+ result = LowerCheckFloat64Hole(node, frame_state);
break;
case IrOpcode::kCheckTaggedHole:
- state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+ result = LowerCheckTaggedHole(node, frame_state);
break;
case IrOpcode::kConvertTaggedHoleToUndefined:
- state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+ result = LowerConvertTaggedHoleToUndefined(node);
break;
case IrOpcode::kPlainPrimitiveToNumber:
- state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+ result = LowerPlainPrimitiveToNumber(node);
break;
case IrOpcode::kPlainPrimitiveToWord32:
- state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+ result = LowerPlainPrimitiveToWord32(node);
break;
case IrOpcode::kPlainPrimitiveToFloat64:
- state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+ result = LowerPlainPrimitiveToFloat64(node);
break;
case IrOpcode::kEnsureWritableFastElements:
- state = LowerEnsureWritableFastElements(node, *effect, *control);
+ result = LowerEnsureWritableFastElements(node);
break;
case IrOpcode::kMaybeGrowFastElements:
- state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+ result = LowerMaybeGrowFastElements(node, frame_state);
break;
case IrOpcode::kTransitionElementsKind:
- state = LowerTransitionElementsKind(node, *effect, *control);
+ LowerTransitionElementsKind(node);
break;
case IrOpcode::kLoadTypedElement:
- state = LowerLoadTypedElement(node, *effect, *control);
+ result = LowerLoadTypedElement(node);
break;
case IrOpcode::kStoreTypedElement:
- state = LowerStoreTypedElement(node, *effect, *control);
+ LowerStoreTypedElement(node);
break;
case IrOpcode::kFloat64RoundUp:
- state = LowerFloat64RoundUp(node, *effect, *control);
+ if (!LowerFloat64RoundUp(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundDown:
- state = LowerFloat64RoundDown(node, *effect, *control);
+ if (!LowerFloat64RoundDown(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundTruncate:
- state = LowerFloat64RoundTruncate(node, *effect, *control);
+ if (!LowerFloat64RoundTruncate(node).To(&result)) {
+ return false;
+ }
break;
case IrOpcode::kFloat64RoundTiesEven:
- state = LowerFloat64RoundTiesEven(node, *effect, *control);
+ if (!LowerFloat64RoundTiesEven(node).To(&result)) {
+ return false;
+ }
break;
default:
return false;
}
- NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
- *effect = state.effect;
- *control = state.control;
+ *effect = gasm()->ExtractCurrentEffect();
+ *control = gasm()->ExtractCurrentControl();
+ NodeProperties::ReplaceUses(node, result, *effect, *control);
return true;
}
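The net effect of this hunk is a calling-convention change for every lowering: effect and control are no longer threaded through by hand but tracked inside the graph assembler. Schematically (LowerFoo stands for any case above):

    // Before: each lowering returned a (value, effect, control) triple.
    ValueEffectControl state = LowerFoo(node, *effect, *control);
    // After: the assembler is seeded once, the lowering returns only the
    // value, and the final effect/control are read back out.
    gasm()->Reset(*effect, *control);
    Node* result = LowerFoo(node);
    *effect = gasm()->ExtractCurrentEffect();
    *control = gasm()->ExtractCurrentControl();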
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control) {
+#define __ gasm()->
+
+Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
Node* value = node->InputAt(0);
- return AllocateHeapNumberWithValue(value, effect, control);
+ return AllocateHeapNumberWithValue(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
Node* value = node->InputAt(0);
- return AllocateHeapNumberWithValue(value, effect, control);
+ return AllocateHeapNumberWithValue(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
Node* value = node->InputAt(0);
- Node* branch = graph()->NewNode(common()->Branch(), value, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->TrueConstant();
+ auto if_true = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = jsgraph()->FalseConstant();
+ __ GotoIf(value, &if_true);
+ __ Goto(&done, __ FalseConstant());
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_true);
+ __ Goto(&done, __ TrueConstant());
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
Node* value = node->InputAt(0);
- value = ChangeInt32ToSmi(value);
- return ValueEffectControl(value, effect, control);
+ return ChangeInt32ToSmi(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
Node* value = node->InputAt(0);
if (machine()->Is64()) {
- return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+ return ChangeInt32ToSmi(value);
}
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
- control);
+ auto if_overflow = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* ovf = __ Projection(1, add);
+ __ GotoIf(ovf, &if_overflow);
+ __ Goto(&done, __ Projection(0, add));
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- ValueEffectControl alloc =
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+ __ Bind(&if_overflow);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
+ __ Goto(&done, number);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
-
- Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- alloc.value, vfalse, merge);
- Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
-
- return ValueEffectControl(phi, ephi, merge);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
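The 32-bit fast path works because value + value equals value << 1, which is exactly the Smi encoding (31-bit payload above a zero tag bit); an add is used instead of a shift so the overflow flag can catch values that need more than 31 bits. For illustration, assuming the standard 32-bit Smi layout:

    //   5          -> 0b...0000101 + itself = 0b...0001010  (tagged Smi 5)
    //   0x40000000 -> signed add overflows -> deferred HeapNumber allocation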
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_in_smi_range = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = ChangeUint32ToSmi(value);
+ Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+ __ GotoUnless(check, &if_not_in_smi_range);
+ __ Goto(&done, ChangeUint32ToSmi(value));
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl alloc = AllocateHeapNumberWithValue(
- ChangeUint32ToFloat64(value), effect, if_false);
+ __ Bind(&if_not_in_smi_range);
+ Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));
- Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
- Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, alloc.value, merge);
- Node* ephi =
- graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+ __ Goto(&done, number);
+ __ Bind(&done);
- return ValueEffectControl(phi, ephi, merge);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
Node* value = node->InputAt(0);
- value = ChangeSmiToInt32(value);
- return ValueEffectControl(value, effect, control);
+ return ChangeSmiToInt32(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
- value = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant());
- return ValueEffectControl(value, effect, control);
+ return __ WordEqual(value, __ TrueConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
Node* value = node->InputAt(0);
- Node* zero = jsgraph()->Int32Constant(0);
- Node* fzero = jsgraph()->Float64Constant(0.0);
- // Collect effect/control/value triples.
- int count = 0;
- Node* values[6];
- Node* effects[6];
- Node* controls[5];
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto if_not_oddball = __ MakeDeferredLabel<1>();
+ auto if_not_string = __ MakeDeferredLabel<1>();
+ auto if_not_heapnumber = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<5>(MachineRepresentation::kBit);
+
+ Node* zero = __ Int32Constant(0);
+ Node* fzero = __ Float64Constant(0.0);
// Check if {value} is a Smi.
Node* check_smi = ObjectIsSmi(value);
- Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_smi, control);
-
- // If {value} is a Smi, then we only need to check that it's not zero.
- Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
- Node* esmi = effect;
- {
- controls[count] = if_smi;
- effects[count] = esmi;
- values[count] =
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->IntPtrConstant(0)),
- zero);
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_smi);
+ __ GotoIf(check_smi, &if_smi);
// Load the map instance type of {value}.
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
- Node* value_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- effect, control);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
// Check if {value} is an Oddball.
Node* check_oddball =
- graph()->NewNode(machine()->Word32Equal(), value_instance_type,
- jsgraph()->Int32Constant(ODDBALL_TYPE));
- Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check_oddball, control);
+ __ Word32Equal(value_instance_type, __ Int32Constant(ODDBALL_TYPE));
+ __ GotoUnless(check_oddball, &if_not_oddball);
// The only Oddball {value} that is trueish is true itself.
- Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
- Node* eoddball = effect;
- {
- controls[count] = if_oddball;
- effects[count] = eoddball;
- values[count] = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TrueConstant());
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_oddball);
+ __ Goto(&done, __ WordEqual(value, __ TrueConstant()));
+ __ Bind(&if_not_oddball);
// Check if {value} is a String.
- Node* check_string =
- graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
- jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
- Node* branch_string =
- graph()->NewNode(common()->Branch(), check_string, control);
-
+ Node* check_string = __ Int32LessThan(value_instance_type,
+ __ Int32Constant(FIRST_NONSTRING_TYPE));
+ __ GotoUnless(check_string, &if_not_string);
// For String {value}, we need to check that the length is not zero.
- Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
- Node* estring = effect;
- {
- // Load the {value} length.
- Node* value_length = estring = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForStringLength()), value,
- estring, if_string);
-
- controls[count] = if_string;
- effects[count] = estring;
- values[count] =
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->WordEqual(), value_length,
- jsgraph()->IntPtrConstant(0)),
- zero);
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_string);
+ Node* value_length = __ LoadField(AccessBuilder::ForStringLength(), value);
+ __ Goto(&done, __ Word32Equal(
+ __ WordEqual(value_length, __ IntPtrConstant(0)), zero));
+ __ Bind(&if_not_string);
// Check if {value} is a HeapNumber.
Node* check_heapnumber =
- graph()->NewNode(machine()->Word32Equal(), value_instance_type,
- jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
- Node* branch_heapnumber =
- graph()->NewNode(common()->Branch(), check_heapnumber, control);
-
- // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
- Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
- Node* eheapnumber = effect;
- {
- // Load the raw value of {value}.
- Node* value_value = eheapnumber = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- eheapnumber, if_heapnumber);
-
- // Check if {value} is not one of 0, -0, or NaN.
- controls[count] = if_heapnumber;
- effects[count] = eheapnumber;
- values[count] = graph()->NewNode(
- machine()->Float64LessThan(), fzero,
- graph()->NewNode(machine()->Float64Abs(), value_value));
- count++;
- }
- control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
+ __ Word32Equal(value_instance_type, __ Int32Constant(HEAP_NUMBER_TYPE));
+ __ GotoUnless(check_heapnumber, &if_not_heapnumber);
+
+ // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+ // NaN.
+ // Load the raw value of {value}.
+ Node* value_value = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
  // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
  // those cases we can just check the undetectable bit on the map, which will
  // only be set for certain JSReceivers, e.g. document.all.
- {
- // Load the {value} map bit field.
- Node* value_map_bitfield = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- effect, control);
-
- controls[count] = control;
- effects[count] = effect;
- values[count] = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), value_map_bitfield,
- jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
- zero);
- count++;
- }
+ __ Bind(&if_not_heapnumber);
+
+ // Load the {value} map bit field.
+ Node* value_map_bitfield =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ __ Goto(&done, __ Word32Equal(
+ __ Word32And(value_map_bitfield,
+ __ Int32Constant(1 << Map::kIsUndetectable)),
+ zero));
- // Merge the different controls.
- control = graph()->NewNode(common()->Merge(count), count, controls);
- effects[count] = control;
- effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
- values[count] = control;
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
- count + 1, values);
+ __ Bind(&if_smi);
+ // If {value} is a Smi, then we only need to check that it's not zero.
+ __ Goto(&done,
+ __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
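Spelled out, the truncation above implements a ToBoolean-style predicate, one label per case:

    //   Smi s          -> s != 0
    //   Oddball o      -> o == true        (only 'true' itself is truthy)
    //   String s       -> s.length != 0
    //   HeapNumber n   -> 0.0 < |n|        (rules out 0.0, -0.0 and NaN)
    //   anything else  -> undetectable map bit not set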
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToInt32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ ChangeFloat64ToUint32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
- return LowerTruncateTaggedToFloat64(node, effect, control);
+Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
+ return LowerTruncateTaggedToFloat64(node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- vtrue = ChangeSmiToInt32(value);
- vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
- }
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ Node* vtrue = ChangeSmiToInt32(value);
+ vtrue = __ ChangeInt32ToFloat64(vtrue);
+ __ Goto(&done, vtrue);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
Node* index = node->InputAt(0);
Node* limit = node->InputAt(1);
- Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
- frame_state, effect, control);
-
- return ValueEffectControl(index, effect, control);
+ Node* check = __ Uint32LessThan(index, limit);
+ __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check, frame_state);
+ return index;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
+ CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
Node* value = node->InputAt(0);
- // Load the current map of the {value}.
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+ ZoneHandleSet<Map> const& maps = p.maps();
+ size_t const map_count = maps.size();
- int const map_count = node->op()->ValueInputCount() - 1;
- Node** controls = temp_zone()->NewArray<Node*>(map_count);
- Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
+ if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
+ auto done =
+ __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count * 2);
+ auto migrate = __ MakeDeferredLabel<1>();
- for (int i = 0; i < map_count; ++i) {
- Node* map = node->InputAt(1 + i);
+ // Load the current map of the {value}.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
- if (i == map_count - 1) {
- controls[i] = effects[i] = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
- frame_state, effect, control);
- } else {
- control = graph()->NewNode(common()->Branch(), check, control);
- controls[i] = graph()->NewNode(common()->IfTrue(), control);
- control = graph()->NewNode(common()->IfFalse(), control);
- effects[i] = effect;
+ // Perform the map checks.
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ GotoUnless(check, &migrate);
+ __ Goto(&done);
+ } else {
+ __ GotoIf(check, &done);
+ }
}
- }
- control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
- effects[map_count] = control;
- effect =
- graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
+ // Perform the (deferred) instance migration.
+ __ Bind(&migrate);
+ {
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTryMigrateInstance;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+ Node* result =
+ __ Call(desc, __ CEntryStubConstant(1), value,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+ Node* check = ObjectIsSmi(result);
+ __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
+ frame_state);
+ }
- return ValueEffectControl(value, effect, control);
-}
+ // Reload the current map of the {value}.
+ value_map = __ LoadField(AccessBuilder::ForMap(), value);
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* value = node->InputAt(0);
+ // Perform the map checks again.
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+ } else {
+ __ GotoIf(check, &done);
+ }
+ }
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ __ Goto(&done);
+ __ Bind(&done);
+ } else {
+ auto done =
+ __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
+ // Load the current map of the {value}.
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- {
- Node* value_map = efalse0 =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse0, if_false0);
- Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- if_false0 = efalse0 = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
- frame_state, efalse0, if_false0);
+ for (size_t i = 0; i < map_count; ++i) {
+ Node* map = __ HeapConstant(maps[i]);
+ Node* check = __ WordEqual(value_map, map);
+ if (i == map_count - 1) {
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+ } else {
+ __ GotoIf(check, &done);
+ }
+ }
+ __ Goto(&done);
+ __ Bind(&done);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ return value;
}
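The migration variant reads most easily as a two-pass check (a sketch of the control flow above, not new code):

    //   pass 1: value_map == maps[i] for any i -> done
    //   miss on the last map                   -> Runtime::kTryMigrateInstance
    //     runtime result is a Smi (failure)    -> deopt kInstanceMigrationFailed
    //   pass 2: reload map, recheck all maps   -> deopt kWrongMap on final miss
    // Without kTryMigrateInstance only a single sweep is emitted, deopting
    // kWrongMap if the last comparison fails.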
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
- frame_state, effect, control);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>();
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
- Node* value_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- effect, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ __ Goto(&done);
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
- frame_state, effect, control);
+ __ Bind(&if_not_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+ __ Goto(&done);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
- value, frame_state, effect, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- return ValueEffectControl(value, effect, control);
+ Node* check1 = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
+ frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* lhs = node->InputAt(0);
- Node* rhs = node->InputAt(1);
+Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
+ Node* frame_state) {
+ Node* value = node->InputAt(0);
- Node* value =
- graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
+ Node* check0 = ObjectIsSmi(value);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check0, frame_state);
+
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
- Node* check = graph()->NewNode(common()->Projection(1), value, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
+ Node* check1 = __ Word32Equal(
+ __ Word32And(value_instance_type,
+ __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag));
+ __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check1,
+ frame_state);
- value = graph()->NewNode(common()->Projection(0), value, control);
+ return value;
+}
- return ValueEffectControl(value, effect, control);
+Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
+ Node* value = node->InputAt(0);
+ __ DeoptimizeUnless(DeoptimizeReason::kNoReason, value, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
- Node* value =
- graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
-
- Node* check = graph()->NewNode(common()->Projection(1), value, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
+ Node* value = __ Int32AddWithOverflow(lhs, rhs);
+ Node* check = __ Projection(1, value);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, value);
+}
- value = graph()->NewNode(common()->Projection(0), value, control);
+Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
+ Node* frame_state) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
- return ValueEffectControl(value, effect, control);
+ Node* value = __ Int32SubWithOverflow(lhs, rhs);
+ Node* check = __ Projection(1, value);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
- Node* minusone = jsgraph()->Int32Constant(-1);
- Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
-
+Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_is_minint = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ auto minint_check_done = __ MakeLabel<2>();
+
+ Node* zero = __ Int32Constant(0);
+
// Check if {rhs} is positive (and not zero).
- Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* check0 = __ Int32LessThan(zero, rhs);
+ __ GotoUnless(check0, &if_not_positive);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- // Fast case, no additional checking required.
- vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
- }
+ // Fast case, no additional checking required.
+ __ Goto(&done, __ Int32Div(lhs, rhs));
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
{
+ __ Bind(&if_not_positive);
+
// Check if {rhs} is zero.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- if_false0 = efalse0 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, efalse0, if_false0);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Check if {lhs} is zero, as that would produce minus zero.
- check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
- if_false0 = efalse0 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check, frame_state, efalse0, if_false0);
+ check = __ Word32Equal(lhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
// Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
// to return -kMinInt, which is not representable.
+ Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ __ GotoIf(check1, &if_is_minint);
+ __ Goto(&minint_check_done);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- {
- // Check if {rhs} is -1.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
- if_true1 = etrue1 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, etrue1, if_true1);
- }
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
+ __ Bind(&if_is_minint);
+ // Check if {rhs} is -1.
+ Node* minusone = __ Int32Constant(-1);
+ Node* is_minus_one = __ Word32Equal(rhs, minusone);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+ __ Goto(&minint_check_done);
+ __ Bind(&minint_check_done);
// Perform the actual integer division.
- vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+ __ Goto(&done, __ Int32Div(lhs, rhs));
}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
- vfalse0, control);
+ __ Bind(&done);
+ Node* value = done.PhiAt(0);
// Check if the remainder is non-zero.
- Node* check =
- graph()->NewNode(machine()->Word32Equal(), lhs,
- graph()->NewNode(machine()->Int32Mul(), rhs, value));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
- return ValueEffectControl(value, effect, control);
+ return value;
}
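
// A sketch of the checks above in plain C++ (Deopt() as in the earlier
// sketch); the division runs only after every trapping and -0 case has
// been deoptimized away:
int32_t CheckedInt32Div(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    if (rhs == 0) Deopt("DivisionByZero");
    if (lhs == 0) Deopt("MinusZero");  // 0 / negative would be -0 in JS
    if (lhs == INT32_MIN && rhs == -1) Deopt("Overflow");  // -kMinInt
  }
  int32_t result = lhs / rhs;
  if (lhs != rhs * result) Deopt("LostPrecision");  // non-zero remainder
  return result;
}
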
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
- Node* one = jsgraph()->Int32Constant(1);
-
+Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
+ Node* frame_state) {
// General case for signed integer modulus, with optimization for (unknown)
// power of 2 right hand side.
//
@@ -1439,1226 +1301,673 @@ EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ auto if_rhs_not_positive = __ MakeDeferredLabel<1>();
+ auto if_lhs_negative = __ MakeDeferredLabel<1>();
+ auto if_power_of_two = __ MakeLabel<1>();
+ auto rhs_checked = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
+
+ Node* zero = __ Int32Constant(0);
+
// Check if {rhs} is not strictly positive.
- Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+ Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
+ __ GotoIf(check0, &if_rhs_not_positive);
+ __ Goto(&rhs_checked, rhs);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
+ __ Bind(&if_rhs_not_positive);
{
// Negate {rhs}; this might still produce a negative result in the case
// of -2^31, but that is handled safely below.
- vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+ Node* vtrue0 = __ Int32Sub(zero, rhs);
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
- if_true0 = etrue0 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, etrue0, if_true0);
+ Node* check = __ Word32Equal(vtrue0, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+ __ Goto(&rhs_checked, vtrue0);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0 = rhs;
-
- // At this point {rhs} is either greater than zero or -2^31, both are
- // fine for the code that follows.
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue0, vfalse0, control);
+ __ Bind(&rhs_checked);
+ rhs = rhs_checked.PhiAt(0);
// Check if {lhs} is negative.
- Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
- {
- // Compute the remainder using {lhs % msk}.
- vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
-
- // Check if we would have to return -0.
- Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
- if_true1 = etrue1 =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check, frame_state, etrue1, if_true1);
- }
+ Node* check1 = __ Int32LessThan(lhs, zero);
+ __ GotoIf(check1, &if_lhs_negative);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = effect;
- Node* vfalse1;
+ // {lhs} non-negative.
{
- Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+ Node* one = __ Int32Constant(1);
+ Node* msk = __ Int32Sub(rhs, one);
// Check if {rhs} minus one is a valid mask.
- Node* check2 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+ Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
+ __ GotoIf(check2, &if_power_of_two);
+ // Compute the remainder using the generic {lhs % rhs}.
+ __ Goto(&done, __ Int32Mod(lhs, rhs));
+ __ Bind(&if_power_of_two);
// Compute the remainder using {lhs & msk}.
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+ __ Goto(&done, __ Word32And(lhs, msk));
+ }
- // Compute the remainder using the generic {lhs % rhs}.
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 =
- graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+ __ Bind(&if_lhs_negative);
+ {
+ // Compute the remainder using {lhs % rhs}.
+ Node* vtrue1 = __ Int32Mod(lhs, rhs);
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue2, vfalse2, if_false1);
+ // Check if we would have to return -0.
+ Node* check = __ Word32Equal(vtrue1, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+ __ Goto(&done, vtrue1);
}
- control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
- vfalse1, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
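
// A sketch of the same logic in plain C++ (Deopt() as before); the unsigned
// casts keep the -2^31 wraparound and the mask arithmetic well-defined:
int32_t CheckedInt32Mod(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    rhs = static_cast<int32_t>(0u - static_cast<uint32_t>(rhs));  // wraps for -2^31
    if (rhs == 0) Deopt("DivisionByZero");
  }
  if (lhs < 0) {
    int32_t result = lhs % rhs;
    if (result == 0) Deopt("MinusZero");  // negative dividend: JS result is -0
    return result;
  }
  uint32_t msk = static_cast<uint32_t>(rhs) - 1;
  if ((static_cast<uint32_t>(rhs) & msk) == 0)  // {rhs} is a power of two
    return static_cast<int32_t>(static_cast<uint32_t>(lhs) & msk);
  return lhs % rhs;  // generic case
}
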
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ Node* zero = __ Int32Constant(0);
+
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- control = effect = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Perform the actual unsigned integer division.
- Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+ Node* value = __ Uint32Div(lhs, rhs);
// Check if the remainder is non-zero.
- check = graph()->NewNode(machine()->Word32Equal(), lhs,
- graph()->NewNode(machine()->Int32Mul(), rhs, value));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+ return value;
}
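
// Sketch (Deopt() as before); LowerCheckedUint32Mod below is the same zero
// guard followed by a plain lhs % rhs, with no precision check needed:
uint32_t CheckedUint32Div(uint32_t lhs, uint32_t rhs) {
  if (rhs == 0) Deopt("DivisionByZero");
  uint32_t result = lhs / rhs;
  if (lhs != rhs * result) Deopt("LostPrecision");  // remainder would be lost
  return result;
}
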
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control) {
- Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
+ Node* frame_state) {
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
+ Node* zero = __ Int32Constant(0);
+
// Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
- Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
- control = effect = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
- frame_state, effect, control);
+ Node* check = __ Word32Equal(rhs, zero);
+ __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
// Perform the actual unsigned integer modulus.
- Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
-
- return ValueEffectControl(value, effect, control);
+ return __ Uint32Mod(lhs, rhs);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
- Node* zero = jsgraph()->Int32Constant(0);
Node* lhs = node->InputAt(0);
Node* rhs = node->InputAt(1);
- Node* projection =
- graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+ Node* projection = __ Int32MulWithOverflow(lhs, rhs);
+ Node* check = __ Projection(1, projection);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
- Node* check = graph()->NewNode(common()->Projection(1), projection, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
-
- Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+ Node* value = __ Projection(0, projection);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, control);
-
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* e_if_zero = effect;
- {
- // We may need to return negative zero.
- Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
- Node* check_or =
- graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
- if_zero = e_if_zero =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check_or, frame_state, e_if_zero, if_zero);
- }
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto check_done = __ MakeLabel<2>();
+ Node* zero = __ Int32Constant(0);
+ Node* check_zero = __ Word32Equal(value, zero);
+ __ GotoIf(check_zero, &if_zero);
+ __ Goto(&check_done);
- Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
- Node* e_if_not_zero = effect;
+ __ Bind(&if_zero);
+ // We may need to return negative zero.
+ Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+ __ Goto(&check_done);
- control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
- effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
- control);
+ __ Bind(&check_done);
}
- return ValueEffectControl(value, effect, control);
+ return value;
}
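
// Sketch of the multiply lowering (Deopt() as before): overflow is caught by
// the widened product, and a zero product deopts when either factor was
// negative, since the JS result would then be -0:
int32_t CheckedInt32Mul(int32_t lhs, int32_t rhs, bool check_minus_zero) {
  int64_t wide = int64_t{lhs} * int64_t{rhs};  // Int32MulWithOverflow
  if (wide != static_cast<int32_t>(wide)) Deopt("Overflow");
  int32_t result = static_cast<int32_t>(wide);
  if (check_minus_zero && result == 0 && (lhs | rhs) < 0) Deopt("MinusZero");
  return result;
}
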
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
+ Node* node, Node* frame_state) {
DCHECK(SmiValuesAre31Bits());
Node* value = node->InputAt(0);
- Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
- control);
-
- Node* check = graph()->NewNode(common()->Projection(1), add, control);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
- check, frame_state, effect, control);
-
- value = graph()->NewNode(common()->Projection(0), add, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* add = __ Int32AddWithOverflow(value, value);
+ Node* check = __ Projection(1, add);
+ __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+ return __ Projection(0, add);
}
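
// On 31-bit-Smi targets a Smi is the int32 value shifted left by one with a
// zero tag bit, so value + value both tags the value and range-checks it in
// a single Int32AddWithOverflow. Sketch (Deopt() as before):
int32_t CheckedInt32ToTaggedSigned(int32_t value) {
  int64_t wide = int64_t{value} + int64_t{value};
  if (wide != static_cast<int32_t>(wide)) Deopt("Overflow");  // not 31-bit
  return static_cast<int32_t>(wide);  // bit pattern of the resulting Smi
}
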
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
- Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
- Node* is_safe =
- graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
+ Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
- SmiMaxValueConstant());
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
- frame_state, effect, control);
- value = ChangeUint32ToSmi(value);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
- Node* value,
- Node* frame_state,
- Node* effect,
- Node* control) {
- Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
- Node* check_same = graph()->NewNode(
- machine()->Float64Equal(), value,
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
- check_same, frame_state, effect, control);
+ Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+ return ChangeUint32ToSmi(value);
+}
+
+Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
+ CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+ Node* value32 = __ RoundFloat64ToInt32(value);
+ Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
+ __ DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
+ frame_state);
if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
// Check if {value} is -0.
- Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
- jsgraph()->Int32Constant(0));
- Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check_zero, control);
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto check_done = __ MakeLabel<2>();
- Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
- Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+ Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
+ __ GotoIf(check_zero, &if_zero);
+ __ Goto(&check_done);
+ __ Bind(&if_zero);
// In case of 0, we need to check the high bits for the IEEE -0 pattern.
- Node* check_negative = graph()->NewNode(
- machine()->Int32LessThan(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(0));
-
- Node* deopt_minus_zero =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
- check_negative, frame_state, effect, if_zero);
-
- control =
- graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
- effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
- control);
- }
+ Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(0));
+ __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+ __ Goto(&check_done);
- return ValueEffectControl(value32, effect, control);
+ __ Bind(&check_done);
+ }
+ return value32;
}
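
// Sketch (Deopt() as before): the cast stands in for RoundFloat64ToInt32;
// whatever it yields for NaN or out-of-range inputs, the round-trip compare
// fails and deopts, and the sign bit distinguishes -0.0 from +0.0 (the
// lowering reads it from the high word of the double):
#include <cmath>
int32_t CheckedFloat64ToInt32(double value, bool check_minus_zero) {
  int32_t value32 = static_cast<int32_t>(value);
  if (static_cast<double>(value32) != value) Deopt("LostPrecisionOrNaN");
  if (check_minus_zero && value32 == 0 && std::signbit(value)) Deopt("MinusZero");
  return value32;
}
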
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
-
- return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
+ return BuildCheckedFloat64ToInt32(mode, value, frame_state);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
-
Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
- check, frame_state, effect, control);
- value = ChangeSmiToInt32(value);
-
- return ValueEffectControl(value, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
+ return ChangeSmiToInt32(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+ Node* frame_state) {
CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
// In the Smi case, just convert to int32.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ __ Goto(&done, ChangeSmiToInt32(value));
// In the non-Smi case, check the heap numberness, load the number and convert
// to int32.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- if_false = efalse = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
- frame_state, efalse, if_false);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- ValueEffectControl state =
- BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
- if_false = state.control;
- efalse = state.effect;
- vfalse = state.value;
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
- Node* control) {
- Node* value_map = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-
- Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
-
+ __ Bind(&if_not_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_map,
+ frame_state);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+ __ Goto(&done, vfalse);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
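
// Sketch with hypothetical helpers IsSmi/SmiToInt32/IsHeapNumber/
// LoadHeapNumberValue; CheckedFloat64ToInt32 is the sketch above:
int32_t CheckedTaggedToInt32(Object* value, bool check_minus_zero) {
  if (IsSmi(value)) return SmiToInt32(value);         // fast path
  if (!IsHeapNumber(value)) Deopt("NotAHeapNumber");  // map check
  return CheckedFloat64ToInt32(LoadHeapNumberValue(value), check_minus_zero);
}
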
+
+Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
switch (mode) {
case CheckTaggedInputMode::kNumber: {
- control = effect = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
- check_number, frame_state, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_number,
+ frame_state);
break;
}
case CheckTaggedInputMode::kNumberOrOddball: {
- Node* branch =
- graph()->NewNode(common()->Branch(), check_number, control);
+ auto check_done = __ MakeLabel<2>();
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
-
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ __ GotoIf(check_number, &check_done);
// Oddballs also contain the numeric value, so let us just check that
// we have an oddball here.
- Node* efalse = effect;
- Node* instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- value_map, efalse, if_false);
+ Node* instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
Node* check_oddball =
- graph()->NewNode(machine()->Word32Equal(), instance_type,
- jsgraph()->Int32Constant(ODDBALL_TYPE));
- if_false = efalse = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
- check_oddball, frame_state, efalse, if_false);
+ __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
+ __ DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
+ frame_state);
STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ __ Goto(&check_done);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+ __ Bind(&check_done);
break;
}
}
-
- value = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- effect, control);
- return ValueEffectControl(value, effect, control);
+ return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+ Node* frame_state) {
CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
Node* value = node->InputAt(0);
+ auto if_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
Node* check = ObjectIsSmi(value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
+ __ GotoIf(check, &if_smi);
// In the Smi case, just convert to int32 and then float64.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
- vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-
// Otherwise, check heap numberness and load the number.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
- mode, value, frame_state, effect, if_false);
-
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_true, number_state.control);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
- number_state.effect, merge);
- Node* result =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
- number_state.value, merge);
-
- return ValueEffectControl(result, effect_phi, merge);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
- Node* value = node->InputAt(0);
+ Node* number =
+ BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+ __ Goto(&done, number);
- Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
- check, frame_state, effect, control);
+ __ Bind(&if_smi);
+ Node* from_smi = ChangeSmiToInt32(value);
+ from_smi = __ ChangeInt32ToFloat64(from_smi);
+ __ Goto(&done, from_smi);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
- frame_state, effect, control);
+ __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
- return ValueEffectControl(value, effect, control);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+ return value;
+}
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
+ Node* value = node->InputAt(0);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
- vfalse = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
- }
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_not_smi);
+ STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+ Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+ vfalse = __ TruncateFloat64ToWord32(vfalse);
+ __ Goto(&done, vfalse);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
+ Node* node, Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+ auto if_not_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+ Node* check = ObjectIsSmi(value);
+ __ GotoUnless(check, &if_not_smi);
// In the Smi case, just convert to int32.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = ChangeSmiToInt32(value);
+ __ Goto(&done, ChangeSmiToInt32(value));
// Otherwise, check that it's a heap number or oddball and truncate the value
// to int32.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
- if_false);
- false_state.value =
- graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
-
- Node* merge =
- graph()->NewNode(common()->Merge(2), if_true, false_state.control);
- Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
- false_state.effect, merge);
- Node* result =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
- false_state.value, merge);
-
- return ValueEffectControl(result, effect_phi, merge);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
- Node* control) {
+ __ Bind(&if_not_smi);
+ Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+ CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+ number = __ TruncateFloat64ToWord32(number);
+ __ Goto(&done, number);
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsCallable(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_bit_field = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- jsgraph()->Int32Constant(1 << Map::kIsCallable),
- graph()->NewNode(
- machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant((1 << Map::kIsCallable) |
- (1 << Map::kIsUndetectable))));
- }
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ Node* vfalse = __ Word32Equal(
+ __ Int32Constant(1 << Map::kIsCallable),
+ __ Word32And(value_bit_field,
+ __ Int32Constant((1 << Map::kIsCallable) |
+ (1 << Map::kIsUndetectable))));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
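
// The bit-field test above, as a predicate on Map::bit_field(): a value is
// callable only if kIsCallable is set and kIsUndetectable is clear, so that
// undetectable callables (document.all) still report false:
bool ObjectIsCallable(uint32_t map_bit_field) {
  const uint32_t kMask =
      (1u << Map::kIsCallable) | (1u << Map::kIsUndetectable);
  return (map_bit_field & kMask) == (1u << Map::kIsCallable);
}
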
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch = graph()->NewNode(common()->Branch(), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(1);
+ auto if_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
- jsgraph()->HeapNumberMapConstant());
- }
+ __ GotoIf(ObjectIsSmi(value), &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(1));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ __ GotoIf(ObjectIsSmi(value), &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
- jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
- value_instance_type);
- }
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* result = __ Uint32LessThanOrEqual(
+ __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+ __ Goto(&done, result);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
Node* value = node->InputAt(0);
- value = ObjectIsSmi(value);
- return ValueEffectControl(value, effect, control);
+ return ObjectIsSmi(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_instance_type = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
- jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
- }
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_instance_type =
+ __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+ Node* vfalse = __ Uint32LessThan(value_instance_type,
+ __ Uint32Constant(FIRST_NONSTRING_TYPE));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
Node* value = node->InputAt(0);
- Node* check = ObjectIsSmi(value);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+ auto if_smi = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = jsgraph()->Int32Constant(0);
+ Node* check = ObjectIsSmi(value);
+ __ GotoIf(check, &if_smi);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* value_map = efalse =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- value, efalse, if_false);
- Node* value_bit_field = efalse = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
- efalse, if_false);
- vfalse = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32Equal(), jsgraph()->Int32Constant(0),
- graph()->NewNode(
- machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
- jsgraph()->Int32Constant(0));
- }
+ Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+ Node* vfalse = __ Word32Equal(
+ __ Word32Equal(__ Int32Constant(0),
+ __ Word32And(value_bit_field,
+ __ Int32Constant(1 << Map::kIsUndetectable))),
+ __ Int32Constant(0));
+ __ Goto(&done, vfalse);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
- vfalse, control);
+ __ Bind(&if_smi);
+ __ Goto(&done, __ Int32Constant(0));
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
- Node* control) {
- Node* value = node->InputAt(0);
-
- Node* value_bit_field = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
- effect, control);
- value = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), value_bit_field,
- jsgraph()->Int32Constant(
- JSArrayBuffer::WasNeutered::kMask)),
- jsgraph()->Int32Constant(0)),
- jsgraph()->Int32Constant(0));
-
- return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
- Node* control) {
- Node* subject = node->InputAt(0);
- Node* index = node->InputAt(1);
-
- // We may need to loop several times for ConsString/SlicedString {subject}s.
- Node* loop =
- graph()->NewNode(common()->Loop(4), control, control, control, control);
- Node* lsubject =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
- subject, subject, subject, subject, loop);
- Node* lindex =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
- index, index, index, loop);
- Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
- effect, effect, loop);
-
- control = loop;
- effect = leffect;
-
- // Determine the instance type of {lsubject}.
- Node* lsubject_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- lsubject, effect, control);
- Node* lsubject_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
- lsubject_map, effect, control);
-
- // Check if {lsubject} is a SeqString.
- Node* check0 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kSeqStringTag));
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
- Node* check1 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringEncodingMask)),
- jsgraph()->Int32Constant(kTwoByteStringTag));
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1 = etrue1 =
- graph()->NewNode(simplified()->LoadElement(
- AccessBuilder::ForSeqTwoByteStringCharacter()),
- lsubject, lindex, etrue1, if_true1);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = efalse1 =
- graph()->NewNode(simplified()->LoadElement(
- AccessBuilder::ForSeqOneByteStringCharacter()),
- lsubject, lindex, efalse1, if_false1);
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_true0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- // Check if the {lsubject} is a ConsString.
- Node* check1 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kConsStringTag));
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- {
- // Load the right hand side of the {lsubject} ConsString.
- Node* lsubject_second = etrue1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
- lsubject, etrue1, if_true1);
-
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we flatten the string first.
- Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
- jsgraph()->EmptyStringConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = etrue1;
- Node* vtrue2 = etrue2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
- lsubject, etrue2, if_true2);
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = etrue1;
- Node* vfalse2;
- {
- // Flatten the {lsubject} ConsString first.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kFlattenString;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- vfalse2 = efalse2 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
- efalse2, if_false2);
- }
+Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
+ int const formal_parameter_count = ParameterCountOf(node->op());
- // Retry the {loop} with the new subject.
- loop->ReplaceInput(1, if_true2);
- lindex->ReplaceInput(1, lindex);
- leffect->ReplaceInput(1, etrue2);
- lsubject->ReplaceInput(1, vtrue2);
- loop->ReplaceInput(2, if_false2);
- lindex->ReplaceInput(2, lindex);
- leffect->ReplaceInput(2, efalse2);
- lsubject->ReplaceInput(2, vfalse2);
- }
+ Callable const callable = CodeFactory::NewRestParameterElements(isolate());
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()),
+ __ IntPtrConstant(formal_parameter_count),
+ __ NoContextConstant());
+}
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- // Check if the {lsubject} is an ExternalString.
- Node* check2 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringRepresentationMask)),
- jsgraph()->Int32Constant(kExternalStringTag));
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = efalse1;
- Node* vtrue2;
- {
- // Check if the {lsubject} is a short external string.
- Node* check3 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(
- machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kShortExternalStringMask)),
- jsgraph()->Int32Constant(0));
- Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
- check3, if_true2);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = etrue2;
- Node* vtrue3;
- {
- // Load the actual resource data from the {lsubject}.
- Node* lsubject_resource_data = etrue3 = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForExternalStringResourceData()),
- lsubject, etrue3, if_true3);
-
- // Check if the {lsubject} is a TwoByteExternalString or a
- // OneByteExternalString.
- Node* check4 = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
- jsgraph()->Int32Constant(kStringEncodingMask)),
- jsgraph()->Int32Constant(kTwoByteStringTag));
- Node* branch4 =
- graph()->NewNode(common()->Branch(), check4, if_true3);
-
- Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
- Node* etrue4 = etrue3;
- Node* vtrue4 = etrue4 = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForExternalTwoByteStringCharacter()),
- lsubject_resource_data, lindex, etrue4, if_true4);
-
- Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
- Node* efalse4 = etrue3;
- Node* vfalse4 = efalse4 = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForExternalOneByteStringCharacter()),
- lsubject_resource_data, lindex, efalse4, if_false4);
-
- if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
- etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
- if_true3);
- vtrue3 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue4, vfalse4, if_true3);
- }
+Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
+ int const formal_parameter_count = ParameterCountOf(node->op());
- Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
- Node* efalse3 = etrue2;
- Node* vfalse3;
- {
- // The {lsubject} might be compressed, call the runtime.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kExternalStringGetChar;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- vfalse3 = efalse3 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
- ChangeInt32ToSmi(lindex),
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
- efalse3, if_false3);
- vfalse3 = ChangeSmiToInt32(vfalse3);
- }
+ Callable const callable =
+ CodeFactory::NewUnmappedArgumentsElements(isolate());
+ Operator::Properties const properties = node->op()->properties();
+ CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()),
+ __ IntPtrConstant(formal_parameter_count),
+ __ NoContextConstant());
+}
- if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
- etrue2 =
- graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
- vtrue2 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue3, vfalse3, if_true2);
- }
+Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
+ Node* value = node->InputAt(0);
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = efalse1;
- {
- // The {lsubject} is a SlicedString, continue with its parent.
- Node* lsubject_parent = efalse2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
- lsubject, efalse2, if_false2);
- Node* lsubject_offset = efalse2 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
- lsubject, efalse2, if_false2);
- Node* lsubject_index = graph()->NewNode(
- machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
-
- // Retry the {loop} with the parent subject.
- loop->ReplaceInput(3, if_false2);
- leffect->ReplaceInput(3, efalse2);
- lindex->ReplaceInput(3, lsubject_index);
- lsubject->ReplaceInput(3, lsubject_parent);
- }
+ Node* value_bit_field =
+ __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
+ return __ Word32Equal(
+ __ Word32Equal(
+ __ Word32And(value_bit_field,
+ __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+ __ Int32Constant(0)),
+ __ Int32Constant(0));
+}
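
// The nested Word32Equal pair above just materializes "mask bit set" as a
// 0/1 bit; as a predicate on the JSArrayBuffer bit field:
bool ArrayBufferWasNeutered(uint32_t buffer_bit_field) {
  return (buffer_bit_field & JSArrayBuffer::WasNeutered::kMask) != 0;
}
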
- if_false1 = if_true2;
- efalse1 = etrue2;
- vfalse1 = vtrue2;
- }
+Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
- if_false0 = if_false1;
- efalse0 = efalse1;
- vfalse0 = vfalse1;
- }
+ Callable const callable = CodeFactory::StringCharAt(isolate());
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
+}
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
- vfalse0, control);
+Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+ Node* receiver = node->InputAt(0);
+ Node* position = node->InputAt(1);
- return ValueEffectControl(value, effect, control);
+ Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+ Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+ MachineType::TaggedSigned());
+ return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+ __ NoContextConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
Node* value = node->InputAt(0);
+ auto runtime_call = __ MakeDeferredLabel<2>();
+ auto if_undefined = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
// Compute the character code.
- Node* code =
- graph()->NewNode(machine()->Word32And(), value,
- jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+ Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
// Check if the {code} is a one-byte char code.
- Node* check0 =
- graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
- jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
+ Node* check0 = __ Int32LessThanOrEqual(
+ code, __ Int32Constant(String::kMaxOneByteCharCode));
+ __ GotoUnless(check0, &runtime_call);
// Load the isolate wide single character string cache.
- Node* cache =
- jsgraph()->HeapConstant(factory()->single_character_string_cache());
+ Node* cache = __ HeapConstant(factory()->single_character_string_cache());
// Compute the {cache} index for {code}.
- Node* index = machine()->Is32()
- ? code
- : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+ Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
// Check if we have an entry for the {code} in the single character string
// cache already.
- Node* entry = etrue0 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
- index, etrue0, if_true0);
+ Node* entry =
+ __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
- Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
- jsgraph()->UndefinedConstant());
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
-
- // Use the {entry} from the {cache}.
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = entry;
+ Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
+ __ GotoIf(check1, &runtime_call);
+ __ Goto(&done, entry);
// Let %StringCharFromCode handle this case.
// TODO(turbofan): At some point we may consider adding a stub for this
// deferred case, so that we don't need to call into C++ here.
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1;
+ __ Bind(&runtime_call);
{
- if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
- etrue1 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kStringCharFromCode;
CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
- vtrue1 = etrue1 = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1),
- ChangeInt32ToSmi(code),
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
- if_true1);
+ Node* vtrue1 =
+ __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(1), __ NoContextConstant());
+ __ Goto(&done, vtrue1);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
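
Every function rewritten in this diff follows the same graph-assembler shape, replacing the explicit Branch/IfTrue/IfFalse/Merge/EffectPhi/Phi node chains of the old code. A minimal sketch of that idiom, using only calls that appear in this diff ({check}, {fast_value} and {slow_value} are placeholders):

    // Minimal sketch of the graph-assembler control-flow idiom used
    // throughout this diff. Deferred labels mark slow paths; the template
    // argument is the number of incoming Gotos, and the
    // MachineRepresentation parameter is the type of the value phi merged
    // at the label.
    auto slow_path = __ MakeDeferredLabel<1>();
    auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);

    __ GotoUnless(check, &slow_path);  // fast path falls through
    __ Goto(&done, fast_value);        // each Goto supplies one phi input

    __ Bind(&slow_path);
    __ Goto(&done, slow_value);

    __ Bind(&done);
    Node* result = done.PhiAt(0);      // the merged value
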
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
Node* value = node->InputAt(0);
Node* code = value;
- Node* etrue0 = effect;
- Node* vtrue0;
+ auto if_not_single_code = __ MakeDeferredLabel<1>();
+ auto if_not_one_byte = __ MakeDeferredLabel<1>();
+ auto cache_miss = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<4>(MachineRepresentation::kTagged);
  // Check if the {code} is a single code unit.
- Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
- jsgraph()->Uint32Constant(0xFFFF));
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+ Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
+ __ GotoUnless(check0, &if_not_single_code);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
{
    // Check if the {code} is a one-byte character.
- Node* check1 = graph()->NewNode(
- machine()->Uint32LessThanOrEqual(), code,
- jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1;
+ Node* check1 = __ Uint32LessThanOrEqual(
+ code, __ Uint32Constant(String::kMaxOneByteCharCode));
+ __ GotoUnless(check1, &if_not_one_byte);
{
// Load the isolate wide single character string cache.
- Node* cache =
- jsgraph()->HeapConstant(factory()->single_character_string_cache());
+ Node* cache = __ HeapConstant(factory()->single_character_string_cache());
// Compute the {cache} index for {code}.
- Node* index =
- machine()->Is32()
- ? code
- : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+ Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
// Check if we have an entry for the {code} in the single character string
// cache already.
- Node* entry = etrue1 = graph()->NewNode(
- simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
- cache, index, etrue1, if_true1);
-
- Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
- jsgraph()->UndefinedConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_true1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = etrue1;
- Node* vtrue2;
+ Node* entry =
+ __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
+
+ Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+ __ GotoIf(check2, &cache_miss);
+
+ // Use the {entry} from the {cache}.
+ __ Goto(&done, entry);
+
+ __ Bind(&cache_miss);
{
// Allocate a new SeqOneByteString for {code}.
- vtrue2 = etrue2 = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
- if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
- jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
- if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
- etrue2 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
- jsgraph()->SmiConstant(1), etrue2, if_true2);
- etrue2 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
- kNoWriteBarrier)),
- vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
- kHeapObjectTag),
- code, etrue2, if_true2);
+ Node* vtrue2 = __ Allocate(
+ NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vtrue2,
+ __ HeapConstant(factory()->one_byte_string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
+ __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+ vtrue2,
+ __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ code);
// Remember it in the {cache}.
- etrue2 = graph()->NewNode(
- simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
- cache, index, vtrue2, etrue2, if_true2);
+ __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
+ vtrue2);
+ __ Goto(&done, vtrue2);
}
-
- // Use the {entry} from the {cache}.
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* efalse2 = etrue0;
- Node* vfalse2 = entry;
-
- if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- etrue1 =
- graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
- vtrue1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue2, vfalse2, if_true1);
}
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = effect;
- Node* vfalse1;
+ __ Bind(&if_not_one_byte);
{
// Allocate a new SeqTwoByteString for {code}.
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
- if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
- jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
- jsgraph()->SmiConstant(1), efalse1, if_false1);
- efalse1 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
- kNoWriteBarrier)),
- vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- code, efalse1, if_false1);
+ Node* vfalse1 = __ Allocate(
+ NOT_TENURED, __ Int32Constant(SeqTwoByteString::SizeFor(1)));
+ __ StoreField(AccessBuilder::ForMap(), vfalse1,
+ __ HeapConstant(factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
+ __ SmiConstant(1));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
+ vfalse1,
+ __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ code);
+ __ Goto(&done, vfalse1);
}
-
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_true0);
}
+ __ Bind(&if_not_single_code);
  // Generate the surrogate pair string.
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
{
switch (UnicodeEncodingOf(node->op())) {
case UnicodeEncoding::UTF16:
@@ -2666,553 +1975,359 @@ EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
case UnicodeEncoding::UTF32: {
// Convert UTF32 to UTF16 code units, and store as a 32 bit word.
- Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+ Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
// lead = (codepoint >> 10) + LEAD_OFFSET
Node* lead =
- graph()->NewNode(machine()->Int32Add(),
- graph()->NewNode(machine()->Word32Shr(), code,
- jsgraph()->Int32Constant(10)),
- lead_offset);
+ __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
// trail = (codepoint & 0x3FF) + 0xDC00;
- Node* trail =
- graph()->NewNode(machine()->Int32Add(),
- graph()->NewNode(machine()->Word32And(), code,
- jsgraph()->Int32Constant(0x3FF)),
- jsgraph()->Int32Constant(0xDC00));
+ Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+ __ Int32Constant(0xDC00));
      // codepoint = (trail << 16) | lead;
- code = graph()->NewNode(machine()->Word32Or(),
- graph()->NewNode(machine()->Word32Shl(), trail,
- jsgraph()->Int32Constant(16)),
- lead);
+ code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
break;
}
}
// Allocate a new SeqTwoByteString for {code}.
- vfalse0 = efalse0 =
- graph()->NewNode(simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
- efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
- jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
- jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
- jsgraph()->SmiConstant(2), efalse0, if_false0);
- efalse0 = graph()->NewNode(
- machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
- kNoWriteBarrier)),
- vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
- kHeapObjectTag),
- code, efalse0, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue0, vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
-}
+ Node* vfalse0 = __ Allocate(NOT_TENURED,
+ __ Int32Constant(SeqTwoByteString::SizeFor(2)));
+ __ StoreField(AccessBuilder::ForMap(), vfalse0,
+ __ HeapConstant(factory()->string_map()));
+ __ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
+ __ IntPtrConstant(Name::kEmptyHashField));
+ __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
+ __ Store(
+ StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+ vfalse0,
+ __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+ code);
+ __ Goto(&done, vfalse0);
+ }
+
+ __ Bind(&done);
+ return done.PhiAt(0);
+}
+
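The UTF32 arm above packs both UTF-16 code units of a supplementary-plane code point into a single 32-bit store. A standalone sketch of that arithmetic (plain C++, not V8 API; the lead-in-low-half layout matches the `(trail << 16) | lead` packing, which puts the lead unit first on a little-endian target):

    #include <cstdint>
    #include <cstdio>

    // Standalone sketch of the UTF32 -> UTF16 surrogate-pair math lowered
    // above.
    void SurrogatePair(uint32_t codepoint) {
      const uint32_t lead_offset = 0xD800 - (0x10000 >> 10);
      uint32_t lead = (codepoint >> 10) + lead_offset;  // high surrogate
      uint32_t trail = (codepoint & 0x3FF) + 0xDC00;    // low surrogate
      uint32_t word = (trail << 16) | lead;             // one kWord32 store
      std::printf("U+%X -> lead 0x%X, trail 0x%X, word 0x%X\n",
                  codepoint, lead, trail, word);
    }

    int main() { SurrogatePair(0x1F600); }  // lead 0xD83D, trail 0xDE00
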
+Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+ Node* node) {
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringComparison(Callable const& callable,
- Node* node, Node* effect,
- Node* control) {
Operator::Properties properties = Operator::kEliminatable;
CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
- node->InsertInput(graph()->zone(), 0,
- jsgraph()->HeapConstant(callable.code()));
- node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
- node->AppendInput(graph()->zone(), effect);
- NodeProperties::ChangeOp(node, common()->Call(desc));
- return ValueEffectControl(node, node, control);
+ return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+ __ NoContextConstant());
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
- Node* control) {
- return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
- effect, control);
+Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
+ return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
- Node* control) {
- return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
- effect, control);
+Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
+ return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
- node, effect, control);
+ node);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
+ Node* frame_state) {
// If we reach this point w/o eliminating the {node} that's marked
// with allow-return-hole, we cannot do anything, so just deoptimize
// in case of the hole NaN (similar to Crankshaft).
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(
- machine()->Word32Equal(),
- graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
- jsgraph()->Int32Constant(kHoleNanUpper32));
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+ __ Int32Constant(kHoleNanUpper32));
+ __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ return value;
}
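
The hole check compares only the upper 32 bits of the double against kHoleNanUpper32, since the hole is encoded as a NaN with a distinguished bit pattern. A hedged standalone equivalent (the constant's value is an assumption; see V8's globals.h):

    #include <cstdint>
    #include <cstring>

    // Sketch of the hole-NaN test: the elements hole is a NaN whose upper
    // word is kHoleNanUpper32, so comparing the high 32 bits suffices.
    bool IsHoleNan(double value) {
      const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed, from globals.h
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }
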
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
- Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
+ Node* frame_state) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TheHoleConstant());
- control = effect =
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
- frame_state, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ Node* check = __ WordEqual(value, __ TheHoleConstant());
+ __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+ return value;
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
Node* value = node->InputAt(0);
- Node* check = graph()->NewNode(machine()->WordEqual(), value,
- jsgraph()->TheHoleConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue = jsgraph()->UndefinedConstant();
+ auto if_is_hole = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* vfalse = value;
+ Node* check = __ WordEqual(value, __ TheHoleConstant());
+ __ GotoIf(check, &if_is_hole);
+ __ Goto(&done, value);
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue, vfalse, control);
+ __ Bind(&if_is_hole);
+ __ Goto(&done, __ UndefinedConstant());
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
- Node* control) {
- Node* result = effect = graph()->NewNode(
- simplified()->Allocate(NOT_TENURED),
- jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- result, jsgraph()->HeapNumberMapConstant(), effect,
- control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
- value, effect, control);
- return ValueEffectControl(result, effect, control);
+Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
+ Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
+ __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
+ __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
+ return result;
}
Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ value = __ ChangeInt32ToInt64(value);
}
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+ return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+ value = __ ChangeUint32ToUint64(value);
}
- return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
- return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+ return __ WordShl(value, SmiShiftBitsConstant());
}
Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
- value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+ value = __ WordSar(value, SmiShiftBitsConstant());
if (machine()->Is64()) {
- value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+ value = __ TruncateInt64ToInt32(value);
}
return value;
}
+
Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
- return graph()->NewNode(
- machine()->WordEqual(),
- graph()->NewNode(machine()->WordAnd(), value,
- jsgraph()->IntPtrConstant(kSmiTagMask)),
- jsgraph()->IntPtrConstant(kSmiTag));
+ return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
+ __ IntPtrConstant(kSmiTag));
}
Node* EffectControlLinearizer::SmiMaxValueConstant() {
- return jsgraph()->Int32Constant(Smi::kMaxValue);
+ return __ Int32Constant(Smi::kMaxValue);
}
Node* EffectControlLinearizer::SmiShiftBitsConstant() {
- return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
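
These helpers implement V8's pointer-tagged small integers. A sketch of the arithmetic on a 64-bit target (the constants are the usual x64 values, stated here as assumptions: kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 31):

    #include <cstdint>

    // Smi tagging sketch for a 64-bit target: the int32 payload lives in
    // the upper half of the word, and a clear low bit marks a Smi.
    constexpr int kSmiShift = 31 + 1;  // kSmiShiftSize + kSmiTagSize

    intptr_t ChangeInt32ToSmi(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiShift;  // WordShl
    }
    int32_t ChangeSmiToInt32(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);     // WordSar + truncate
    }
    bool ObjectIsSmi(intptr_t word) {
      return (word & 1) == 0;  // (word & kSmiTagMask) == kSmiTag
    }
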
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
Node* value = node->InputAt(0);
- Node* result = effect =
- graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
- value, jsgraph()->NoContextConstant(), effect);
- return ValueEffectControl(result, effect, control);
+ return __ ToNumber(value);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = ChangeSmiToInt32(value);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto if_to_number_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- vfalse0 = efalse0 = graph()->NewNode(
- ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
- jsgraph()->NoContextConstant(), efalse0);
-
- Node* check1 = ObjectIsSmi(vfalse0);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ __ Goto(&done, ChangeSmiToInt32(value));
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1 = ChangeSmiToInt32(vfalse0);
+ __ Bind(&if_not_smi);
+ Node* to_number = __ ToNumber(value);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
- efalse1, if_false1);
- vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
- }
+ Node* check1 = ObjectIsSmi(to_number);
+ __ GotoIf(check1, &if_to_number_smi);
+ Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+ __ Goto(&done, __ TruncateFloat64ToWord32(number));
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue1, vfalse1, if_false0);
- }
+ __ Bind(&if_to_number_smi);
+ __ Goto(&done, ChangeSmiToInt32(to_number));
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
- vtrue0, vfalse0, control);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
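
The non-Smi path above ends in TruncateFloat64ToWord32, which implements JavaScript's ToInt32 conversion: truncation toward zero followed by wrap-around modulo 2^32. A runnable semantics sketch (plain C++, not the V8 operator itself):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ToInt32 semantics: NaN and infinities map to 0; finite values are
    // truncated toward zero, wrapped modulo 2^32, and reinterpreted signed.
    int32_t DoubleToWord32(double value) {
      if (!std::isfinite(value)) return 0;
      double wrapped = std::fmod(std::trunc(value), 4294967296.0);  // 2^32
      if (wrapped < 0) wrapped += 4294967296.0;
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
    }

    int main() { std::printf("%d\n", DoubleToWord32(4294967296.0 + 5)); }  // 5
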
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
Node* value = node->InputAt(0);
- Node* check0 = ObjectIsSmi(value);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0;
- {
- vtrue0 = ChangeSmiToInt32(value);
- vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
- }
-
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0;
- {
- vfalse0 = efalse0 = graph()->NewNode(
- ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
- jsgraph()->NoContextConstant(), efalse0);
+ auto if_not_smi = __ MakeDeferredLabel<1>();
+ auto if_to_number_smi = __ MakeLabel<1>();
+ auto done = __ MakeLabel<3>(MachineRepresentation::kFloat64);
- Node* check1 = ObjectIsSmi(vfalse0);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+ Node* check0 = ObjectIsSmi(value);
+ __ GotoUnless(check0, &if_not_smi);
+ Node* from_smi = ChangeSmiToInt32(value);
+ __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = efalse0;
- Node* vtrue1;
- {
- vtrue1 = ChangeSmiToInt32(vfalse0);
- vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
- }
+ __ Bind(&if_not_smi);
+ Node* to_number = __ ToNumber(value);
+ Node* check1 = ObjectIsSmi(to_number);
+ __ GotoIf(check1, &if_to_number_smi);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = efalse0;
- Node* vfalse1;
- {
- vfalse1 = efalse1 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
- efalse1, if_false1);
- }
+ Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+ __ Goto(&done, number);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- efalse0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
+ __ Bind(&if_to_number_smi);
+ Node* number_from_smi = ChangeSmiToInt32(to_number);
+ number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
+ __ Goto(&done, number_from_smi);
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
+ auto if_not_fixed_array = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
// Load the current map of {elements}.
- Node* elements_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
- elements, effect, control);
+ Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
// Check if {elements} is not a copy-on-write FixedArray.
- Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
- jsgraph()->FixedArrayMapConstant());
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+ Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
+ __ GotoUnless(check, &if_not_fixed_array);
// Nothing to do if the {elements} are not copy-on-write.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = elements;
+ __ Goto(&done, elements);
+ __ Bind(&if_not_fixed_array);
// We need to take a copy of the {elements} and set them up for {object}.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- // We need to create a copy of the {elements} for {object}.
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
- vfalse = efalse = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
- jsgraph()->NoContextConstant(), efalse);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- Node* value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+ Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
+ __ NoContextConstant());
+ __ Goto(&done, result);
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
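
The map check distinguishes ordinary FixedArray elements from copy-on-write ones (which use a different map), so matching the FixedArray map means no copy is needed. A behavioral model of the copy-on-write discipline, not V8's actual data layout:

    #include <memory>
    #include <vector>

    // Behavioral model: a shared (copy-on-write) element store must be
    // replaced by a private copy before the first write, which is what the
    // CopyFastSmiOrObjectElements stub does for the real object.
    using Elements = std::shared_ptr<std::vector<int>>;

    Elements& EnsureWritable(Elements& slot) {
      if (slot.use_count() > 1) {                          // COW: shared
        slot = std::make_shared<std::vector<int>>(*slot);  // copy, install
      }
      return slot;                                         // safe to mutate
    }
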
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+ Node* frame_state) {
GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
Node* object = node->InputAt(0);
Node* elements = node->InputAt(1);
Node* index = node->InputAt(2);
Node* length = node->InputAt(3);
- Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
- ? machine()->Uint32LessThanOrEqual()
- : machine()->Word32Equal(),
- length, index);
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
+ auto if_not_grow = __ MakeLabel<1>();
+ auto if_not_grow_backing_store = __ MakeLabel<1>();
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = elements;
+ Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
+ ? __ Uint32LessThanOrEqual(length, index)
+ : __ Word32Equal(length, index);
+ __ GotoUnless(check0, &if_not_grow);
{
// Load the length of the {elements} backing store.
- Node* elements_length = etrue0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
- etrue0, if_true0);
+ Node* elements_length =
+ __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
elements_length = ChangeSmiToInt32(elements_length);
// Check if we need to grow the {elements} backing store.
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = etrue0;
- Node* vtrue1 = vtrue0;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* efalse1 = etrue0;
- Node* vfalse1 = vtrue0;
- {
- // We need to grow the {elements} for {object}.
- Operator::Properties properties = Operator::kEliminatable;
- Callable callable =
- (flags & GrowFastElementsFlag::kDoubleElements)
- ? CodeFactory::GrowFastDoubleElements(isolate())
- : CodeFactory::GrowFastSmiOrObjectElements(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- properties);
- vfalse1 = efalse1 = graph()->NewNode(
- common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
- object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
- efalse1);
-
- // Ensure that we were able to grow the {elements}.
- // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
- // but maybe we should just introduce a reason that makes sense.
- efalse1 = if_false1 = graph()->NewNode(
- common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
- frame_state, efalse1, if_false1);
- }
+ Node* check1 = __ Uint32LessThan(index, elements_length);
+ __ GotoUnless(check1, &if_not_grow_backing_store);
+ __ Goto(&done_grow, elements);
+
+ __ Bind(&if_not_grow_backing_store);
+ // We need to grow the {elements} for {object}.
+ Operator::Properties properties = Operator::kEliminatable;
+ Callable callable =
+ (flags & GrowFastElementsFlag::kDoubleElements)
+ ? CodeFactory::GrowFastDoubleElements(isolate())
+ : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+ CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
+ properties);
+ Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
+ ChangeInt32ToSmi(index), __ NoContextConstant());
+
+ // Ensure that we were able to grow the {elements}.
+ // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+ // but maybe we should just introduce a reason that makes sense.
+ __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
+ frame_state);
+ __ Goto(&done_grow, new_object);
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- etrue0 =
- graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&done_grow);
// For JSArray {object}s we also need to update the "length".
if (flags & GrowFastElementsFlag::kArrayObject) {
// Compute the new {length}.
- Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
- machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+ Node* object_length =
+ ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
// Update the "length" property of the {object}.
- etrue0 =
- graph()->NewNode(simplified()->StoreField(
- AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
- object, object_length, etrue0, if_true0);
+ __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+ object_length);
}
+ __ Goto(&done, done_grow.PhiAt(0));
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* efalse0 = effect;
- Node* vfalse0 = elements;
+ __ Bind(&if_not_grow);
{
// In case of non-holey {elements}, we need to verify that the {index} is
// in-bounds, otherwise for holey {elements}, the check above already
// guards the index (and the operator forces {index} to be unsigned).
if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
- Node* check1 =
- graph()->NewNode(machine()->Uint32LessThan(), index, length);
- efalse0 = if_false0 = graph()->NewNode(
- common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
- frame_state, efalse0, if_false0);
+ Node* check1 = __ Uint32LessThan(index, length);
+ __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check1, frame_state);
}
+ __ Goto(&done, elements);
}
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
- vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
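
For holey elements the lowering lets a store at index >= length grow the backing store on demand; for packed elements only index == length may grow, and any other out-of-bounds index deoptimizes. A much-simplified behavioral model of the holey JSArray case (the real lowering calls the GrowFast* stubs and deopts if growing fails):

    #include <vector>

    // Simplified model of the holey + kArrayObject case: a store past the
    // current length grows the backing store when needed and bumps the
    // array's length to index + 1.
    void MaybeGrowFastElements(std::vector<int>& backing, size_t& length,
                               size_t index) {
      if (index >= length) {            // Uint32LessThanOrEqual(length, index)
        if (index >= backing.size()) {
          backing.resize(index + 1);    // grow stub; deopts on failure in V8
        }
        length = index + 1;             // StoreField(ForJSArrayLength)
      }
    }
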
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
- Node* control) {
+void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
ElementsTransition const transition = ElementsTransitionOf(node->op());
Node* object = node->InputAt(0);
- Node* source_map = node->InputAt(1);
- Node* target_map = node->InputAt(2);
+
+ auto if_map_same = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>();
+
+ Node* source_map = __ HeapConstant(transition.source());
+ Node* target_map = __ HeapConstant(transition.target());
// Load the current map of {object}.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
- effect, control);
+ Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
// Check if {object_map} is the same as {source_map}.
- Node* check =
- graph()->NewNode(machine()->WordEqual(), object_map, source_map);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
- // Migrate the {object} from {source_map} to {target_map}.
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- {
- switch (transition) {
- case ElementsTransition::kFastTransition: {
- // In-place migration of {object}, just store the {target_map}.
- etrue =
- graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
- object, target_map, etrue, if_true);
- break;
- }
- case ElementsTransition::kSlowTransition: {
- // Instance migration, call out to the runtime for {object}.
- Operator::Properties properties =
- Operator::kNoDeopt | Operator::kNoThrow;
- Runtime::FunctionId id = Runtime::kTransitionElementsKind;
- CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
- graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
- etrue = graph()->NewNode(
- common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
- target_map,
- jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
- jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
- if_true);
- break;
- }
+ Node* check = __ WordEqual(object_map, source_map);
+ __ GotoIf(check, &if_map_same);
+ __ Goto(&done);
+
+ __ Bind(&if_map_same);
+ switch (transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ // In-place migration of {object}, just store the {target_map}.
+ __ StoreField(AccessBuilder::ForMap(), object, target_map);
+ break;
+ case ElementsTransition::kSlowTransition: {
+ // Instance migration, call out to the runtime for {object}.
+ Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+ Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+ CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+ graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+ __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+ __ ExternalConstant(ExternalReference(id, isolate())),
+ __ Int32Constant(2), __ NoContextConstant());
+ break;
}
}
+ __ Goto(&done);
- // Nothing to do if the {object} doesn't have the {source_map}.
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
- return ValueEffectControl(nullptr, effect, control);
+ __ Bind(&done);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
- Node* control) {
+Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* base = node->InputAt(1);
@@ -3221,24 +2336,20 @@ EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
- effect = graph()->NewNode(common()->Retain(), buffer, effect);
+ __ Retain(buffer);
- // Compute the effective storage pointer.
- Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
- external, effect, control);
+ // Compute the effective storage pointer, handling the case where the
+ // {external} pointer is the effective storage pointer (i.e. the {base}
+ // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
- Node* value = effect = graph()->NewNode(
- simplified()->LoadElement(
- AccessBuilder::ForTypedArrayElement(array_type, true)),
- storage, index, effect, control);
-
- return ValueEffectControl(value, effect, control);
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+ storage, index);
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
- Node* control) {
+void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
Node* buffer = node->InputAt(0);
Node* base = node->InputAt(1);
@@ -3248,34 +2359,25 @@ EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
// We need to keep the {buffer} alive so that the GC will not release the
// ArrayBuffer (if there's any) as long as we are still operating on it.
- effect = graph()->NewNode(common()->Retain(), buffer, effect);
+ __ Retain(buffer);
- // Compute the effective storage pointer.
- Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
- external, effect, control);
+ // Compute the effective storage pointer, handling the case where the
+ // {external} pointer is the effective storage pointer (i.e. the {base}
+ // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
// Perform the actual typed element access.
- effect = graph()->NewNode(
- simplified()->StoreElement(
- AccessBuilder::ForTypedArrayElement(array_type, true)),
- storage, index, value, effect, control);
-
- return ValueEffectControl(nullptr, effect, control);
+ __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+ storage, index, value);
}
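
Both typed-element accessors now special-case a Smi-zero {base}: for off-heap typed arrays the backing-store address lives entirely in {external}, so no addition is needed. A pointer-arithmetic sketch of that selection:

    #include <cstdint>

    // Sketch of the effective-storage computation: base == 0 (Smi zero)
    // marks an off-heap backing store addressed by {external} alone;
    // otherwise {external} is an offset from the on-heap {base}
    // (UnsafePointerAdd).
    uint8_t* EffectiveStorage(uint8_t* base, uintptr_t external) {
      return base == nullptr ? reinterpret_cast<uint8_t*>(external)
                             : base + external;
    }
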
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundUp().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
Node* const input = node->InputAt(0);
// General case for ceil.
@@ -3300,251 +2402,169 @@ EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
// let temp2 = (2^52 + temp1) - 2^52 in
// let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
// -0 - temp3
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+ auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+ Node* const one = __ Float64Constant(1.0);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, input),
- graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(temp1, input), &done, temp1);
+ __ Goto(&done, __ Float64Add(temp1, one));
}
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ __ Bind(&if_not_positive);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
{
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp1, temp2);
+ __ GotoUnless(check3, &done_temp3, temp2);
+ __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+ __ Bind(&done_temp3);
+ Node* temp3 = done_temp3.PhiAt(0);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp3));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
- return ValueEffectControl(value, effect, merge0);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
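
The fallback relies on the classic 2^52 trick: for 0 < x < 2^52, (2^52 + x) - 2^52 rounds x to the nearest integer, because doubles at that magnitude carry no fractional bits. A runnable sketch of the positive branch (assumes strict IEEE double evaluation, no fast-math):

    #include <cstdio>

    // Ceil for 0 < input < 2^52 via the round-to-nearest 2^52 trick,
    // matching the "temp1" branch lowered above.
    double CeilPositive(double input) {
      const double two_52 = 4503599627370496.0;    // 2^52
      if (input >= two_52) return input;           // already integral
      double temp1 = (two_52 + input) - two_52;    // nearest integer
      return temp1 < input ? temp1 + 1.0 : temp1;  // fix up round-down
    }

    int main() {
      std::printf("%.1f %.1f %.1f\n",
                  CeilPositive(0.5), CeilPositive(2.3), CeilPositive(2.0));
      // 1.0 3.0 2.0
    }
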
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
- Node* control) {
- if (machine()->Float64RoundDown().IsSupported()) {
- value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
- } else {
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_one = jsgraph()->Float64Constant(-1.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 =
- jsgraph()->Float64Constant(-4503599627370496.0E0);
- Node* const input = value;
-
- // General case for floor.
- //
- // if 0.0 < input then
- // if 2^52 <= input then
- // input
- // else
- // let temp1 = (2^52 + input) - 2^52 in
- // if input < temp1 then
- // temp1 - 1
- // else
- // temp1
- // else
- // if input == 0 then
- // input
- // else
- // if input <= -2^52 then
- // input
- // else
- // let temp1 = -0 - input in
- // let temp2 = (2^52 + temp1) - 2^52 in
- // if temp2 < temp1 then
- // -1 - temp2
- // else
- // -0 - temp2
- //
- // Note: We do not use the Diamond helper class here, because it really
- // hurts
- // readability with nested diamonds.
-
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
- }
+Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
+ Node* round_down = __ Float64RoundDown(value);
+ if (round_down != nullptr) {
+ return round_down;
+ }
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
- }
+ Node* const input = value;
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ // General case for floor.
+ //
+ // if 0.0 < input then
+ // if 2^52 <= input then
+ // input
+ // else
+ // let temp1 = (2^52 + input) - 2^52 in
+ // if input < temp1 then
+ // temp1 - 1
+ // else
+ // temp1
+ // else
+ // if input == 0 then
+ // input
+ // else
+ // if input <= -2^52 then
+ // input
+ // else
+ // let temp1 = -0 - input in
+ // let temp2 = (2^52 + temp1) - 2^52 in
+ // if temp2 < temp1 then
+ // -1 - temp2
+ // else
+ // -0 - temp2
+
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_temp2_lt_temp1 = __ MakeLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<7>(MachineRepresentation::kFloat64);
+
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* const one = __ Float64Constant(1.0);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+ __ Goto(&done, __ Float64Sub(temp1, one));
+ }
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
+ }
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- vfalse2 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
- graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
- graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
- }
+ __ Bind(&if_not_positive);
+ {
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ {
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp2, temp1);
+ __ GotoIf(check3, &if_temp2_lt_temp1);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp2));
+
+ __ Bind(&if_temp2_lt_temp1);
+ __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return done.PhiAt(0);
}
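
The floor fallback is the mirror image: after the same 2^52 rounding, the fix-up subtracts one when the rounding went up. A sketch of the positive branch, with the same IEEE evaluation caveat as above:

    // Floor for 0 < input < 2^52, matching the first branch above.
    double FloorPositive(double input) {
      const double two_52 = 4503599627370496.0;    // 2^52
      if (input >= two_52) return input;
      double temp1 = (two_52 + input) - two_52;    // nearest integer
      return input < temp1 ? temp1 - 1.0 : temp1;  // fix up round-up
    }
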
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundDown().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
Node* const input = node->InputAt(0);
- return BuildFloat64RoundDown(input, effect, control);
+ return Just(BuildFloat64RoundDown(input));
}
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundTiesEven().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const two = jsgraph()->Float64Constant(2.0);
- Node* const half = jsgraph()->Float64Constant(0.5);
- Node* const zero = jsgraph()->Float64Constant(0.0);
Node* const input = node->InputAt(0);
// Generate case for round ties to even:
@@ -3561,79 +2581,38 @@ EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
// value
// else
// value + 1.0
- //
- // Note: We do not use the Diamond helper class here, because it really hurts
- // readability with nested diamonds.
-
- ValueEffectControl continuation =
- BuildFloat64RoundDown(input, effect, control);
- Node* value = continuation.value;
- effect = continuation.effect;
- control = continuation.control;
- Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
+ auto if_is_half = __ MakeLabel<1>();
+ auto done = __ MakeLabel<4>(MachineRepresentation::kFloat64);
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
- Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+ Node* value = BuildFloat64RoundDown(input);
+ Node* temp1 = __ Float64Sub(input, value);
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0 = value;
+ Node* const half = __ Float64Constant(0.5);
+ Node* check0 = __ Float64LessThan(temp1, half);
+ __ GotoIf(check0, &done, value);
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
- {
- Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
- {
- Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
-
- Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = value;
+ Node* const one = __ Float64Constant(1.0);
+ Node* check1 = __ Float64LessThan(half, temp1);
+ __ GotoUnless(check1, &if_is_half);
+ __ Goto(&done, __ Float64Add(value, one));
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
+ __ Bind(&if_is_half);
+ Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
+ Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
+ __ GotoIf(check2, &done, value);
+ __ Goto(&done, __ Float64Add(value, one));
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
- }
-
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, control);
-
- return ValueEffectControl(value, effect, control);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
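
For reference, the branch structure this lowering emits corresponds to the following scalar sketch (illustrative only, assuming IEEE-754 double semantics; the lowering itself builds Float64* machine nodes through the graph assembler rather than C++ arithmetic). Returning Nothing&lt;Node*&gt;() above means the node is kept as-is, because a single hardware instruction covers it.

    #include <cmath>

    double RoundTiesEven(double input) {
      double value = std::floor(input);      // BuildFloat64RoundDown
      double temp1 = input - value;
      if (temp1 < 0.5) return value;         // check0: closer to the floor
      if (0.5 < temp1) return value + 1.0;   // check1: closer to the ceiling
      // check2: exactly halfway, so round to the even neighbour.
      return std::fmod(value, 2.0) == 0.0 ? value : value + 1.0;
    }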
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
- Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
// Nothing to be done if a fast hardware instruction is available.
if (machine()->Float64RoundTruncate().IsSupported()) {
- return ValueEffectControl(node, effect, control);
+ return Nothing<Node*>();
}
- Node* const one = jsgraph()->Float64Constant(1.0);
- Node* const zero = jsgraph()->Float64Constant(0.0);
- Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
- Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
- Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
Node* const input = node->InputAt(0);
// General case for trunc.
@@ -3662,92 +2641,65 @@ EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
// Note: We do not use the Diamond helper class here, because it really hurts
// readability with nested diamonds.
- Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* vtrue0;
- {
- Node* check1 =
- graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
- Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+ auto if_not_positive = __ MakeDeferredLabel<1>();
+ auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+ auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+ auto if_zero = __ MakeDeferredLabel<1>();
+ auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+ auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const zero = __ Float64Constant(0.0);
+ Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+ Node* const one = __ Float64Constant(1.0);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
+ Node* check0 = __ Float64LessThan(zero, input);
+ __ GotoUnless(check0, &if_not_positive);
+ {
+ Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+ __ GotoIf(check1, &if_greater_than_two_52);
{
- Node* temp1 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
- vfalse1 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), input, temp1),
- graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+ Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+ __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+ __ Goto(&done, __ Float64Sub(temp1, one));
}
- if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_true0);
+ __ Bind(&if_greater_than_two_52);
+ __ Goto(&done, input);
}
- Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
- Node* vfalse0;
+ __ Bind(&if_not_positive);
{
- Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
- Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check1, if_false0);
+ Node* check1 = __ Float64Equal(input, zero);
+ __ GotoIf(check1, &if_zero);
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* vtrue1 = input;
+ Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+ Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+ __ GotoIf(check2, &if_less_than_minus_two_52);
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
- Node* vfalse1;
{
- Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
- input, minus_two_52);
- Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
- check2, if_false1);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* vtrue2 = input;
-
- Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
- Node* vfalse2;
- {
- Node* temp1 =
- graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
- Node* temp2 = graph()->NewNode(
- machine()->Float64Sub(),
- graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
- Node* temp3 = graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64),
- graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
- graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
- vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
- }
-
- if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
- vfalse1 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue2, vfalse2, if_false1);
+ Node* const minus_zero = __ Float64Constant(-0.0);
+ Node* temp1 = __ Float64Sub(minus_zero, input);
+ Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+ Node* check3 = __ Float64LessThan(temp1, temp2);
+ __ GotoUnless(check3, &done_temp3, temp2);
+ __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+ __ Bind(&done_temp3);
+ Node* temp3 = done_temp3.PhiAt(0);
+ __ Goto(&done, __ Float64Sub(minus_zero, temp3));
}
+ __ Bind(&if_less_than_minus_two_52);
+ __ Goto(&done, input);
- if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
- vfalse0 =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue1, vfalse1, if_false0);
+ __ Bind(&if_zero);
+ __ Goto(&done, input);
}
-
- Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
- Node* value =
- graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
- vtrue0, vfalse0, merge0);
- return ValueEffectControl(value, effect, merge0);
+ __ Bind(&done);
+ return Just(done.PhiAt(0));
}
+#undef __
+
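
The truncation path relies on the classic 2^52 trick: for a double x with 0 < x < 2^52, (2^52 + x) - 2^52 rounds x to an integer, after which one conditional subtraction of 1.0 corrects the round-up cases. A scalar sketch of the graph built above, with names mirroring its temporaries (illustrative only):

    double TruncateViaTwo52(double input) {
      const double kTwo52 = 4503599627370496.0;  // 2^52
      if (0.0 < input) {
        if (kTwo52 <= input) return input;           // already integral
        double temp1 = (kTwo52 + input) - kTwo52;    // round to nearest
        return input < temp1 ? temp1 - 1.0 : temp1;  // undo a round-up
      }
      if (input == 0.0) return input;      // also preserves -0.0
      if (input <= -kTwo52) return input;  // already integral
      double temp1 = -0.0 - input;         // mirror into the positive range
      double temp2 = (kTwo52 + temp1) - kTwo52;
      double temp3 = temp1 < temp2 ? temp2 - 1.0 : temp2;
      return -0.0 - temp3;                 // mirror back, keeping the sign
    }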
Factory* EffectControlLinearizer::factory() const {
return isolate()->factory();
}
@@ -3756,18 +2708,6 @@ Isolate* EffectControlLinearizer::isolate() const {
return jsgraph()->isolate();
}
-Operator const* EffectControlLinearizer::ToNumberOperator() {
- if (!to_number_operator_.is_set()) {
- Callable callable = CodeFactory::ToNumber(isolate());
- CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0, flags,
- Operator::kEliminatable);
- to_number_operator_.set(common()->Call(desc));
- }
- return to_number_operator_.get();
-}
-
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index 4ed03c6815..9d991cfb4b 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -6,6 +6,7 @@
#define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/simplified-operator.h"
#include "src/globals.h"
@@ -38,174 +39,90 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
void ProcessNode(Node* node, Node** frame_state, Node** effect,
Node** control);
- struct ValueEffectControl {
- Node* value;
- Node* effect;
- Node* control;
- ValueEffectControl(Node* value, Node* effect, Node* control)
- : value(value), effect(effect), control(control) {}
- };
-
bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
Node** control);
- ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
- Node* frame_state,
- Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
- ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
- ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
- Node* control);
+ Node* LowerChangeBitToTagged(Node* node);
+ Node* LowerChangeInt31ToTaggedSigned(Node* node);
+ Node* LowerChangeInt32ToTagged(Node* node);
+ Node* LowerChangeUint32ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTagged(Node* node);
+ Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+ Node* LowerChangeTaggedSignedToInt32(Node* node);
+ Node* LowerChangeTaggedToBit(Node* node);
+ Node* LowerChangeTaggedToInt32(Node* node);
+ Node* LowerChangeTaggedToUint32(Node* node);
+ Node* LowerCheckBounds(Node* node, Node* frame_state);
+ Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
+ Node* LowerCheckMaps(Node* node, Node* frame_state);
+ Node* LowerCheckNumber(Node* node, Node* frame_state);
+ Node* LowerCheckString(Node* node, Node* frame_state);
+ Node* LowerCheckIf(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+ Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
+ Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+ Node* LowerChangeTaggedToFloat64(Node* node);
+ Node* LowerTruncateTaggedToBit(Node* node);
+ Node* LowerTruncateTaggedToFloat64(Node* node);
+ Node* LowerTruncateTaggedToWord32(Node* node);
+ Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+ Node* LowerObjectIsCallable(Node* node);
+ Node* LowerObjectIsNumber(Node* node);
+ Node* LowerObjectIsReceiver(Node* node);
+ Node* LowerObjectIsSmi(Node* node);
+ Node* LowerObjectIsString(Node* node);
+ Node* LowerObjectIsUndetectable(Node* node);
+ Node* LowerNewRestParameterElements(Node* node);
+ Node* LowerNewUnmappedArgumentsElements(Node* node);
+ Node* LowerArrayBufferWasNeutered(Node* node);
+ Node* LowerStringCharAt(Node* node);
+ Node* LowerStringCharCodeAt(Node* node);
+ Node* LowerStringFromCharCode(Node* node);
+ Node* LowerStringFromCodePoint(Node* node);
+ Node* LowerStringEqual(Node* node);
+ Node* LowerStringLessThan(Node* node);
+ Node* LowerStringLessThanOrEqual(Node* node);
+ Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
+ Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+ Node* LowerConvertTaggedHoleToUndefined(Node* node);
+ Node* LowerPlainPrimitiveToNumber(Node* node);
+ Node* LowerPlainPrimitiveToWord32(Node* node);
+ Node* LowerPlainPrimitiveToFloat64(Node* node);
+ Node* LowerEnsureWritableFastElements(Node* node);
+ Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
+ void LowerTransitionElementsKind(Node* node);
+ Node* LowerLoadTypedElement(Node* node);
+ void LowerStoreTypedElement(Node* node);
// Lowering of optional operators.
- ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
- Node* control);
- ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
- Node* control);
-
- ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
- Node* control);
- ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
- Node* value, Node* frame_state,
- Node* effect, Node* control);
- ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
- CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
- Node* control);
- ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
- Node* control);
- ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
- Node* effect, Node* control);
+ Maybe<Node*> LowerFloat64RoundUp(Node* node);
+ Maybe<Node*> LowerFloat64RoundDown(Node* node);
+ Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
+ Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
+
+ Node* AllocateHeapNumberWithValue(Node* node);
+ Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+ Node* frame_state);
+ Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+ Node* value,
+ Node* frame_state);
+ Node* BuildFloat64RoundDown(Node* value);
+ Node* LowerStringComparison(Callable const& callable, Node* node);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeUint32ToSmi(Node* value);
- Node* ChangeInt32ToFloat64(Node* value);
- Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ObjectIsSmi(Node* value);
@@ -222,15 +139,14 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
SimplifiedOperatorBuilder* simplified() const;
MachineOperatorBuilder* machine() const;
- Operator const* ToNumberOperator();
+ GraphAssembler* gasm() { return &graph_assembler_; }
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
-
- SetOncePointer<Operator const> to_number_operator_;
+ GraphAssembler graph_assembler_;
};
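
The signature change above reflects that effect and control are no longer threaded through every lowering by hand; the embedded GraphAssembler carries them as current state. A sketch of the resulting calling convention (hypothetical caller code, presumably close to what TryWireInStateEffect does with the Lower* methods declared above):

    // Before: ValueEffectControl vec = LowerX(node, effect, control);
    // After, assuming gasm() was Reset() to the incoming effect/control:
    Node* value = LowerX(node);
    *effect = gasm()->ExtractCurrentEffect();
    *control = gasm()->ExtractCurrentControl();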
} // namespace compiler
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.cc b/deps/v8/src/compiler/escape-analysis-reducer.cc
index f7708f85da..10b7f285a6 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.cc
+++ b/deps/v8/src/compiler/escape-analysis-reducer.cc
@@ -31,7 +31,7 @@ EscapeAnalysisReducer::EscapeAnalysisReducer(Editor* editor, JSGraph* jsgraph,
fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
-Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
fully_reduced_.Contains(node->id())) {
return NoChange();
@@ -61,8 +61,7 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
break;
}
bool depends_on_object_state = false;
- for (int i = 0; i < node->InputCount(); i++) {
- Node* input = node->InputAt(i);
+ for (Node* input : node->inputs()) {
switch (input->opcode()) {
case IrOpcode::kAllocate:
case IrOpcode::kFinishRegion:
@@ -97,9 +96,18 @@ Reduction EscapeAnalysisReducer::Reduce(Node* node) {
return NoChange();
}
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+ Reduction reduction = ReduceNode(node);
+ if (reduction.Changed() && node != reduction.replacement()) {
+ escape_analysis()->SetReplacement(node, reduction.replacement());
+ }
+ return reduction;
+}
+
namespace {
-Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+Node* MaybeGuard(JSGraph* jsgraph, Zone* zone, Node* original,
+ Node* replacement) {
// We might need to guard the replacement if the type of the {replacement}
// node is not in a sub-type relation to the type of the {original} node.
Type* const replacement_type = NodeProperties::GetType(replacement);
@@ -108,10 +116,18 @@ Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
Node* const control = NodeProperties::GetControlInput(original);
replacement = jsgraph->graph()->NewNode(
jsgraph->common()->TypeGuard(original_type), replacement, control);
+ NodeProperties::SetType(replacement, original_type);
}
return replacement;
}
+Node* SkipTypeGuards(Node* node) {
+ while (node->opcode() == IrOpcode::kTypeGuard) {
+ node = NodeProperties::GetValueInput(node, 0);
+ }
+ return node;
+}
+
} // namespace
Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
@@ -120,12 +136,13 @@ Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
fully_reduced_.Add(node->id());
}
- if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (escape_analysis()->IsVirtual(
+ SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
if (Node* rep = escape_analysis()->GetReplacement(node)) {
isolate()->counters()->turbo_escape_loads_replaced()->Increment();
TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
- rep = MaybeGuard(jsgraph(), node, rep);
+ rep = MaybeGuard(jsgraph(), zone(), node, rep);
ReplaceWithValue(node, rep);
return Replace(rep);
}
@@ -140,7 +157,8 @@ Reduction EscapeAnalysisReducer::ReduceStore(Node* node) {
if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
fully_reduced_.Add(node->id());
}
- if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+ if (escape_analysis()->IsVirtual(
+ SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
TRACE("Removed #%d (%s) from effect chain\n", node->id(),
node->op()->mnemonic());
RelaxEffectsAndControls(node);
@@ -195,14 +213,14 @@ Reduction EscapeAnalysisReducer::ReduceFinishRegion(Node* node) {
Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
- Node* left = NodeProperties::GetValueInput(node, 0);
- Node* right = NodeProperties::GetValueInput(node, 1);
+ Node* left = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
+ Node* right = SkipTypeGuards(NodeProperties::GetValueInput(node, 1));
if (escape_analysis()->IsVirtual(left)) {
if (escape_analysis()->IsVirtual(right) &&
escape_analysis()->CompareVirtualObjects(left, right)) {
ReplaceWithValue(node, jsgraph()->TrueConstant());
TRACE("Replaced ref eq #%d with true\n", node->id());
- Replace(jsgraph()->TrueConstant());
+ return Replace(jsgraph()->TrueConstant());
}
// Right-hand side is not a virtual object, or a different one.
ReplaceWithValue(node, jsgraph()->FalseConstant());
@@ -220,7 +238,7 @@ Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
- Node* input = NodeProperties::GetValueInput(node, 0);
+ Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
if (escape_analysis()->IsVirtual(input)) {
ReplaceWithValue(node, jsgraph()->FalseConstant());
TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
@@ -313,7 +331,7 @@ Node* EscapeAnalysisReducer::ReduceStateValueInput(Node* node, int node_index,
bool node_multiused,
bool already_cloned,
bool multiple_users) {
- Node* input = NodeProperties::GetValueInput(node, node_index);
+ Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, node_index));
if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
fully_reduced_.Contains(node->id())) {
return nullptr;
diff --git a/deps/v8/src/compiler/escape-analysis-reducer.h b/deps/v8/src/compiler/escape-analysis-reducer.h
index 61e7607a36..746d84030e 100644
--- a/deps/v8/src/compiler/escape-analysis-reducer.h
+++ b/deps/v8/src/compiler/escape-analysis-reducer.h
@@ -33,6 +33,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysisReducer final
bool compilation_failed() const { return compilation_failed_; }
private:
+ Reduction ReduceNode(Node* node);
Reduction ReduceLoad(Node* node);
Reduction ReduceStore(Node* node);
Reduction ReduceAllocate(Node* node);
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 0218045971..52c7e74c10 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -12,6 +12,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
@@ -201,7 +202,7 @@ class VirtualObject : public ZoneObject {
}
bool UpdateFrom(const VirtualObject& other);
bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common);
+ CommonOperatorBuilder* common, bool initialMerge);
void SetObjectState(Node* node) { object_state_ = node; }
Node* GetObjectState() const { return object_state_; }
bool IsCopyRequired() const { return status_ & kCopyRequired; }
@@ -252,10 +253,14 @@ bool VirtualObject::UpdateFrom(const VirtualObject& other) {
class VirtualState : public ZoneObject {
public:
VirtualState(Node* owner, Zone* zone, size_t size)
- : info_(size, nullptr, zone), owner_(owner) {}
+ : info_(size, nullptr, zone),
+ initialized_(static_cast<int>(size), zone),
+ owner_(owner) {}
VirtualState(Node* owner, const VirtualState& state)
: info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+ initialized_(state.initialized_.length(),
+ state.info_.get_allocator().zone()),
owner_(owner) {
for (size_t i = 0; i < info_.size(); ++i) {
if (state.info_[i]) {
@@ -280,6 +285,7 @@ class VirtualState : public ZoneObject {
private:
ZoneVector<VirtualObject*> info_;
+ BitVector initialized_;
Node* owner_;
DISALLOW_COPY_AND_ASSIGN(VirtualState);
@@ -375,6 +381,7 @@ VirtualObject* VirtualState::VirtualObjectFromAlias(size_t alias) {
void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
info_[alias] = obj;
+ if (obj) initialized_.Add(alias);
}
bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
@@ -431,7 +438,6 @@ bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
}
return true;
}
-
} // namespace
bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -440,12 +446,21 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
int value_input_count = static_cast<int>(cache->fields().size());
Node* rep = GetField(i);
if (!rep || !IsCreatedPhi(i)) {
+ Type* phi_type = Type::None();
+ for (Node* input : cache->fields()) {
+ CHECK_NOT_NULL(input);
+ CHECK(!input->IsDead());
+ Type* input_type = NodeProperties::GetType(input);
+ phi_type = Type::Union(phi_type, input_type, graph->zone());
+ }
Node* control = NodeProperties::GetControlInput(at);
cache->fields().push_back(control);
Node* phi = graph->NewNode(
common->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache->fields().front());
+ NodeProperties::SetType(phi, phi_type);
SetField(i, phi, true);
+
#ifdef DEBUG
if (FLAG_trace_turbo_escape) {
PrintF(" Creating Phi #%d as merge of", phi->id());
@@ -471,12 +486,15 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
}
bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
- CommonOperatorBuilder* common) {
+ CommonOperatorBuilder* common,
+ bool initialMerge) {
DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
at->opcode() == IrOpcode::kPhi);
bool changed = false;
for (size_t i = 0; i < field_count(); ++i) {
- if (Node* field = cache->GetFields(i)) {
+ if (!initialMerge && GetField(i) == nullptr) continue;
+ Node* field = cache->GetFields(i);
+ if (field && !IsCreatedPhi(i)) {
changed = changed || GetField(i) != field;
SetField(i, field);
TRACE(" Field %zu agree on rep #%d\n", i, field->id());
@@ -516,8 +534,11 @@ bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
fields = std::min(obj->field_count(), fields);
}
}
- if (cache->objects().size() == cache->states().size()) {
+ if (cache->objects().size() == cache->states().size() &&
+ (mergeObject || !initialized_.Contains(alias))) {
+ bool initialMerge = false;
if (!mergeObject) {
+ initialMerge = true;
VirtualObject* obj = new (zone)
VirtualObject(cache->objects().front()->id(), this, zone, fields,
cache->objects().front()->IsInitialized());
@@ -542,7 +563,9 @@ bool VirtualState::MergeFrom(MergeCache* cache, Zone* zone, Graph* graph,
PrintF("\n");
}
#endif // DEBUG
- changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
+ changed =
+ mergeObject->MergeFrom(cache, at, graph, common, initialMerge) ||
+ changed;
} else {
if (mergeObject) {
TRACE(" Alias %d, virtual object removed\n", alias);
@@ -795,6 +818,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kSelect:
// TODO(mstarzinger): The following list of operators will eventually be
// handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+ case IrOpcode::kConvertTaggedHoleToUndefined:
case IrOpcode::kStringEqual:
case IrOpcode::kStringLessThan:
case IrOpcode::kStringLessThanOrEqual:
@@ -802,6 +826,7 @@ bool EscapeStatusAnalysis::CheckUsesForEscape(Node* uses, Node* rep,
case IrOpcode::kPlainPrimitiveToNumber:
case IrOpcode::kPlainPrimitiveToWord32:
case IrOpcode::kPlainPrimitiveToFloat64:
+ case IrOpcode::kStringCharAt:
case IrOpcode::kStringCharCodeAt:
case IrOpcode::kObjectIsCallable:
case IrOpcode::kObjectIsNumber:
@@ -863,7 +888,11 @@ EscapeAnalysis::EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common,
virtual_states_(zone),
replacements_(zone),
cycle_detection_(zone),
- cache_(nullptr) {}
+ cache_(nullptr) {
+ // Type slot_not_analyzed_ manually.
+ double v = OpParameter<double>(slot_not_analyzed_);
+ NodeProperties::SetType(slot_not_analyzed_, Type::Range(v, v, zone));
+}
EscapeAnalysis::~EscapeAnalysis() {}
@@ -966,6 +995,7 @@ void EscapeAnalysis::RunObjectAnalysis() {
// VirtualObjects, and we want to delay phis to improve performance.
if (use->opcode() == IrOpcode::kEffectPhi) {
if (!status_analysis_->IsInQueue(use->id())) {
+ status_analysis_->SetInQueue(use->id(), true);
queue.push_front(use);
}
} else if ((use->opcode() != IrOpcode::kLoadField &&
@@ -1044,6 +1074,19 @@ bool EscapeStatusAnalysis::IsEffectBranchPoint(Node* node) {
return false;
}
+namespace {
+
+bool HasFrameStateInput(const Operator* op) {
+ if (op->opcode() == IrOpcode::kCall || op->opcode() == IrOpcode::kTailCall) {
+ const CallDescriptor* d = CallDescriptorOf(op);
+ return d->NeedsFrameState();
+ } else {
+ return OperatorProperties::HasFrameStateInput(op);
+ }
+}
+
+} // namespace
+
bool EscapeAnalysis::Process(Node* node) {
switch (node->opcode()) {
case IrOpcode::kAllocate:
@@ -1080,6 +1123,9 @@ bool EscapeAnalysis::Process(Node* node) {
ProcessAllocationUsers(node);
break;
}
+ if (HasFrameStateInput(node->op())) {
+ virtual_states_[node->id()]->SetCopyRequired();
+ }
return true;
}
@@ -1173,8 +1219,7 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
static_cast<void*>(virtual_states_[effect->id()]),
effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
node->id());
- if (status_analysis_->IsEffectBranchPoint(effect) ||
- OperatorProperties::HasFrameStateInput(node->op())) {
+ if (status_analysis_->IsEffectBranchPoint(effect)) {
virtual_states_[node->id()]->SetCopyRequired();
TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
effect->id());
@@ -1393,10 +1438,16 @@ void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
Node* rep = replacement(load);
if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
int value_input_count = static_cast<int>(cache_->fields().size());
+ Type* phi_type = Type::None();
+ for (Node* input : cache_->fields()) {
+ Type* input_type = NodeProperties::GetType(input);
+ phi_type = Type::Union(phi_type, input_type, graph()->zone());
+ }
cache_->fields().push_back(NodeProperties::GetControlInput(from));
Node* phi = graph()->NewNode(
common()->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache_->fields().front());
+ NodeProperties::SetType(phi, phi_type);
status_analysis_->ResizeStatusVector();
SetReplacement(load, phi);
TRACE(" got phi created.\n");
@@ -1583,13 +1634,14 @@ Node* EscapeAnalysis::GetOrCreateObjectState(Node* effect, Node* node) {
cache_->fields().clear();
for (size_t i = 0; i < vobj->field_count(); ++i) {
if (Node* field = vobj->GetField(i)) {
- cache_->fields().push_back(field);
+ cache_->fields().push_back(ResolveReplacement(field));
}
}
int input_count = static_cast<int>(cache_->fields().size());
Node* new_object_state =
graph()->NewNode(common()->ObjectState(input_count), input_count,
&cache_->fields().front());
+ NodeProperties::SetType(new_object_state, Type::OtherInternal());
vobj->SetObjectState(new_object_state);
TRACE(
"Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index b85efe7349..34960dde83 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -35,6 +35,7 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
Node* GetOrCreateObjectState(Node* effect, Node* node);
bool IsCyclicObjectState(Node* effect, Node* node);
bool ExistsVirtualAllocate();
+ bool SetReplacement(Node* node, Node* rep);
private:
void RunObjectAnalysis();
@@ -59,7 +60,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
Node* replacement(Node* node);
Node* ResolveReplacement(Node* node);
- bool SetReplacement(Node* node, Node* rep);
bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
diff --git a/deps/v8/src/compiler/frame-elider.cc b/deps/v8/src/compiler/frame-elider.cc
index bb17d1215f..dd8db83dd5 100644
--- a/deps/v8/src/compiler/frame-elider.cc
+++ b/deps/v8/src/compiler/frame-elider.cc
@@ -114,13 +114,36 @@ bool FrameElider::PropagateIntoBlock(InstructionBlock* block) {
}
}
- // Propagate towards start ("upwards") if there are successors and all of
- // them need a frame.
- for (RpoNumber& succ : block->successors()) {
- if (!InstructionBlockAt(succ)->needs_frame()) return false;
+ // Propagate towards start ("upwards").
+ bool need_frame_successors = false;
+ if (block->SuccessorCount() == 1) {
+ // For single successors, propagate the needs_frame information.
+ need_frame_successors =
+ InstructionBlockAt(block->successors()[0])->needs_frame();
+ } else {
+ // For multiple successors, each successor can have only a single
+ // predecessor (because the graph is in edge-split form), so each successor
+ // can independently create/dismantle a frame if needed. Given this
+ // independent control, only propagate needs_frame if all non-deferred
+ // successor blocks need a frame.
+ for (RpoNumber& succ : block->successors()) {
+ InstructionBlock* successor_block = InstructionBlockAt(succ);
+ DCHECK_EQ(1, successor_block->PredecessorCount());
+ if (!successor_block->IsDeferred()) {
+ if (successor_block->needs_frame()) {
+ need_frame_successors = true;
+ } else {
+ return false;
+ }
+ }
+ }
+ }
+ if (need_frame_successors) {
+ block->mark_needs_frame();
+ return true;
+ } else {
+ return false;
}
- block->mark_needs_frame();
- return true;
}
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index a02fb0121c..ec014dac94 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -6,6 +6,7 @@
#include "src/base/functional.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index 8d463dfb78..a4d6829cfa 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -113,9 +113,9 @@ class Frame : public ZoneObject {
int AllocateSpillSlot(int width) {
int frame_slot_count_before = frame_slot_count_;
- int slot = AllocateAlignedFrameSlot(width);
- spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
- return slot;
+ AllocateAlignedFrameSlots(width);
+ spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
+ return frame_slot_count_ - 1;
}
int AlignFrame(int alignment = kDoubleSize);
@@ -131,23 +131,15 @@ class Frame : public ZoneObject {
static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
private:
- int AllocateAlignedFrameSlot(int width) {
- DCHECK(width == 4 || width == 8 || width == 16);
- if (kPointerSize == 4) {
- // Skip one slot if necessary.
- if (width > kPointerSize) {
- frame_slot_count_++;
- frame_slot_count_ |= 1;
- // 2 extra slots if width == 16.
- frame_slot_count_ += (width & 16) / 8;
- }
- } else {
- // No alignment when slots are 8 bytes.
- DCHECK_EQ(8, kPointerSize);
- // 1 extra slot if width == 16.
- frame_slot_count_ += (width & 16) / 16;
- }
- return frame_slot_count_++;
+ void AllocateAlignedFrameSlots(int width) {
+ DCHECK_LT(0, width);
+ int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
+ // Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes if
+ // multiple of 16.
+ int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
+ frame_slot_count_ =
+ RoundUp(frame_slot_count_ + new_frame_slots, align_to / kPointerSize);
+ DCHECK_LT(0, frame_slot_count_);
}
private:
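
A worked example of the new slot arithmetic, assuming kPointerSize == 4 (a 32-bit target): allocating a 16-byte spill slot while frame_slot_count_ is 3 gives new_frame_slots == 4 and align_to == 16, so the count is rounded up to a multiple of 16 / 4 == 4 slots, i.e. from 7 to 8. A stand-alone sketch:

    // Free-standing version of AllocateAlignedFrameSlots (sketch; RoundUp
    // expanded inline, kPointerSize fixed to 4 for illustration).
    int AllocateAlignedFrameSlots(int frame_slot_count, int width) {
      const int kPointerSize = 4;
      int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
      int align_to =
          (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
      int multiple = align_to / kPointerSize;
      return (frame_slot_count + new_frame_slots + multiple - 1) / multiple *
             multiple;
    }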
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
new file mode 100644
index 0000000000..235826e746
--- /dev/null
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -0,0 +1,287 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphAssembler::GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control,
+ Zone* zone)
+ : temp_zone_(zone),
+ jsgraph_(jsgraph),
+ current_effect_(effect),
+ current_control_(control) {}
+
+Node* GraphAssembler::IntPtrConstant(intptr_t value) {
+ return jsgraph()->IntPtrConstant(value);
+}
+
+Node* GraphAssembler::Int32Constant(int32_t value) {
+ return jsgraph()->Int32Constant(value);
+}
+
+Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
+ return graph()->NewNode(common()->Int32Constant(value));
+}
+
+Node* GraphAssembler::SmiConstant(int32_t value) {
+ return jsgraph()->SmiConstant(value);
+}
+
+Node* GraphAssembler::Uint32Constant(int32_t value) {
+ return jsgraph()->Uint32Constant(value);
+}
+
+Node* GraphAssembler::Float64Constant(double value) {
+ return jsgraph()->Float64Constant(value);
+}
+
+Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
+ return jsgraph()->HeapConstant(object);
+}
+
+Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
+ return jsgraph()->ExternalConstant(ref);
+}
+
+Node* GraphAssembler::CEntryStubConstant(int result_size) {
+ return jsgraph()->CEntryStubConstant(result_size);
+}
+
+#define SINGLETON_CONST_DEF(Name) \
+ Node* GraphAssembler::Name() { return jsgraph()->Name(); }
+JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DEF)
+#undef SINGLETON_CONST_DEF
+
+#define PURE_UNOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* input) { \
+ return graph()->NewNode(machine()->Name(), input); \
+ }
+PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DEF)
+#undef PURE_UNOP_DEF
+
+#define PURE_BINOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* left, Node* right) { \
+ return graph()->NewNode(machine()->Name(), left, right); \
+ }
+PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
+#undef PURE_BINOP_DEF
+
+#define CHECKED_BINOP_DEF(Name) \
+ Node* GraphAssembler::Name(Node* left, Node* right) { \
+ return graph()->NewNode(machine()->Name(), left, right, current_control_); \
+ }
+CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
+#undef CHECKED_BINOP_DEF
+
+Node* GraphAssembler::Float64RoundDown(Node* value) {
+ if (machine()->Float64RoundDown().IsSupported()) {
+ return graph()->NewNode(machine()->Float64RoundDown().op(), value);
+ }
+ return nullptr;
+}
+
+Node* GraphAssembler::Projection(int index, Node* value) {
+ return graph()->NewNode(common()->Projection(index), value, current_control_);
+}
+
+Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->Allocate(pretenure), size,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->LoadField(access), object,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadElement(ElementAccess const& access, Node* object,
+ Node* index) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->LoadElement(access), object, index,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
+ Node* value) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->StoreField(access), object, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreElement(ElementAccess const& access, Node* object,
+ Node* index, Node* value) {
+ return current_effect_ =
+ graph()->NewNode(simplified()->StoreElement(access), object, index,
+ value, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
+ Node* value) {
+ return current_effect_ =
+ graph()->NewNode(machine()->Store(rep), object, offset, value,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
+ return current_effect_ =
+ graph()->NewNode(machine()->Load(rep), object, offset,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Retain(Node* buffer) {
+ return current_effect_ =
+ graph()->NewNode(common()->Retain(), buffer, current_effect_);
+}
+
+Node* GraphAssembler::UnsafePointerAdd(Node* base, Node* external) {
+ return current_effect_ =
+ graph()->NewNode(machine()->UnsafePointerAdd(), base, external,
+ current_effect_, current_control_);
+}
+
+Node* GraphAssembler::ToNumber(Node* value) {
+ return current_effect_ =
+ graph()->NewNode(ToNumberOperator(), ToNumberBuiltinConstant(),
+ value, NoContextConstant(), current_effect_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+ Node* frame_state) {
+ return current_control_ = current_effect_ =
+ graph()->NewNode(common()->DeoptimizeIf(reason), condition,
+ frame_state, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+ Node* frame_state) {
+ return current_control_ = current_effect_ =
+ graph()->NewNode(common()->DeoptimizeUnless(reason), condition,
+ frame_state, current_effect_, current_control_);
+}
+
+void GraphAssembler::Branch(Node* condition,
+ GraphAssemblerStaticLabel<1>* if_true,
+ GraphAssemblerStaticLabel<1>* if_false) {
+ DCHECK_NOT_NULL(current_control_);
+
+ BranchHint hint = BranchHint::kNone;
+ if (if_true->IsDeferred() != if_false->IsDeferred()) {
+ hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
+ }
+
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+ MergeState(if_true);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+ MergeState(if_false);
+
+ current_control_ = nullptr;
+ current_effect_ = nullptr;
+}
+
+// Extractors (should only be used when destructing the assembler).
+Node* GraphAssembler::ExtractCurrentControl() {
+ Node* result = current_control_;
+ current_control_ = nullptr;
+ return result;
+}
+
+Node* GraphAssembler::ExtractCurrentEffect() {
+ Node* result = current_effect_;
+ current_effect_ = nullptr;
+ return result;
+}
+
+void GraphAssembler::Reset(Node* effect, Node* control) {
+ current_effect_ = effect;
+ current_control_ = control;
+}
+
+Operator const* GraphAssembler::ToNumberOperator() {
+ if (!to_number_operator_.is_set()) {
+ Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+ Operator::kEliminatable);
+ to_number_operator_.set(common()->Call(desc));
+ }
+ return to_number_operator_.get();
+}
+
+Node* GraphAssemblerLabel::PhiAt(size_t index) {
+ DCHECK(IsBound());
+ return GetBindingsPtrFor(index)[0];
+}
+
+GraphAssemblerLabel::GraphAssemblerLabel(GraphAssemblerLabelType is_deferred,
+ size_t merge_count, size_t var_count,
+ MachineRepresentation* representations,
+ Zone* zone)
+ : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred),
+ max_merge_count_(merge_count),
+ var_count_(var_count) {
+ effects_ = zone->NewArray<Node*>(MaxMergeCount() + 1);
+ for (size_t i = 0; i < MaxMergeCount() + 1; i++) {
+ effects_[i] = nullptr;
+ }
+
+ controls_ = zone->NewArray<Node*>(MaxMergeCount());
+ for (size_t i = 0; i < MaxMergeCount(); i++) {
+ controls_[i] = nullptr;
+ }
+
+ size_t num_bindings = (MaxMergeCount() + 1) * PhiCount() + 1;
+ bindings_ = zone->NewArray<Node*>(num_bindings);
+ for (size_t i = 0; i < num_bindings; i++) {
+ bindings_[i] = nullptr;
+ }
+
+ representations_ = zone->NewArray<MachineRepresentation>(PhiCount() + 1);
+ for (size_t i = 0; i < PhiCount(); i++) {
+ representations_[i] = representations[i];
+ }
+}
+
+GraphAssemblerLabel::~GraphAssemblerLabel() {
+ DCHECK(IsBound() || MergedCount() == 0);
+}
+
+Node** GraphAssemblerLabel::GetBindingsPtrFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return &bindings_[phi_index * (MaxMergeCount() + 1)];
+}
+
+void GraphAssemblerLabel::SetBinding(size_t phi_index, size_t merge_index,
+ Node* binding) {
+ DCHECK_LT(phi_index, PhiCount());
+ DCHECK_LT(merge_index, MaxMergeCount());
+ bindings_[phi_index * (MaxMergeCount() + 1) + merge_index] = binding;
+}
+
+MachineRepresentation GraphAssemblerLabel::GetRepresentationFor(
+ size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return representations_[phi_index];
+}
+
+Node** GraphAssemblerLabel::GetControlsPtr() { return controls_; }
+
+Node** GraphAssemblerLabel::GetEffectsPtr() { return effects_; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
new file mode 100644
index 0000000000..61f8f5b61d
--- /dev/null
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -0,0 +1,449 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
+#define V8_COMPILER_GRAPH_ASSEMBLER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGraph;
+class Graph;
+
+namespace compiler {
+
+#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(TruncateInt64ToInt32) \
+ V(RoundFloat64ToInt32) \
+ V(TruncateFloat64ToWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64Abs) \
+ V(BitcastWordToTagged)
+
+#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
+ V(WordShl) \
+ V(WordSar) \
+ V(WordAnd) \
+ V(Word32Or) \
+ V(Word32And) \
+ V(Word32Shr) \
+ V(Word32Shl) \
+ V(IntAdd) \
+ V(IntSub) \
+ V(UintLessThan) \
+ V(Int32Add) \
+ V(Int32Sub) \
+ V(Int32Mul) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Int32LessThan) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mod) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual) \
+ V(Word32Equal) \
+ V(WordEqual)
+
+#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
+ V(Int32AddWithOverflow) \
+ V(Int32SubWithOverflow) \
+ V(Int32MulWithOverflow) \
+ V(Int32Mod) \
+ V(Int32Div) \
+ V(Uint32Mod) \
+ V(Uint32Div)
+
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+ V(TrueConstant) \
+ V(FalseConstant) \
+ V(HeapNumberMapConstant) \
+ V(NoContextConstant) \
+ V(EmptyStringConstant) \
+ V(UndefinedConstant) \
+ V(TheHoleConstant) \
+ V(FixedArrayMapConstant) \
+ V(ToNumberBuiltinConstant) \
+ V(AllocateInNewSpaceStubConstant) \
+ V(AllocateInOldSpaceStubConstant)
+
+class GraphAssembler;
+
+enum class GraphAssemblerLabelType { kDeferred, kNonDeferred };
+
+// Label with statically known count of incoming branches and phis.
+template <size_t MergeCount, size_t VarCount = 0u>
+class GraphAssemblerStaticLabel {
+ public:
+ Node* PhiAt(size_t index);
+
+ template <typename... Reps>
+ explicit GraphAssemblerStaticLabel(GraphAssemblerLabelType is_deferred,
+ Reps... reps)
+ : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred) {
+ STATIC_ASSERT(VarCount == sizeof...(reps));
+ MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+ reps...};
+ for (size_t i = 0; i < VarCount; i++) {
+ representations_[i] = reps_array[i + 1];
+ }
+ }
+
+ ~GraphAssemblerStaticLabel() { DCHECK(IsBound() || MergedCount() == 0); }
+
+ private:
+ friend class GraphAssembler;
+
+ void SetBound() {
+ DCHECK(!IsBound());
+ DCHECK_EQ(merged_count_, MergeCount);
+ is_bound_ = true;
+ }
+ bool IsBound() const { return is_bound_; }
+
+ size_t PhiCount() const { return VarCount; }
+ size_t MaxMergeCount() const { return MergeCount; }
+ size_t MergedCount() const { return merged_count_; }
+ bool IsDeferred() const { return is_deferred_; }
+
+ // For each phi, the buffer must have at least MaxMergeCount() + 1
+ // node entries.
+ Node** GetBindingsPtrFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return &bindings_[phi_index * (MergeCount + 1)];
+ }
+ void SetBinding(size_t phi_index, size_t merge_index, Node* binding) {
+ DCHECK_LT(phi_index, PhiCount());
+ DCHECK_LT(merge_index, MergeCount);
+ bindings_[phi_index * (MergeCount + 1) + merge_index] = binding;
+ }
+ MachineRepresentation GetRepresentationFor(size_t phi_index) {
+ DCHECK_LT(phi_index, PhiCount());
+ return representations_[phi_index];
+ }
+ // The controls buffer must have at least MaxMergeCount() entries.
+ Node** GetControlsPtr() { return controls_; }
+ // The effects buffer must have at least MaxMergeCount() + 1 entries.
+ Node** GetEffectsPtr() { return effects_; }
+ void IncrementMergedCount() { merged_count_++; }
+
+ bool is_bound_ = false;
+ bool is_deferred_;
+ size_t merged_count_ = 0;
+ Node* effects_[MergeCount + 1]; // Extra element for control edge,
+ // so that we can use the array to
+ // construct EffectPhi.
+ Node* controls_[MergeCount];
+ Node* bindings_[(MergeCount + 1) * VarCount + 1];
+ MachineRepresentation representations_[VarCount + 1];
+};
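
The extra trailing slot in each buffer lets Bind() append the merge control and hand the array straight to NewNode. Schematically, for a label with two incoming edges and one variable (the phi handling is by analogy with the effect handling shown in Bind() below; slot 0 presumably holds the finished phi, which is what PhiAt() reads):

    // effects_  = { e0, e1, <Merge> }  ->  EffectPhi(2)(e0, e1, Merge)
    // bindings_ = { v0, v1, <Merge> }  ->  Phi(rep, 2)(v0, v1, Merge)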
+
+// General label (with zone allocated buffers for incoming branches and phi
+// inputs).
+class GraphAssemblerLabel {
+ public:
+ Node* PhiAt(size_t index);
+
+ GraphAssemblerLabel(GraphAssemblerLabelType is_deferred, size_t merge_count,
+ size_t var_count, MachineRepresentation* representations,
+ Zone* zone);
+
+ ~GraphAssemblerLabel();
+
+ private:
+ friend class GraphAssembler;
+
+ void SetBound() {
+ DCHECK(!is_bound_);
+ is_bound_ = true;
+ }
+ bool IsBound() const { return is_bound_; }
+ size_t PhiCount() const { return var_count_; }
+ size_t MaxMergeCount() const { return max_merge_count_; }
+ size_t MergedCount() const { return merged_count_; }
+ bool IsDeferred() const { return is_deferred_; }
+
+ // For each phi, the buffer must have at least MaxMergeCount() + 1
+ // node entries.
+ Node** GetBindingsPtrFor(size_t phi_index);
+ void SetBinding(size_t phi_index, size_t merge_index, Node* binding);
+ MachineRepresentation GetRepresentationFor(size_t phi_index);
+ // The controls buffer must have at least MaxMergeCount() entries.
+ Node** GetControlsPtr();
+ // The effects buffer must have at least MaxMergeCount() + 1 entries.
+ Node** GetEffectsPtr();
+ void IncrementMergedCount() { merged_count_++; }
+
+ bool is_bound_ = false;
+ bool is_deferred_;
+ size_t merged_count_ = 0;
+ size_t max_merge_count_;
+ size_t var_count_;
+ Node** effects_ = nullptr;
+ Node** controls_ = nullptr;
+ Node** bindings_ = nullptr;
+ MachineRepresentation* representations_ = nullptr;
+};
+
+class GraphAssembler {
+ public:
+ GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control, Zone* zone);
+
+ void Reset(Node* effect, Node* control);
+
+ // Create non-deferred label with statically known number of incoming
+ // gotos/branches.
+ template <size_t MergeCount, typename... Reps>
+ static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)> MakeLabel(
+ Reps... reps) {
+ return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+ GraphAssemblerLabelType::kNonDeferred, reps...);
+ }
+
+ // Create deferred label with statically known number of incoming
+ // gotos/branches.
+ template <size_t MergeCount, typename... Reps>
+ static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>
+ MakeDeferredLabel(Reps... reps) {
+ return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+ GraphAssemblerLabelType::kDeferred, reps...);
+ }
+
+ // Create label with number of incoming branches supplied at runtime.
+ template <typename... Reps>
+ GraphAssemblerLabel MakeLabelFor(GraphAssemblerLabelType is_deferred,
+ size_t merge_count, Reps... reps) {
+ MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+ reps...};
+ return GraphAssemblerLabel(is_deferred, merge_count, sizeof...(reps),
+ &(reps_array[1]), temp_zone());
+ }
+
+ // Value creation.
+ Node* IntPtrConstant(intptr_t value);
+ Node* Uint32Constant(int32_t value);
+ Node* Int32Constant(int32_t value);
+ Node* UniqueInt32Constant(int32_t value);
+ Node* SmiConstant(int32_t value);
+ Node* Float64Constant(double value);
+ Node* Projection(int index, Node* value);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* CEntryStubConstant(int result_size);
+ Node* ExternalConstant(ExternalReference ref);
+
+#define SINGLETON_CONST_DECL(Name) Node* Name();
+ JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DECL)
+#undef SINGLETON_CONST_DECL
+
+#define PURE_UNOP_DECL(Name) Node* Name(Node* input);
+ PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DECL)
+#undef PURE_UNOP_DECL
+
+#define BINOP_DECL(Name) Node* Name(Node* left, Node* right);
+ PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+ CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+#undef BINOP_DECL
+
+ Node* Float64RoundDown(Node* value);
+
+ Node* ToNumber(Node* value);
+ Node* Allocate(PretenureFlag pretenure, Node* size);
+ Node* LoadField(FieldAccess const&, Node* object);
+ Node* LoadElement(ElementAccess const&, Node* object, Node* index);
+ Node* StoreField(FieldAccess const&, Node* object, Node* value);
+ Node* StoreElement(ElementAccess const&, Node* object, Node* index,
+ Node* value);
+
+ Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
+ Node* Load(MachineType rep, Node* object, Node* offset);
+
+ Node* Retain(Node* buffer);
+ Node* UnsafePointerAdd(Node* base, Node* external);
+
+ Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+ Node* frame_state);
+ template <typename... Args>
+ Node* Call(const CallDescriptor* desc, Args... args);
+ template <typename... Args>
+ Node* Call(const Operator* op, Args... args);
+
+ // Basic control operations.
+ template <class LabelType>
+ void Bind(LabelType* label);
+
+ template <class LabelType, typename... vars>
+ void Goto(LabelType* label, vars...);
+
+ void Branch(Node* condition, GraphAssemblerStaticLabel<1>* if_true,
+ GraphAssemblerStaticLabel<1>* if_false);
+
+ // Control helpers.
+ // {GotoIf(c, l)} is equivalent to {Branch(c, l, templ);Bind(templ)}.
+ template <class LabelType, typename... vars>
+ void GotoIf(Node* condition, LabelType* label, vars...);
+
+ // {GotoUnless(c, l)} is equivalent to {Branch(c, templ, l);Bind(templ)}.
+ template <class LabelType, typename... vars>
+ void GotoUnless(Node* condition, LabelType* label, vars...);
+
+  // Extractors (should only be used when destroying or resetting the assembler).
+ Node* ExtractCurrentControl();
+ Node* ExtractCurrentEffect();
+
+ private:
+ template <class LabelType, typename... Vars>
+ void MergeState(LabelType label, Vars... vars);
+
+ Operator const* ToNumberOperator();
+
+ JSGraph* jsgraph() const { return jsgraph_; }
+ Graph* graph() const { return jsgraph_->graph(); }
+ Zone* temp_zone() const { return temp_zone_; }
+ CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+ MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+ SimplifiedOperatorBuilder* simplified() const {
+ return jsgraph()->simplified();
+ }
+
+ SetOncePointer<Operator const> to_number_operator_;
+ Zone* temp_zone_;
+ JSGraph* jsgraph_;
+ Node* current_effect_;
+ Node* current_control_;
+};
+
+template <size_t MergeCount, size_t VarCount>
+Node* GraphAssemblerStaticLabel<MergeCount, VarCount>::PhiAt(size_t index) {
+ DCHECK(IsBound());
+ return GetBindingsPtrFor(index)[0];
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::MergeState(LabelType label, Vars... vars) {
+ DCHECK(!label->IsBound());
+ size_t merged_count = label->MergedCount();
+ DCHECK_LT(merged_count, label->MaxMergeCount());
+ DCHECK_EQ(label->PhiCount(), sizeof...(vars));
+ label->GetEffectsPtr()[merged_count] = current_effect_;
+ label->GetControlsPtr()[merged_count] = current_control_;
+ // We need to start with nullptr to avoid 0-length arrays.
+ Node* var_array[] = {nullptr, vars...};
+ for (size_t i = 0; i < sizeof...(vars); i++) {
+ label->SetBinding(i, merged_count, var_array[i + 1]);
+ }
+ label->IncrementMergedCount();
+}
+
+template <class LabelType>
+void GraphAssembler::Bind(LabelType* label) {
+ DCHECK(current_control_ == nullptr);
+ DCHECK(current_effect_ == nullptr);
+ DCHECK(label->MaxMergeCount() > 0);
+ DCHECK_EQ(label->MaxMergeCount(), label->MergedCount());
+
+ int merge_count = static_cast<int>(label->MaxMergeCount());
+ if (merge_count == 1) {
+ current_control_ = label->GetControlsPtr()[0];
+ current_effect_ = label->GetEffectsPtr()[0];
+ label->SetBound();
+ return;
+ }
+
+ current_control_ = graph()->NewNode(common()->Merge(merge_count), merge_count,
+ label->GetControlsPtr());
+
+ Node** effects = label->GetEffectsPtr();
+ current_effect_ = effects[0];
+ for (size_t i = 1; i < label->MaxMergeCount(); i++) {
+ if (current_effect_ != effects[i]) {
+ effects[label->MaxMergeCount()] = current_control_;
+ current_effect_ = graph()->NewNode(common()->EffectPhi(merge_count),
+ merge_count + 1, effects);
+ break;
+ }
+ }
+
+ for (size_t var = 0; var < label->PhiCount(); var++) {
+ Node** bindings = label->GetBindingsPtrFor(var);
+ bindings[label->MaxMergeCount()] = current_control_;
+ bindings[0] = graph()->NewNode(
+ common()->Phi(label->GetRepresentationFor(var), merge_count),
+ merge_count + 1, bindings);
+ }
+
+ label->SetBound();
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::Goto(LabelType* label, Vars... vars) {
+ DCHECK_NOT_NULL(current_control_);
+ DCHECK_NOT_NULL(current_effect_);
+ MergeState(label, vars...);
+ current_control_ = nullptr;
+ current_effect_ = nullptr;
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoIf(Node* condition, LabelType* label, Vars... vars) {
+ BranchHint hint =
+ label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+ MergeState(label, vars...);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoUnless(Node* condition, LabelType* label,
+ Vars... vars) {
+ BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+ Node* branch =
+ graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+ current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+ MergeState(label, vars...);
+
+ current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
+ const Operator* op = common()->Call(desc);
+ return Call(op, args...);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const Operator* op, Args... args) {
+ DCHECK_EQ(IrOpcode::kCall, op->opcode());
+ Node* args_array[] = {args..., current_effect_, current_control_};
+ int size = static_cast<int>(sizeof...(args)) + op->EffectInputCount() +
+ op->ControlInputCount();
+ Node* call = graph()->NewNode(op, size, args_array);
+ DCHECK_EQ(0, op->ControlOutputCount());
+ current_effect_ = call;
+ return call;
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_GRAPH_ASSEMBLER_H_
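To see how the label API above fits together: a label carries one phi slot per representation passed to MakeLabel, each Goto/GotoIf fills the slots, and Bind materializes the Merge/EffectPhi/Phi nodes. A minimal sketch, assuming a GraphAssembler `gasm` and Nodes `cond`, `a`, `b` already exist (the names are illustrative, not from this patch):

    auto done = GraphAssembler::MakeLabel<2>(MachineRepresentation::kWord32);
    gasm.GotoIf(cond, &done, a);   // first incoming edge; phi slot 0 gets a
    gasm.Goto(&done, b);           // second incoming edge; phi slot 0 gets b
    gasm.Bind(&done);              // merges control/effect, builds the Phi
    Node* result = done.PhiAt(0);  // Phi(kWord32, a, b)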
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index b13b954714..117e569ad8 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -25,15 +25,17 @@ enum class GraphReducer::State : uint8_t {
void Reducer::Finalize() {}
-
GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
: graph_(graph),
dead_(dead),
state_(graph, 4),
reducers_(zone),
revisit_(zone),
- stack_(zone) {}
-
+ stack_(zone) {
+ if (dead != nullptr) {
+ NodeProperties::SetType(dead_, Type::None());
+ }
+}
GraphReducer::~GraphReducer() {}
@@ -113,17 +115,23 @@ void GraphReducer::ReduceTop() {
if (node->IsDead()) return Pop(); // Node was killed while on stack.
+ Node::Inputs node_inputs = node->inputs();
+
// Recurse on an input if necessary.
- int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
- for (int i = start; i < node->InputCount(); i++) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ int start = entry.input_index < node_inputs.count() ? entry.input_index : 0;
+ for (int i = start; i < node_inputs.count(); ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
- for (int i = 0; i < start; i++) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ for (int i = 0; i < start; ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
// Remember the max node id before reduction.
@@ -139,10 +147,13 @@ void GraphReducer::ReduceTop() {
Node* const replacement = reduction.replacement();
if (replacement == node) {
// In-place update of {node}, may need to recurse on an input.
- for (int i = 0; i < node->InputCount(); ++i) {
- Node* input = node->InputAt(i);
- entry.input_index = i + 1;
- if (input != node && Recurse(input)) return;
+ Node::Inputs node_inputs = node->inputs();
+ for (int i = 0; i < node_inputs.count(); ++i) {
+ Node* input = node_inputs[i];
+ if (input != node && Recurse(input)) {
+ entry.input_index = i + 1;
+ return;
+ }
}
}
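The rewritten loops above keep the resume-where-you-left-off semantics of the old code, but now snapshot node->inputs() once and write entry.input_index back only when the walk actually yields to an input. A sketch of the pattern, with the V8 details elided (Recurse is assumed to push the input and return true iff it still needs processing):

    for (int i = start; i < inputs.count(); ++i) {
      if (inputs[i] != node && Recurse(inputs[i])) {
        entry.input_index = i + 1;  // resume after this input next time
        return;                     // ReduceTop() is re-entered later
      }
    }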
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index ab20f8f11f..1043c91e2a 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -497,7 +497,11 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (positions != nullptr) {
SourcePosition position = positions->GetSourcePosition(node);
if (position.IsKnown()) {
- os_ << " pos:" << position.ScriptOffset();
+ os_ << " pos:";
+ if (position.isInlined()) {
+ os_ << "inlining(" << position.InliningId() << "),";
+ }
+ os_ << position.ScriptOffset();
}
}
os_ << " <|@\n";
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 20afdc104d..e004896ea2 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -66,9 +66,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
- (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
}
@@ -185,7 +183,6 @@ bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
-
class OutOfLineLoadInteger final : public OutOfLineCode {
public:
OutOfLineLoadInteger(CodeGenerator* gen, Register result)
@@ -316,7 +313,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(ool->exit()); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -331,7 +327,6 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
__ bind(&done); \
} while (false)
-
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
do { \
auto offset = i.InputRegister(0); \
@@ -896,10 +891,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ add(i.OutputRegister(0), i.InputRegister(2));
}
- __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
@@ -921,10 +916,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else {
__ sub(i.OutputRegister(0), i.InputRegister(2));
}
- __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
+ __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
@@ -1611,61 +1606,66 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
-
-// Assembles a branch after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- IA32OperandConverter i(this, instr);
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
- switch (branch->condition) {
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
case kUnorderedEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kEqual:
- __ j(equal, tlabel);
+ return equal;
break;
case kUnorderedNotEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kNotEqual:
- __ j(not_equal, tlabel);
+ return not_equal;
break;
case kSignedLessThan:
- __ j(less, tlabel);
+ return less;
break;
case kSignedGreaterThanOrEqual:
- __ j(greater_equal, tlabel);
+ return greater_equal;
break;
case kSignedLessThanOrEqual:
- __ j(less_equal, tlabel);
+ return less_equal;
break;
case kSignedGreaterThan:
- __ j(greater, tlabel);
+ return greater;
break;
case kUnsignedLessThan:
- __ j(below, tlabel);
+ return below;
break;
case kUnsignedGreaterThanOrEqual:
- __ j(above_equal, tlabel);
+ return above_equal;
break;
case kUnsignedLessThanOrEqual:
- __ j(below_equal, tlabel);
+ return below_equal;
break;
case kUnsignedGreaterThan:
- __ j(above, tlabel);
+ return above;
break;
case kOverflow:
- __ j(overflow, tlabel);
+ return overflow;
break;
case kNotOverflow:
- __ j(no_overflow, tlabel);
+ return no_overflow;
break;
default:
UNREACHABLE();
+ return no_condition;
break;
}
+}
+
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
}
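Background for the two special cases kept outside FlagsConditionToCondition: on ia32, floating-point compares (ucomiss/ucomisd) report an unordered result, i.e. a NaN operand, through the parity flag, which no single jcc condition can cover together with equality. A sketch of what the branch code above emits for the two unordered conditions (label names assumed):

    // kUnorderedEqual: NaN compares not-equal, so unordered goes false.
    //   j(parity_even, flabel); j(equal, tlabel); [jmp(flabel)]
    // kUnorderedNotEqual: unordered counts as not-equal, so it goes true.
    //   j(parity_even, tlabel); j(not_equal, tlabel); [jmp(flabel)]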
@@ -1675,6 +1675,71 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ IA32OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0, esi);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(esi, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_even, &end);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(condition), tlabel);
+ __ bind(&end);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1687,58 +1752,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = no_condition;
- switch (condition) {
- case kUnorderedEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ Move(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kEqual:
- cc = equal;
- break;
- case kUnorderedNotEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ mov(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kNotEqual:
- cc = not_equal;
- break;
- case kSignedLessThan:
- cc = less;
- break;
- case kSignedGreaterThanOrEqual:
- cc = greater_equal;
- break;
- case kSignedLessThanOrEqual:
- cc = less_equal;
- break;
- case kSignedGreaterThan:
- cc = greater;
- break;
- case kUnsignedLessThan:
- cc = below;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- cc = below_equal;
- break;
- case kUnsignedGreaterThan:
- cc = above;
- break;
- case kOverflow:
- cc = overflow;
- break;
- case kNotOverflow:
- cc = no_overflow;
- break;
- default:
- UNREACHABLE();
- break;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ Move(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ mov(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
}
+ Condition cc = FlagsConditionToCondition(condition);
+
__ bind(&check);
if (reg.is_byte_register()) {
// setcc for byte registers (al, bl, cl, dl).
@@ -2082,7 +2106,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, g.ToImmediate(source));
} else if (src_constant.type() == Constant::kFloat32) {
// TODO(turbofan): Can we do better here?
- uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+ uint32_t src = src_constant.ToFloat32AsInt();
if (destination->IsFPRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
@@ -2093,7 +2117,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
} else {
DCHECK_EQ(Constant::kFloat64, src_constant.type());
- uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+ uint64_t src = src_constant.ToFloat64AsInt();
uint32_t lower = static_cast<uint32_t>(src);
uint32_t upper = static_cast<uint32_t>(src >> 32);
if (destination->IsFPRegister()) {
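The switch from bit_cast of ToFloat32()/ToFloat64() to the *AsInt accessors avoids a float round trip that can silently rewrite the constant: per the TODO added to instruction.h further down in this patch, moving a signalling NaN through a floating-point value can quiet it (set the top mantissa bit), notably via x87 registers on ia32. A self-contained sketch of the failure mode in plain C++ (everything here is local to the example, not part of this patch):

    #include <cstdint>
    #include <cstring>

    // Returning a float by value may travel through st(0) on ia32, which
    // quiets a signalling NaN.
    float RoundTrip(uint32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof(f));
      return f;
    }

    int main() {
      const uint32_t snan = 0x7FA00000u;  // a float32 sNaN bit pattern
      float f = RoundTrip(snan);
      uint32_t back;
      std::memcpy(&back, &f, sizeof(back));
      // back may now be 0x7FE00000 (quiet NaN) instead of snan, which is
      // why AssembleMove copies constants as raw integer bits instead.
      return back == snan ? 0 : 1;
    }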
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index c827c68a5f..5548f55a1e 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -351,6 +351,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
 // This architecture supports unaligned access, so VisitLoad is used instead.
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1203,10 +1208,13 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
+ } else {
+ DCHECK(cont->IsTrap());
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1222,9 +1230,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1240,21 +1251,54 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+ if (hint_node->opcode() == IrOpcode::kLoad) {
+ MachineType hint = LoadRepresentationOf(hint_node->op());
+ if (node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant) {
+ int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(node)
+ : OpParameter<int64_t>(node);
+ if (hint == MachineType::Int8()) {
+ if (constant >= std::numeric_limits<int8_t>::min() &&
+ constant <= std::numeric_limits<int8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint8()) {
+ if (constant >= std::numeric_limits<uint8_t>::min() &&
+ constant <= std::numeric_limits<uint8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int16()) {
+ if (constant >= std::numeric_limits<int16_t>::min() &&
+ constant <= std::numeric_limits<int16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint16()) {
+ if (constant >= std::numeric_limits<uint16_t>::min() &&
+ constant <= std::numeric_limits<uint16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int32()) {
+ return hint;
+ } else if (hint == MachineType::Uint32()) {
+ if (constant >= 0) return hint;
+ }
+ }
+ }
+ return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+ : MachineType::None();
+}
+
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // Currently, if one of the two operands is not a Load, we don't know what its
- // machine representation is, so we bail out.
- // TODO(epertoso): we can probably get some size information out of immediates
- // and phi nodes.
- if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
- return opcode;
- }
+ // TODO(epertoso): we can probably get some size information out of phi nodes.
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- MachineType left_type = LoadRepresentationOf(left->op());
- MachineType right_type = LoadRepresentationOf(right->op());
+ MachineType left_type = MachineTypeForNarrow(left, right);
+ MachineType right_type = MachineTypeForNarrow(right, left);
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
@@ -1332,10 +1376,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on right side of comparison.
if (g.CanBeImmediate(right)) {
- if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
- // TODO(epertoso): we should use `narrowed_opcode' here once we match
- // immediates too.
- return VisitCompareWithMemoryOperand(selector, opcode, left,
+ if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+ return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
g.UseImmediate(right), cont);
}
return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1352,11 +1394,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
cont);
}
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1501,6 +1538,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
IA32OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
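A worked example of what MachineTypeForNarrow buys (values made up): comparing a Uint8 load against Int32Constant(200) now keeps a byte-width compare, because 200 fits [0, 255] and both sides report MachineType::Uint8(); against Int32Constant(300) the constant side reports MachineType::None(), the types differ, and the compare stays 32-bit. The range test is the usual numeric_limits check:

    // Sketch of the per-width test applied above (standalone helper):
    #include <cstdint>
    #include <limits>

    bool FitsUint8(int64_t c) {
      return c >= std::numeric_limits<uint8_t>::min() &&
             c <= std::numeric_limits<uint8_t>::max();
    }
    // FitsUint8(200) == true, FitsUint8(300) == false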
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index 6242e9804e..00b2733b3b 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -152,7 +152,8 @@ enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
kFlags_deoptimize = 2,
- kFlags_set = 3
+ kFlags_set = 3,
+ kFlags_trap = 4
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -207,9 +208,9 @@ typedef int32_t InstructionCode;
// the instruction.
typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
typedef BitField<AddressingMode, 8, 5> AddressingModeField;
-typedef BitField<FlagsMode, 13, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
-typedef BitField<int, 20, 12> MiscField;
+typedef BitField<FlagsMode, 13, 3> FlagsModeField;
+typedef BitField<FlagsCondition, 16, 5> FlagsConditionField;
+typedef BitField<int, 21, 11> MiscField;
} // namespace compiler
} // namespace internal
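The field widening above is forced by the new enumerator: kFlags_trap == 4 does not fit in the two bits (values 0-3) that FlagsModeField used to occupy, so the mode field grows to three bits, FlagsCondition shifts from bit 15 to bit 16, and MiscField gives up a bit (12 -> 11) so the whole encoding still fits the 32-bit InstructionCode:

    // Sketch of the invariant behind the new layout:
    static_assert(kFlags_trap < (1 << 3), "mode needs 3 bits once trap == 4");
    // 8 (opcode) + 5 (addressing) + 3 (mode) + 5 (condition) + 11 (misc) == 32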
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 6cb87ea0c0..1b1fa12e6e 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -5,8 +5,8 @@
#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
-#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
#include "src/compiler/linkage.h"
#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
@@ -345,10 +345,17 @@ class FlagsContinuation final {
return FlagsContinuation(condition, result);
}
+ // Creates a new flags continuation for a wasm trap.
+ static FlagsContinuation ForTrap(FlagsCondition condition,
+ Runtime::FunctionId trap_id, Node* result) {
+ return FlagsContinuation(condition, trap_id, result);
+ }
+
bool IsNone() const { return mode_ == kFlags_none; }
bool IsBranch() const { return mode_ == kFlags_branch; }
bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
+ bool IsTrap() const { return mode_ == kFlags_trap; }
FlagsCondition condition() const {
DCHECK(!IsNone());
return condition_;
@@ -365,6 +372,10 @@ class FlagsContinuation final {
DCHECK(IsSet());
return frame_state_or_result_;
}
+ Runtime::FunctionId trap_id() const {
+ DCHECK(IsTrap());
+ return trap_id_;
+ }
BasicBlock* true_block() const {
DCHECK(IsBranch());
return true_block_;
@@ -437,6 +448,15 @@ class FlagsContinuation final {
DCHECK_NOT_NULL(result);
}
+ FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
+ Node* result)
+ : mode_(kFlags_trap),
+ condition_(condition),
+ frame_state_or_result_(result),
+ trap_id_(trap_id) {
+ DCHECK_NOT_NULL(result);
+ }
+
FlagsMode const mode_;
FlagsCondition condition_;
   DeoptimizeReason reason_;  // Only valid if mode_ == kFlags_deoptimize
@@ -444,6 +464,7 @@ class FlagsContinuation final {
// or mode_ == kFlags_set.
BasicBlock* true_block_; // Only valid if mode_ == kFlags_branch.
BasicBlock* false_block_; // Only valid if mode_ == kFlags_branch.
+ Runtime::FunctionId trap_id_; // Only valid if mode_ == kFlags_trap.
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 8f899f3c8c..ae96b9106f 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -127,7 +127,6 @@ void InstructionSelector::AddInstruction(Instruction* instr) {
}
}
-
Instruction* InstructionSelector::Emit(InstructionCode opcode,
InstructionOperand output,
size_t temp_count,
@@ -414,13 +413,10 @@ void InstructionSelector::MarkAsRepresentation(MachineRepresentation rep,
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
-
namespace {
-enum class FrameStateInputKind { kAny, kStackSlot };
-
-InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
- FrameStateInputKind kind,
+InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
+ Node* input, FrameStateInputKind kind,
MachineRepresentation rep) {
if (rep == MachineRepresentation::kNone) {
return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
@@ -432,8 +428,30 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
case IrOpcode::kNumberConstant:
case IrOpcode::kFloat32Constant:
case IrOpcode::kFloat64Constant:
- case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
+ case IrOpcode::kHeapConstant: {
+ if (!CanBeTaggedPointer(rep)) {
+ // If we have inconsistent static and dynamic types, e.g. if we
+ // smi-check a string, we can get here with a heap object that
+ // says it is a smi. In that case, we return an invalid instruction
+ // operand, which will be interpreted as an optimized-out value.
+
+ // TODO(jarin) Ideally, we should turn the current instruction
+ // into an abort (we should never execute it).
+ return InstructionOperand();
+ }
+
+ Handle<HeapObject> constant = OpParameter<Handle<HeapObject>>(input);
+ Heap::RootListIndex root_index;
+ if (isolate->heap()->IsRootHandle(constant, &root_index) &&
+ root_index == Heap::kOptimizedOutRootIndex) {
+ // For an optimized-out object we return an invalid instruction
+ // operand, so that we take the fast path for optimized-out values.
+ return InstructionOperand();
+ }
+
+ return g->UseImmediate(input);
+ }
case IrOpcode::kObjectState:
case IrOpcode::kTypedObjectState:
UNREACHABLE();
@@ -452,6 +470,7 @@ InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
return InstructionOperand();
}
+} // namespace
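Both early returns above hand back a default-constructed InstructionOperand, whose kind is INVALID; the caller (AddOperandToStateValueDescriptor, below) treats that as "optimized out" rather than pushing an instruction input. The consumer-side contract, in sketch form (mirroring the code further down):

    InstructionOperand op = OperandForDeopt(isolate, g, input, kind, rep);
    if (op.kind() == InstructionOperand::INVALID) {
      values->PushOptimizedOut();  // deoptimizer sees an optimized-out slot
    } else {
      inputs->push_back(op);
      values->PushPlain(type);
    }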
class StateObjectDeduplicator {
public:
@@ -477,14 +496,16 @@ class StateObjectDeduplicator {
ZoneVector<Node*> objects_;
};
-
// Returns the number of instruction operands added to inputs.
-size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
- InstructionOperandVector* inputs,
- OperandGenerator* g,
- StateObjectDeduplicator* deduplicator,
- Node* input, MachineType type,
- FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddOperandToStateValueDescriptor(
+ StateValueList* values, InstructionOperandVector* inputs,
+ OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
+ MachineType type, FrameStateInputKind kind, Zone* zone) {
+ if (input == nullptr) {
+ values->PushOptimizedOut();
+ return 0;
+ }
+
switch (input->opcode()) {
case IrOpcode::kObjectState: {
UNREACHABLE();
@@ -495,41 +516,45 @@ size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
if (id == StateObjectDeduplicator::kNotDuplicated) {
size_t entries = 0;
id = deduplicator->InsertObject(input);
- descriptor->fields().push_back(
- StateValueDescriptor::Recursive(zone, id));
- StateValueDescriptor* new_desc = &descriptor->fields().back();
+ StateValueList* nested = values->PushRecursiveField(zone, id);
int const input_count = input->op()->ValueInputCount();
ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
for (int i = 0; i < input_count; ++i) {
entries += AddOperandToStateValueDescriptor(
- new_desc, inputs, g, deduplicator, input->InputAt(i),
- types->at(i), kind, zone);
+ nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
+ kind, zone);
}
return entries;
} else {
// Crankshaft counts duplicate objects for the running id, so we have
// to push the input again.
deduplicator->InsertObject(input);
- descriptor->fields().push_back(
- StateValueDescriptor::Duplicate(zone, id));
+ values->PushDuplicate(id);
return 0;
}
}
default: {
- inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
- descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
- return 1;
+ InstructionOperand op =
+ OperandForDeopt(isolate(), g, input, kind, type.representation());
+ if (op.kind() == InstructionOperand::INVALID) {
+ // Invalid operand means the value is impossible or optimized-out.
+ values->PushOptimizedOut();
+ return 0;
+ } else {
+ inputs->push_back(op);
+ values->PushPlain(type);
+ return 1;
+ }
}
}
}
// Returns the number of instruction operands added to inputs.
-size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
- Node* state, OperandGenerator* g,
- StateObjectDeduplicator* deduplicator,
- InstructionOperandVector* inputs,
- FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddInputsToFrameStateDescriptor(
+ FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
size_t entries = 0;
@@ -553,8 +578,12 @@ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
- StateValueDescriptor* values_descriptor =
- descriptor->GetStateValueDescriptor();
+ StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
+
+ DCHECK_EQ(values_descriptor->size(), 0u);
+ values_descriptor->ReserveSize(
+ descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+
entries += AddOperandToStateValueDescriptor(
values_descriptor, inputs, g, deduplicator, function,
MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
@@ -583,8 +612,6 @@ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
return entries;
}
-} // namespace
-
// An internal helper class for generating the operands to calls.
// TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -796,17 +823,30 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
}
+bool InstructionSelector::IsSourcePositionUsed(Node* node) {
+ return (source_position_mode_ == kAllSourcePositions ||
+ node->opcode() == IrOpcode::kCall ||
+ node->opcode() == IrOpcode::kTrapIf ||
+ node->opcode() == IrOpcode::kTrapUnless);
+}
+
void InstructionSelector::VisitBlock(BasicBlock* block) {
DCHECK(!current_block_);
current_block_ = block;
- int current_block_end = static_cast<int>(instructions_.size());
+ auto current_num_instructions = [&] {
+ DCHECK_GE(kMaxInt, instructions_.size());
+ return static_cast<int>(instructions_.size());
+ };
+ int current_block_end = current_num_instructions();
int effect_level = 0;
for (Node* const node : *block) {
if (node->opcode() == IrOpcode::kStore ||
node->opcode() == IrOpcode::kUnalignedStore ||
node->opcode() == IrOpcode::kCheckedStore ||
- node->opcode() == IrOpcode::kCall) {
+ node->opcode() == IrOpcode::kCall ||
+ node->opcode() == IrOpcode::kProtectedLoad ||
+ node->opcode() == IrOpcode::kProtectedStore) {
++effect_level;
}
SetEffectLevel(node, effect_level);
@@ -818,10 +858,25 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
SetEffectLevel(block->control_input(), effect_level);
}
+ auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
+ if (instruction_selection_failed()) return false;
+ if (current_num_instructions() == instruction_start) return true;
+ std::reverse(instructions_.begin() + instruction_start,
+ instructions_.end());
+ if (!node) return true;
+ SourcePosition source_position = source_positions_->GetSourcePosition(node);
+ if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
+ sequence()->SetSourcePosition(instructions_[instruction_start],
+ source_position);
+ }
+ return true;
+ };
+
// Generate code for the block control "top down", but schedule the code
// "bottom up".
VisitControl(block);
- std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+ if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+ return;
// Visit code in reverse control flow order, because architecture-specific
// matching may cover more than one node at a time.
@@ -830,19 +885,9 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
if (!IsUsed(node) || IsDefined(node)) continue;
// Generate code for this node "top down", but schedule the code "bottom
// up".
- size_t current_node_end = instructions_.size();
+ int current_node_end = current_num_instructions();
VisitNode(node);
- if (instruction_selection_failed()) return;
- std::reverse(instructions_.begin() + current_node_end, instructions_.end());
- if (instructions_.size() == current_node_end) continue;
- // Mark source position on first instruction emitted.
- SourcePosition source_position = source_positions_->GetSourcePosition(node);
- if (source_position.IsKnown() &&
- (source_position_mode_ == kAllSourcePositions ||
- node->opcode() == IrOpcode::kCall)) {
- sequence()->SetSourcePosition(instructions_[current_node_end],
- source_position);
- }
+ if (!FinishEmittedInstructions(node, current_node_end)) return;
}
// We're done with the block.
@@ -1013,6 +1058,12 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitDeoptimizeIf(node);
case IrOpcode::kDeoptimizeUnless:
return VisitDeoptimizeUnless(node);
+ case IrOpcode::kTrapIf:
+ return VisitTrapIf(node, static_cast<Runtime::FunctionId>(
+ OpParameter<int32_t>(node->op())));
+ case IrOpcode::kTrapUnless:
+ return VisitTrapUnless(node, static_cast<Runtime::FunctionId>(
+ OpParameter<int32_t>(node->op())));
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
case IrOpcode::kObjectState:
@@ -1033,6 +1084,8 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kStore:
return VisitStore(node);
+ case IrOpcode::kProtectedStore:
+ return VisitProtectedStore(node);
case IrOpcode::kWord32And:
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
@@ -1387,15 +1440,56 @@ void InstructionSelector::VisitNode(Node* node) {
}
case IrOpcode::kAtomicStore:
return VisitAtomicStore(node);
- case IrOpcode::kProtectedLoad:
+ case IrOpcode::kProtectedLoad: {
+ LoadRepresentation type = LoadRepresentationOf(node->op());
+ MarkAsRepresentation(type.representation(), node);
return VisitProtectedLoad(node);
+ }
case IrOpcode::kUnsafePointerAdd:
MarkAsRepresentation(MachineType::PointerRepresentation(), node);
return VisitUnsafePointerAdd(node);
+ case IrOpcode::kCreateFloat32x4:
+ return MarkAsSimd128(node), VisitCreateFloat32x4(node);
+ case IrOpcode::kFloat32x4ExtractLane:
+ return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
+ case IrOpcode::kFloat32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
+ case IrOpcode::kFloat32x4FromInt32x4:
+ return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
+ case IrOpcode::kFloat32x4FromUint32x4:
+ return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
+ case IrOpcode::kFloat32x4Abs:
+ return MarkAsSimd128(node), VisitFloat32x4Abs(node);
+ case IrOpcode::kFloat32x4Neg:
+ return MarkAsSimd128(node), VisitFloat32x4Neg(node);
+ case IrOpcode::kFloat32x4Add:
+ return MarkAsSimd128(node), VisitFloat32x4Add(node);
+ case IrOpcode::kFloat32x4Sub:
+ return MarkAsSimd128(node), VisitFloat32x4Sub(node);
+ case IrOpcode::kFloat32x4Equal:
+ return MarkAsSimd128(node), VisitFloat32x4Equal(node);
+ case IrOpcode::kFloat32x4NotEqual:
+ return MarkAsSimd128(node), VisitFloat32x4NotEqual(node);
case IrOpcode::kCreateInt32x4:
return MarkAsSimd128(node), VisitCreateInt32x4(node);
case IrOpcode::kInt32x4ExtractLane:
return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
+ case IrOpcode::kInt32x4ReplaceLane:
+ return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
+ case IrOpcode::kInt32x4FromFloat32x4:
+ return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
+ case IrOpcode::kUint32x4FromFloat32x4:
+ return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
+ case IrOpcode::kInt32x4Add:
+ return MarkAsSimd128(node), VisitInt32x4Add(node);
+ case IrOpcode::kInt32x4Sub:
+ return MarkAsSimd128(node), VisitInt32x4Sub(node);
+ case IrOpcode::kInt32x4Equal:
+ return MarkAsSimd128(node), VisitInt32x4Equal(node);
+ case IrOpcode::kInt32x4NotEqual:
+ return MarkAsSimd128(node), VisitInt32x4NotEqual(node);
+ case IrOpcode::kSimd32x4Select:
+ return MarkAsSimd128(node), VisitSimd32x4Select(node);
default:
V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
node->opcode(), node->op()->mnemonic(), node->id());
@@ -1538,7 +1632,7 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
}
void InstructionSelector::VisitStackSlot(Node* node) {
- int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+ int size = StackSlotSizeOf(node->op());
int slot = frame_->AllocateSpillSlot(size);
OperandGenerator g(this);
@@ -1547,8 +1641,7 @@ void InstructionSelector::VisitStackSlot(Node* node) {
}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
- OperandGenerator g(this);
- Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ EmitIdentity(node);
}
void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
@@ -1723,13 +1816,70 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
#endif // V8_TARGET_ARCH_64_BIT
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_X64
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
@@ -1970,7 +2120,8 @@ void InstructionSelector::VisitReturn(Node* ret) {
DCHECK_GE(input_count, 1);
auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
Node* pop_count = ret->InputAt(0);
- value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+ value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
+ pop_count->opcode() == IrOpcode::kInt64Constant)
? g.UseImmediate(pop_count)
: g.UseRegister(pop_count);
for (int i = 1; i < input_count; ++i) {
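Three independent behaviour changes in this file are easy to miss: protected loads and stores now bump the effect level, so compare matching cannot fold a memory operand across a potentially trapping access; TrapIf/TrapUnless join calls in always keeping their source position (IsSourcePositionUsed above), since wasm traps report positions; and a Return pop count that is an Int64Constant is now also emitted as an immediate on 64-bit targets, e.g. (value made up) a Return whose pop count is Int64Constant(3) uses g.UseImmediate instead of occupying a register.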
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 65ba8f7c71..b7753ce7b7 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -26,6 +26,7 @@ class FlagsContinuation;
class Linkage;
class OperandGenerator;
struct SwitchInfo;
+class StateObjectDeduplicator;
// This struct connects nodes of parameters which are going to be pushed on the
// call stack with their parameter index in the call descriptor of the callee.
@@ -42,6 +43,8 @@ class PushParameter {
MachineType type_;
};
+enum class FrameStateInputKind { kAny, kStackSlot };
+
// Instruction selection generates an InstructionSequence for a given Schedule.
class V8_EXPORT_PRIVATE InstructionSelector final {
public:
@@ -286,6 +289,17 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
int GetTempsCountForTailCallFromJSFunction();
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+ size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+ Node* state, OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ InstructionOperandVector* inputs,
+ FrameStateInputKind kind, Zone* zone);
+ size_t AddOperandToStateValueDescriptor(StateValueList* values,
+ InstructionOperandVector* inputs,
+ OperandGenerator* g,
+ StateObjectDeduplicator* deduplicator,
+ Node* input, MachineType type,
+ FrameStateInputKind kind, Zone* zone);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
@@ -307,8 +321,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
MACHINE_OP_LIST(DECLARE_GENERATOR)
- MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
- MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
+ MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR
void VisitFinishRegion(Node* node);
@@ -321,6 +334,8 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitCall(Node* call, BasicBlock* handler = nullptr);
void VisitDeoptimizeIf(Node* node);
void VisitDeoptimizeUnless(Node* node);
+ void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
+ void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
void VisitTailCall(Node* call);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
@@ -351,6 +366,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
bool instruction_selection_failed() { return instruction_selection_failed_; }
void MarkPairProjectionsAsWord32(Node* node);
+ bool IsSourcePositionUsed(Node* node);
// ===========================================================================
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 3b2311a23f..c4560b6e76 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -433,6 +433,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os << "deoptimize";
case kFlags_set:
return os << "set";
+ case kFlags_trap:
+ return os << "trap";
}
UNREACHABLE();
return os;
@@ -985,8 +987,18 @@ void InstructionSequence::PrintBlock(int block_id) const {
}
const RegisterConfiguration*
-InstructionSequence::GetRegisterConfigurationForTesting() {
- return GetRegConfig();
+ InstructionSequence::registerConfigurationForTesting_ = nullptr;
+
+const RegisterConfiguration*
+InstructionSequence::RegisterConfigurationForTesting() {
+ DCHECK(registerConfigurationForTesting_ != nullptr);
+ return registerConfigurationForTesting_;
+}
+
+void InstructionSequence::SetRegisterConfigurationForTesting(
+ const RegisterConfiguration* regConfig) {
+ registerConfigurationForTesting_ = regConfig;
+ GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
}
FrameStateDescriptor::FrameStateDescriptor(
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 327c8c1192..d62ffc43bd 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1065,16 +1065,33 @@ class V8_EXPORT_PRIVATE Constant final {
}
float ToFloat32() const {
+ // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as a float can cause
+ // the signalling bit to flip, and value_ is returned as a quiet NaN.
DCHECK_EQ(kFloat32, type());
return bit_cast<float>(static_cast<int32_t>(value_));
}
+ uint32_t ToFloat32AsInt() const {
+ DCHECK_EQ(kFloat32, type());
+ return bit_cast<uint32_t>(static_cast<int32_t>(value_));
+ }
+
double ToFloat64() const {
+ // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as a double can
+    // cause the signalling bit to flip, and value_ is returned as a quiet NaN.
if (type() == kInt32) return ToInt32();
DCHECK_EQ(kFloat64, type());
return bit_cast<double>(value_);
}
+ uint64_t ToFloat64AsInt() const {
+ if (type() == kInt32) return ToInt32();
+ DCHECK_EQ(kFloat64, type());
+ return bit_cast<uint64_t>(value_);
+ }
+
ExternalReference ToExternalReference() const {
DCHECK_EQ(kExternalReference, type());
return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
@@ -1104,52 +1121,125 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant);
// Forward declarations.
class FrameStateDescriptor;
-
-enum class StateValueKind { kPlain, kNested, kDuplicate };
-
+enum class StateValueKind : uint8_t {
+ kPlain,
+ kOptimizedOut,
+ kNested,
+ kDuplicate
+};
class StateValueDescriptor {
public:
- explicit StateValueDescriptor(Zone* zone)
+ StateValueDescriptor()
: kind_(StateValueKind::kPlain),
type_(MachineType::AnyTagged()),
- id_(0),
- fields_(zone) {}
+ id_(0) {}
- static StateValueDescriptor Plain(Zone* zone, MachineType type) {
- return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+ static StateValueDescriptor Plain(MachineType type) {
+ return StateValueDescriptor(StateValueKind::kPlain, type, 0);
+ }
+ static StateValueDescriptor OptimizedOut() {
+ return StateValueDescriptor(StateValueKind::kOptimizedOut,
+ MachineType::AnyTagged(), 0);
}
- static StateValueDescriptor Recursive(Zone* zone, size_t id) {
- return StateValueDescriptor(StateValueKind::kNested, zone,
+ static StateValueDescriptor Recursive(size_t id) {
+ return StateValueDescriptor(StateValueKind::kNested,
MachineType::AnyTagged(), id);
}
- static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
- return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+ static StateValueDescriptor Duplicate(size_t id) {
+ return StateValueDescriptor(StateValueKind::kDuplicate,
MachineType::AnyTagged(), id);
}
- size_t size() { return fields_.size(); }
- ZoneVector<StateValueDescriptor>& fields() { return fields_; }
int IsPlain() { return kind_ == StateValueKind::kPlain; }
+ int IsOptimizedOut() { return kind_ == StateValueKind::kOptimizedOut; }
int IsNested() { return kind_ == StateValueKind::kNested; }
int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
MachineType type() const { return type_; }
- MachineType GetOperandType(size_t index) const {
- return fields_[index].type_;
- }
size_t id() const { return id_; }
private:
- StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
- size_t id)
- : kind_(kind), type_(type), id_(id), fields_(zone) {}
+ StateValueDescriptor(StateValueKind kind, MachineType type, size_t id)
+ : kind_(kind), type_(type), id_(id) {}
StateValueKind kind_;
MachineType type_;
size_t id_;
- ZoneVector<StateValueDescriptor> fields_;
};
+class StateValueList {
+ public:
+ explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}
+
+ size_t size() { return fields_.size(); }
+
+ struct Value {
+ StateValueDescriptor* desc;
+ StateValueList* nested;
+
+ Value(StateValueDescriptor* desc, StateValueList* nested)
+ : desc(desc), nested(nested) {}
+ };
+
+ class iterator {
+ public:
+ // Bare minimum of operators needed for range iteration.
+ bool operator!=(const iterator& other) const {
+ return field_iterator != other.field_iterator;
+ }
+ bool operator==(const iterator& other) const {
+ return field_iterator == other.field_iterator;
+ }
+ iterator& operator++() {
+ if (field_iterator->IsNested()) {
+ nested_iterator++;
+ }
+ ++field_iterator;
+ return *this;
+ }
+ Value operator*() {
+ StateValueDescriptor* desc = &(*field_iterator);
+ StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
+ return Value(desc, nested);
+ }
+
+ private:
+ friend class StateValueList;
+
+ iterator(ZoneVector<StateValueDescriptor>::iterator it,
+ ZoneVector<StateValueList*>::iterator nested)
+ : field_iterator(it), nested_iterator(nested) {}
+
+ ZoneVector<StateValueDescriptor>::iterator field_iterator;
+ ZoneVector<StateValueList*>::iterator nested_iterator;
+ };
+
+ void ReserveSize(size_t size) { fields_.reserve(size); }
+
+ StateValueList* PushRecursiveField(Zone* zone, size_t id) {
+ fields_.push_back(StateValueDescriptor::Recursive(id));
+ StateValueList* nested =
+ new (zone->New(sizeof(StateValueList))) StateValueList(zone);
+ nested_.push_back(nested);
+ return nested;
+ }
+ void PushDuplicate(size_t id) {
+ fields_.push_back(StateValueDescriptor::Duplicate(id));
+ }
+ void PushPlain(MachineType type) {
+ fields_.push_back(StateValueDescriptor::Plain(type));
+ }
+ void PushOptimizedOut() {
+ fields_.push_back(StateValueDescriptor::OptimizedOut());
+ }
+
+ iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
+ iterator end() { return iterator(fields_.end(), nested_.end()); }
+
+ private:
+ ZoneVector<StateValueDescriptor> fields_;
+ ZoneVector<StateValueList*> nested_;
+};
class FrameStateDescriptor : public ZoneObject {
public:
@@ -1178,10 +1268,7 @@ class FrameStateDescriptor : public ZoneObject {
size_t GetFrameCount() const;
size_t GetJSFrameCount() const;
- MachineType GetType(size_t index) const {
- return values_.GetOperandType(index);
- }
- StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
+ StateValueList* GetStateValueDescriptors() { return &values_; }
static const int kImpossibleValue = 0xdead;
@@ -1192,7 +1279,7 @@ class FrameStateDescriptor : public ZoneObject {
size_t parameters_count_;
size_t locals_count_;
size_t stack_count_;
- StateValueDescriptor values_;
+ StateValueList values_;
MaybeHandle<SharedFunctionInfo> const shared_info_;
FrameStateDescriptor* outer_state_;
};
@@ -1500,7 +1587,9 @@ class V8_EXPORT_PRIVATE InstructionSequence final
void ValidateDeferredBlockEntryPaths() const;
void ValidateSSA() const;
- const RegisterConfiguration* GetRegisterConfigurationForTesting();
+ static void SetRegisterConfigurationForTesting(
+ const RegisterConfiguration* regConfig);
+ static void ClearRegisterConfigurationForTesting();
private:
friend V8_EXPORT_PRIVATE std::ostream& operator<<(
@@ -1508,6 +1597,9 @@ class V8_EXPORT_PRIVATE InstructionSequence final
typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
+ static const RegisterConfiguration* RegisterConfigurationForTesting();
+ static const RegisterConfiguration* registerConfigurationForTesting_;
+
Isolate* isolate_;
Zone* const zone_;
InstructionBlocks* const instruction_blocks_;
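Note the asymmetry in StateValueList: nested_ holds one entry per kNested field only, which is why the iterator advances nested_iterator solely when the current descriptor IsNested(). A minimal sketch of consuming it under that assumption (free function, not part of this patch):

    size_t CountPlain(StateValueList* list) {
      size_t count = 0;
      for (StateValueList::Value value : *list) {
        if (value.desc->IsPlain()) {
          ++count;
        } else if (value.desc->IsNested()) {
          count += CountPlain(value.nested);  // non-null exactly for kNested
        }
      }
      return count;
    }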
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 62523ca45c..ff61aa765d 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -61,7 +61,8 @@ void Int64Lowering::LowerGraph() {
// that they are processed after all other nodes.
PreparePhiReplacement(input);
stack_.push_front({input, 0});
- } else if (input->opcode() == IrOpcode::kEffectPhi) {
+ } else if (input->opcode() == IrOpcode::kEffectPhi ||
+ input->opcode() == IrOpcode::kLoop) {
stack_.push_front({input, 0});
} else {
stack_.push_back({input, 0});
@@ -104,6 +105,9 @@ static int GetReturnCountAfterLowering(
void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
Node*& index_high) {
+ if (HasReplacementLow(index)) {
+ index = GetReplacementLow(index);
+ }
#if defined(V8_TARGET_LITTLE_ENDIAN)
index_low = index;
index_high = graph()->NewNode(machine()->Int32Add(), index,
@@ -233,9 +237,7 @@ void Int64Lowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, store_op);
ReplaceNode(node, node, high_node);
} else {
- if (HasReplacementLow(node->InputAt(2))) {
- node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
- }
+ DefaultLowering(node);
}
break;
}
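The GetIndexNodes fix matters when the index is itself a lowered 64-bit value: only its low replacement word is a valid 32-bit machine index. On a little-endian target, the two halves of an i64 access then sit at index and index + 4, per the existing code:

    // Sketch (little-endian), mirroring GetIndexNodes after this patch:
    if (HasReplacementLow(index)) index = GetReplacementLow(index);
    Node* index_low = index;                       // bytes [index, index+4)
    Node* index_high = graph()->NewNode(           // bytes [index+4, index+8)
        machine()->Int32Add(), index,
        graph()->NewNode(common()->Int32Constant(4)));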
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 2962e24502..ec1b01a2a1 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -4,6 +4,7 @@
#include "src/compiler/js-builtin-reducer.h"
+#include "src/base/bits.h"
#include "src/compilation-dependencies.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
@@ -128,11 +129,10 @@ MaybeHandle<Map> GetMapWitness(Node* node) {
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
- if (dominator->op()->ValueInputCount() == 2) {
- HeapObjectMatcher m(dominator->InputAt(1));
- if (m.HasValue()) return Handle<Map>::cast(m.Value());
- }
- return MaybeHandle<Map>();
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
+ return (maps.size() == 1) ? MaybeHandle<Map>(maps[0])
+ : MaybeHandle<Map>();
}
if (dominator->op()->EffectInputCount() != 1) {
// Didn't find any appropriate CheckMaps node.
@@ -235,17 +235,27 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Node* control = NodeProperties::GetControlInput(node);
if (iter_kind == ArrayIteratorKind::kTypedArray) {
- // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
- // is potentially a deopt loop, but should be extremely unlikely.
- DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
-
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
- check = graph()->NewNode(simplified()->BooleanNot(), check);
- effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // For JSTypedArray iterator methods, deopt if the buffer is neutered.
+ // This is potentially a deopt loop, but should be extremely unlikely.
+ DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+ Node* buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ // Deoptimize if the {buffer} has been neutered.
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ check = graph()->NewNode(simplified()->BooleanNot(), check);
+ effect =
+ graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+ }
}
int map_index = -1;
@@ -310,6 +320,7 @@ Reduction JSBuiltinReducer::ReduceArrayIterator(Handle<Map> receiver_map,
Node* value = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+ NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, jsgraph()->Constant(map), effect, control);
effect = graph()->NewNode(
@@ -403,12 +414,17 @@ Reduction JSBuiltinReducer::ReduceFastArrayIteratorNext(
} else {
// For value/entry iteration, first step is a mapcheck to ensure
// inlining is still valid.
+ Node* array_map = etrue1 =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ array, etrue1, if_true1);
Node* orig_map = etrue1 =
graph()->NewNode(simplified()->LoadField(
AccessBuilder::ForJSArrayIteratorObjectMap()),
iterator, etrue1, if_true1);
- etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
- etrue1, if_true1);
+ Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
+ array_map, orig_map);
+ etrue1 = graph()->NewNode(simplified()->CheckIf(), check_map, etrue1,
+ if_true1);
}
if (kind != IterationKind::kKeys) {
@@ -536,11 +552,20 @@ Reduction JSBuiltinReducer::ReduceTypedArrayIteratorNext(
simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
array, efalse0, if_false0);
- Node* check1 = efalse0 = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
- check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
- efalse0 =
- graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Deoptimize if the array buffer was neutered.
+ Node* check1 = efalse0 = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+ check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+ efalse0 =
+ graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+ }
Node* length = efalse0 = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
@@ -891,14 +916,11 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
for (Node* dominator = effect;;) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
// Check if all maps have the given {instance_type}.
- for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
- Node* const map = NodeProperties::GetValueInput(dominator, i);
- Type* const map_type = NodeProperties::GetType(map);
- if (!map_type->IsHeapConstant()) return false;
- Handle<Map> const map_value =
- Handle<Map>::cast(map_type->AsHeapConstant()->Value());
- if (map_value->instance_type() != instance_type) return false;
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (maps[i]->instance_type() != instance_type) return false;
}
return true;
}
@@ -930,6 +952,14 @@ bool HasInstanceTypeWitness(Node* receiver, Node* effect,
} // namespace
+// ES6 section 20.3.3.1 Date.now ( )
+Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
+ NodeProperties::RemoveValueInputs(node);
+ NodeProperties::ChangeOp(
+ node, javascript()->CallRuntime(Runtime::kDateCurrentTime));
+ return Changed(node);
+}
+
// ES6 section 20.3.4.10 Date.prototype.getTime ( )
Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -945,34 +975,6 @@ Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
return NoChange();
}
-// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
-Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
- Node* receiver = NodeProperties::GetValueInput(node, 1);
- Node* object = (node->op()->ValueInputCount() >= 3)
- ? NodeProperties::GetValueInput(node, 2)
- : jsgraph()->UndefinedConstant();
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
-
- // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
- // stack trace doesn't contain the @@hasInstance call; we have the
- // corresponding bug in the baseline case. Some massaging of the frame
- // state would be necessary here.
-
- // Morph this {node} into a JSOrdinaryHasInstance node.
- node->ReplaceInput(0, receiver);
- node->ReplaceInput(1, object);
- node->ReplaceInput(2, context);
- node->ReplaceInput(3, frame_state);
- node->ReplaceInput(4, effect);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- return Changed(node);
-}
-
// ES6 section 18.2.2 isFinite ( number )
Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
JSCallReduction r(node);
@@ -1485,6 +1487,117 @@ Reduction JSBuiltinReducer::ReduceNumberParseInt(Node* node) {
return NoChange();
}
+// ES6 section #sec-object.create Object.create(proto, properties)
+Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
+ // We need exactly three value inputs: target, receiver, and the prototype.
+ int arg_count = node->op()->ValueInputCount();
+ if (arg_count != 3) return NoChange();
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* prototype = NodeProperties::GetValueInput(node, 2);
+ Type* prototype_type = NodeProperties::GetType(prototype);
+ Handle<Map> instance_map;
+ if (!prototype_type->IsHeapConstant()) return NoChange();
+ Handle<HeapObject> prototype_const =
+ prototype_type->AsHeapConstant()->Value();
+ if (!prototype_const->IsNull(isolate()) && !prototype_const->IsJSReceiver()) {
+ return NoChange();
+ }
+ instance_map = Map::GetObjectCreateMap(prototype_const);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ if (instance_map->is_dictionary_map()) {
+ // Allocate an empty NameDictionary as backing store for the properties.
+ Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
+ int capacity =
+ NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
+ DCHECK(base::bits::IsPowerOfTwo32(capacity));
+ int length = NameDictionary::EntryToIndex(capacity);
+ int size = NameDictionary::SizeFor(length);
+
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+
+ Node* value = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(size), effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+ value, jsgraph()->HeapConstant(map), effect, control);
+
+ // Initialize FixedArray fields.
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForFixedArrayLength()), value,
+ jsgraph()->SmiConstant(length), effect, control);
+ // Initialize HashTable fields.
+ effect =
+ graph()->NewNode(simplified()->StoreField(
+ AccessBuilder::ForHashTableBaseNumberOfElements()),
+ value, jsgraph()->SmiConstant(0), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForHashTableBaseNumberOfDeletedElement()),
+ value, jsgraph()->SmiConstant(0), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForHashTableBaseCapacity()),
+ value, jsgraph()->SmiConstant(capacity), effect, control);
+ // Initialize Dictionary fields.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
+ value, undefined, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForDictionaryNextEnumerationIndex()),
+ value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
+ control);
+ // Initialize the Properties fields.
+ for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
+ index < length; index++) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier)),
+ value, undefined, effect, control);
+ }
+ properties = effect =
+ graph()->NewNode(common()->FinishRegion(), value, effect);
+ }
+
+ int const instance_size = instance_map->instance_size();
+ if (instance_size > kMaxRegularHeapObjectSize) return NoChange();
+ dependencies()->AssumeInitialMapCantChange(instance_map);
+
+ // Emit code to allocate the JSObject instance for the given
+ // {instance_map}.
+ effect = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kNotObservable), effect);
+ Node* value = effect =
+ graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+ jsgraph()->Constant(instance_size), effect, control);
+ effect =
+ graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
+ jsgraph()->HeapConstant(instance_map), effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+ properties, effect, control);
+ effect = graph()->NewNode(
+ simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+ jsgraph()->EmptyFixedArrayConstant(), effect, control);
+ // Initialize Object fields.
+ Node* undefined = jsgraph()->UndefinedConstant();
+ for (int offset = JSObject::kHeaderSize; offset < instance_size;
+ offset += kPointerSize) {
+ effect = graph()->NewNode(
+ simplified()->StoreField(
+ AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier)),
+ value, undefined, effect, control);
+ }
+ value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+ // Replace the {node} with the newly allocated and initialized object.
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
// ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
JSCallReduction r(node);
@@ -1531,8 +1644,17 @@ Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (index_type->Is(Type::Unsigned32())) {
+ if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
if (Node* receiver = GetStringWitness(node)) {
+ if (!index_type->Is(Type::Unsigned32())) {
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
// Determine the {receiver} length.
Node* receiver_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1544,16 +1666,10 @@ Reduction JSBuiltinReducer::ReduceStringCharAt(Node* node) {
Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
check, control);
+ // Return the character from the {receiver} as single character string.
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* vtrue;
- {
- // Load the character from the {receiver}.
- vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- index, if_true);
-
- // Return it as single character string.
- vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
- }
+ Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+ index, if_true);
// Return the empty string otherwise.
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -1582,8 +1698,17 @@ Reduction JSBuiltinReducer::ReduceStringCharCodeAt(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- if (index_type->Is(Type::Unsigned32())) {
+ if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
if (Node* receiver = GetStringWitness(node)) {
+ if (!index_type->Is(Type::Unsigned32())) {
+ // Map -0 and NaN to 0 (as per ToInteger), and the values in
+ // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+ // be considered out-of-bounds as well, because of the maximal
+ // String length limit in V8.
+ STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+ index = graph()->NewNode(simplified()->NumberToUint32(), index);
+ }
+
// Determine the {receiver} length.
Node* receiver_length = effect = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1632,6 +1757,7 @@ Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
Node* value = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(JSStringIterator::kSize), effect, control);
+ NodeProperties::SetType(value, Type::OtherObject());
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
value, map, effect, control);
effect = graph()->NewNode(
@@ -1805,21 +1931,29 @@ Reduction JSBuiltinReducer::ReduceArrayBufferViewAccessor(
Node* control = NodeProperties::GetControlInput(node);
if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
// Load the {receiver}s field.
- Node* receiver_value = effect = graph()->NewNode(
- simplified()->LoadField(access), receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* receiver_buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect =
- graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
- receiver_buffer, effect, control);
-
- // Default to zero if the {receiver}s buffer was neutered.
- Node* value = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), receiver_value);
+ Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+ receiver, effect, control);
+
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* check = effect =
+ graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+ receiver_buffer, effect, control);
+
+ // Default to zero if the {receiver}s buffer was neutered.
+ value = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), value);
+ }
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -1846,11 +1980,10 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
return ReduceArrayPop(node);
case kArrayPush:
return ReduceArrayPush(node);
+ case kDateNow:
+ return ReduceDateNow(node);
case kDateGetTime:
return ReduceDateGetTime(node);
- case kFunctionHasInstance:
- return ReduceFunctionHasInstance(node);
- break;
case kGlobalIsFinite:
reduction = ReduceGlobalIsFinite(node);
break;
@@ -1971,6 +2104,9 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
case kNumberParseInt:
reduction = ReduceNumberParseInt(node);
break;
+ case kObjectCreate:
+ reduction = ReduceObjectCreate(node);
+ break;
case kStringFromCharCode:
reduction = ReduceStringFromCharCode(node);
break;
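
The Integral32OrMinusZeroOrNaN widening in ReduceStringCharAt/ReduceStringCharCodeAt leans on a property of the uint32 conversion that is easy to check in isolation: -0 and NaN land on 0, and anything in [-2^31,-1] lands in [2^31,2^32-1], past any possible string length. A hedged sketch, where kMaxLength is an assumed stand-in for String::kMaxLength <= kMaxInt:

#include <cmath>
#include <cstdint>
#include <iostream>

// Simplified ToUint32 for inputs that fit in int64 (enough for Integral32
// inputs); NaN maps to 0 as in the spec.
uint32_t ToUint32(double d) {
  if (std::isnan(d)) return 0;
  return static_cast<uint32_t>(static_cast<int64_t>(d));
}

int main() {
  const uint32_t kMaxLength = 0x7FFFFFFF;                       // assumption
  std::cout << ToUint32(-0.0) << "\n";                          // 0
  std::cout << ToUint32(std::nan("")) << "\n";                  // 0
  std::cout << (ToUint32(-1.0) > kMaxLength) << "\n";           // 1: out of bounds
  std::cout << (ToUint32(-2147483648.0) > kMaxLength) << "\n";  // 1: out of bounds
  return 0;
}
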
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index 4af3084ea3..295da8d1bc 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -57,8 +57,8 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
IterationKind kind);
Reduction ReduceArrayPop(Node* node);
Reduction ReduceArrayPush(Node* node);
+ Reduction ReduceDateNow(Node* node);
Reduction ReduceDateGetTime(Node* node);
- Reduction ReduceFunctionHasInstance(Node* node);
Reduction ReduceGlobalIsFinite(Node* node);
Reduction ReduceGlobalIsNaN(Node* node);
Reduction ReduceMathAbs(Node* node);
@@ -99,6 +99,7 @@ class V8_EXPORT_PRIVATE JSBuiltinReducer final
Reduction ReduceNumberIsNaN(Node* node);
Reduction ReduceNumberIsSafeInteger(Node* node);
Reduction ReduceNumberParseInt(Node* node);
+ Reduction ReduceObjectCreate(Node* node);
Reduction ReduceStringCharAt(Node* node);
Reduction ReduceStringCharCodeAt(Node* node);
Reduction ReduceStringFromCharCode(Node* node);
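
All three neutering-check changes in js-builtin-reducer.cc follow the same protector-cell pattern: while the "no ArrayBuffer was ever neutered" invariant holds, compile a dependency on the protector instead of a per-access check; invalidating the protector deoptimizes every dependent code object. A loose, self-contained sketch of the idea, not the real dependency machinery:

#include <functional>
#include <iostream>
#include <vector>

// Stand-in for a protector cell plus the code objects depending on it.
struct Protector {
  bool intact = true;
  std::vector<std::function<void()>> dependents;

  void Invalidate() {  // e.g. the first time any buffer is neutered
    intact = false;
    for (auto& deopt : dependents) deopt();
    dependents.clear();
  }
};

int CompileByteLengthLoad(Protector& protector, int byte_length,
                          bool neutered) {
  if (protector.intact) {
    // Fast path: no runtime check is emitted; instead the "code" registers
    // itself for deoptimization should the invariant ever break.
    protector.dependents.push_back([] { std::cout << "deopt\n"; });
    return byte_length;
  }
  // Slow path: explicit neutering check, defaulting to zero.
  return neutered ? 0 : byte_length;
}

int main() {
  Protector protector;
  std::cout << CompileByteLengthLoad(protector, 64, false) << "\n";  // 64
  protector.Invalidate();                                            // deopt
  std::cout << CompileByteLengthLoad(protector, 64, true) << "\n";   // 0
  return 0;
}
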
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index e48fce91c4..1caf65da01 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -4,7 +4,10 @@
#include "src/compiler/js-call-reducer.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/objects-inl.h"
@@ -189,6 +192,35 @@ Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
+Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ Node* object = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+ // stack trace doesn't contain the @@hasInstance call; we have the
+ // corresponding bug in the baseline case. Some massaging of the frame
+ // state would be necessary here.
+
+ // Morph this {node} into a JSOrdinaryHasInstance node.
+ node->ReplaceInput(0, receiver);
+ node->ReplaceInput(1, object);
+ node->ReplaceInput(2, context);
+ node->ReplaceInput(3, frame_state);
+ node->ReplaceInput(4, effect);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+ return Changed(node);
+}
+
namespace {
// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
@@ -228,8 +260,59 @@ MaybeHandle<Map> InferReceiverMap(Node* node) {
}
}
+bool CanInlineApiCall(Isolate* isolate, Node* node,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK(node->opcode() == IrOpcode::kJSCallFunction);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+ if (function_template_info->call_code()->IsUndefined(isolate)) {
+ return false;
+ }
+ CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+ // CallApiCallbackStub expects the target in a register and treats the
+ // receiver as an implicit argument, so exclude both from the argument
+ // count.
+ int const argc = static_cast<int>(params.arity()) - 2;
+ if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
+ return false;
+ }
+ HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
+ if (!receiver.HasValue()) {
+ return false;
+ }
+ return receiver.Value()->IsUndefined(isolate) ||
+ (receiver.Value()->map()->IsJSObjectMap() &&
+ !receiver.Value()->map()->is_access_check_needed());
+}
+
} // namespace
+JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
+ Handle<JSObject> object,
+ Handle<FunctionTemplateInfo> function_template_info,
+ Handle<JSObject>* holder) {
+ DCHECK(object->map()->IsJSObjectMap());
+ Handle<Map> object_map(object->map());
+ Handle<FunctionTemplateInfo> expected_receiver_type;
+ if (!function_template_info->signature()->IsUndefined(isolate())) {
+ expected_receiver_type =
+ handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+ }
+ if (expected_receiver_type.is_null() ||
+ expected_receiver_type->IsTemplateFor(*object_map)) {
+ *holder = Handle<JSObject>::null();
+ return kHolderIsReceiver;
+ }
+ while (object_map->has_hidden_prototype()) {
+ Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+ object_map = handle(prototype->map());
+ if (expected_receiver_type->IsTemplateFor(*object_map)) {
+ *holder = prototype;
+ return kHolderFound;
+ }
+ }
+ return kHolderNotFound;
+}
+
// ES6 section B.2.2.1.1 get Object.prototype.__proto__
Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
@@ -251,6 +334,69 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return NoChange();
}
+Reduction JSCallReducer::ReduceCallApiFunction(
+ Node* node, Node* target,
+ Handle<FunctionTemplateInfo> function_template_info) {
+ Isolate* isolate = this->isolate();
+ CHECK(!isolate->serializer_enabled());
+ HeapObjectMatcher m(target);
+ DCHECK(m.HasValue() && m.Value()->IsJSFunction());
+ if (!CanInlineApiCall(isolate, node, function_template_info)) {
+ return NoChange();
+ }
+ Handle<CallHandlerInfo> call_handler_info(
+ handle(CallHandlerInfo::cast(function_template_info->call_code())));
+ Handle<Object> data(call_handler_info->data(), isolate);
+
+ Node* receiver_node = NodeProperties::GetValueInput(node, 1);
+ CallFunctionParameters const& params = CallFunctionParametersOf(node->op());
+
+ Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
+ bool const receiver_is_undefined = receiver->IsUndefined(isolate);
+ if (receiver_is_undefined) {
+ receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
+ } else {
+ DCHECK(receiver->map()->IsJSObjectMap() &&
+ !receiver->map()->is_access_check_needed());
+ }
+
+ Handle<JSObject> holder;
+ HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
+ function_template_info, &holder);
+ if (lookup == kHolderNotFound) return NoChange();
+ if (receiver_is_undefined) {
+ receiver_node = jsgraph()->HeapConstant(receiver);
+ NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+ }
+ Node* holder_node =
+ lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
+
+ Zone* zone = graph()->zone();
+ // Same as CanInlineApiCall: exclude the target (which goes in a register) and
+ // the receiver (which is implicitly counted by CallApiCallbackStub) from the
+ // arguments count.
+ int const argc = static_cast<int>(params.arity() - 2);
+ CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+ CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate, zone, cid,
+ cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+ MachineType::AnyTagged(), 1);
+ ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ ExternalReference function_reference(
+ &api_function, ExternalReference::DIRECT_API_CALL, isolate);
+
+ // CallApiCallbackStub's register arguments: code, target, call data, holder,
+ // function address.
+ node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(zone, 2, jsgraph()->Constant(data));
+ node->InsertInput(zone, 3, holder_node);
+ node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+ NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+ return Changed(node);
+}
+
Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
@@ -280,6 +426,8 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
return ReduceFunctionPrototypeApply(node);
case Builtins::kFunctionPrototypeCall:
return ReduceFunctionPrototypeCall(node);
+ case Builtins::kFunctionPrototypeHasInstance:
+ return ReduceFunctionPrototypeHasInstance(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
case Builtins::kObjectPrototypeGetProto:
@@ -292,6 +440,12 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
if (*function == function->native_context()->array_function()) {
return ReduceArrayConstructor(node);
}
+
+ if (shared->IsApiFunction()) {
+ return ReduceCallApiFunction(
+ node, target,
+ handle(FunctionTemplateInfo::cast(shared->function_data())));
+ }
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
Handle<JSBoundFunction>::cast(m.Value());
@@ -302,7 +456,7 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
isolate());
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
ConvertReceiverMode const convert_mode =
- (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
+ (bound_this->IsNullOrUndefined(isolate()))
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
size_t arity = p.arity();
@@ -332,26 +486,37 @@ Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
return NoChange();
}
- // Not much we can do if deoptimization support is disabled.
- if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
// Extract feedback from the {node} using the CallICNexus.
if (!p.feedback().IsValid()) return NoChange();
CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
- if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
- Node* frame_state = NodeProperties::FindFrameStateBefore(node);
- Node* deoptimize = graph()->NewNode(
- common()->Deoptimize(
- DeoptimizeKind::kSoft,
- DeoptimizeReason::kInsufficientTypeFeedbackForCall),
- frame_state, effect, control);
- // TODO(bmeurer): This should be on the AdvancedReducer somehow.
- NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- Revisit(graph()->end());
- node->TrimInputCount(0);
- NodeProperties::ChangeOp(node, common()->Dead());
+ if (nexus.IsUninitialized()) {
+ // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
+ if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
+
+ // Insert a CallIC here to collect feedback for uninitialized calls.
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ Callable callable =
+ CodeFactory::CallICInOptimizedCode(isolate(), p.convert_mode());
+ CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+ CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
+ flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Constant(arg_count);
+ Node* slot_index =
+ jsgraph()->Constant(TypeFeedbackVector::GetIndex(p.feedback().slot()));
+ Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, stub_arity);
+ node->InsertInput(graph()->zone(), 3, slot_index);
+ node->InsertInput(graph()->zone(), 4, feedback_vector);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
+
+ // Not much we can do if deoptimization support is disabled.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
Handle<Object> feedback(nexus.GetFeedback(), isolate());
if (feedback->IsAllocationSite()) {
// Retrieve the Array function from the {node}.
@@ -412,7 +577,8 @@ Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
if (!function->IsConstructor()) {
NodeProperties::ReplaceValueInputs(node, target);
NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
+ node, javascript()->CallRuntime(
+ Runtime::kThrowConstructedNonConstructable));
return Changed(node);
}
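
The new LookupHolder models the receiver-compatibility walk for inlined API calls: if the receiver's map already matches the template signature it is itself the holder, otherwise hidden prototypes are searched for a match. A simplified, self-contained sketch under invented stand-in types (TemplateInfo, FakeObject):

#include <iostream>

struct TemplateInfo {};

struct FakeObject {
  const TemplateInfo* created_from;  // which template instantiated this map
  FakeObject* hidden_prototype;      // nullptr terminates the chain
};

enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };

HolderLookup LookupHolder(FakeObject* object, const TemplateInfo* expected,
                          FakeObject** holder) {
  // An undefined signature accepts any receiver.
  if (expected == nullptr || object->created_from == expected) {
    *holder = nullptr;
    return kHolderIsReceiver;
  }
  // Otherwise walk the hidden prototype chain looking for a match.
  for (FakeObject* p = object->hidden_prototype; p != nullptr;
       p = p->hidden_prototype) {
    if (p->created_from == expected) {
      *holder = p;
      return kHolderFound;
    }
  }
  return kHolderNotFound;
}

int main() {
  TemplateInfo api_template;
  FakeObject proto{&api_template, nullptr};
  FakeObject receiver{nullptr, &proto};
  FakeObject* holder = nullptr;
  std::cout << LookupHolder(&receiver, &api_template, &holder) << "\n";  // 2
  return 0;
}
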
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 81153f98dc..e39433a020 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -25,8 +25,7 @@ class JSCallReducer final : public AdvancedReducer {
// Flags that control the mode of operation.
enum Flag {
kNoFlags = 0u,
- kBailoutOnUninitialized = 1u << 0,
- kDeoptimizationEnabled = 1u << 1
+ kDeoptimizationEnabled = 1u << 0,
};
typedef base::Flags<Flag> Flags;
@@ -41,13 +40,23 @@ class JSCallReducer final : public AdvancedReducer {
private:
Reduction ReduceArrayConstructor(Node* node);
+ Reduction ReduceCallApiFunction(
+ Node* node, Node* target,
+ Handle<FunctionTemplateInfo> function_template_info);
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
+ Reduction ReduceFunctionPrototypeHasInstance(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
+ enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+
+ HolderLookup LookupHolder(Handle<JSObject> object,
+ Handle<FunctionTemplateInfo> function_template_info,
+ Handle<JSObject>* holder);
+
Graph* graph() const;
Flags flags() const { return flags_; }
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index e02fc49de8..9a2edc13e3 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -28,50 +28,81 @@ Reduction JSContextSpecialization::Reduce(Node* node) {
return NoChange();
}
+Reduction JSContextSpecialization::SimplifyJSLoadContext(Node* node,
+ Node* new_context,
+ size_t new_depth) {
+ DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+ const ContextAccess& access = ContextAccessOf(node->op());
+ DCHECK_LE(new_depth, access.depth());
+
+ if (new_depth == access.depth() &&
+ new_context == NodeProperties::GetContextInput(node)) {
+ return NoChange();
+ }
-MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
- Node* node) {
- DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
- node->opcode() == IrOpcode::kJSStoreContext);
- Node* const object = NodeProperties::GetValueInput(node, 0);
- return NodeProperties::GetSpecializationContext(object, context());
+ const Operator* op = jsgraph_->javascript()->LoadContext(
+ new_depth, access.index(), access.immutable());
+ NodeProperties::ReplaceContextInput(node, new_context);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
}
+Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
+ Node* new_context,
+ size_t new_depth) {
+ DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+ const ContextAccess& access = ContextAccessOf(node->op());
+ DCHECK_LE(new_depth, access.depth());
+
+ if (new_depth == access.depth() &&
+ new_context == NodeProperties::GetContextInput(node)) {
+ return NoChange();
+ }
+
+ const Operator* op =
+ jsgraph_->javascript()->StoreContext(new_depth, access.index());
+ NodeProperties::ReplaceContextInput(node, new_context);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+}
Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
- // Get the specialization context from the node.
- Handle<Context> context;
- if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
- // Find the right parent context.
const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = access.depth(); i > 0; --i) {
- context = handle(context->previous(), isolate());
+ size_t depth = access.depth();
+
+ // First walk up the context chain in the graph as far as possible.
+ Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+ Handle<Context> concrete;
+ if (!NodeProperties::GetSpecializationContext(outer, context())
+ .ToHandle(&concrete)) {
+ // We do not have a concrete context object, so we can only partially reduce
+ // the load by folding in the outer context node.
+ return SimplifyJSLoadContext(node, outer, depth);
+ }
+
+ // Now walk up the concrete context chain for the remaining depth.
+ for (; depth > 0; --depth) {
+ concrete = handle(concrete->previous(), isolate());
}
- // If the access itself is mutable, only fold-in the parent.
if (!access.immutable()) {
- // The access does not have to look up a parent, nothing to fold.
- if (access.depth() == 0) {
- return NoChange();
- }
- const Operator* op = jsgraph_->javascript()->LoadContext(
- 0, access.index(), access.immutable());
- node->ReplaceInput(0, jsgraph_->Constant(context));
- NodeProperties::ChangeOp(node, op);
- return Changed(node);
+ // We found the requested context object, but since the context slot is
+ // mutable, we can only partially reduce the load.
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
- Handle<Object> value =
- handle(context->get(static_cast<int>(access.index())), isolate());
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot.
- // We must be conservative and check if the value in the slot is currently the
- // hole or undefined. If it is neither of these, then it must be initialized.
+ // We must be conservative and check if the value in the slot is currently
+ // the hole or undefined. Only if it is neither of these can we be sure
+ // that it won't change anymore.
+ Handle<Object> value(concrete->get(static_cast<int>(access.index())),
+ isolate());
if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
- return NoChange();
+ return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
}
// Success. The context load can be replaced with the constant.
@@ -86,24 +117,27 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
- // Get the specialization context from the node.
- Handle<Context> context;
- if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
- // The access does not have to look up a parent, nothing to fold.
const ContextAccess& access = ContextAccessOf(node->op());
- if (access.depth() == 0) {
- return NoChange();
+ size_t depth = access.depth();
+
+ // First walk up the context chain in the graph until we reduce the depth to 0
+ // or hit a node that does not have a CreateXYZContext operator.
+ Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+ Handle<Context> concrete;
+ if (!NodeProperties::GetSpecializationContext(outer, context())
+ .ToHandle(&concrete)) {
+ // We do not have a concrete context object, so we can only partially reduce
+ // the store by folding in the outer context node.
+ return SimplifyJSStoreContext(node, outer, depth);
}
- // Find the right parent context.
- for (size_t i = access.depth(); i > 0; --i) {
- context = handle(context->previous(), isolate());
+ // Now walk up the concrete context chain for the remaining depth.
+ for (; depth > 0; --depth) {
+ concrete = handle(concrete->previous(), isolate());
}
- node->ReplaceInput(0, jsgraph_->Constant(context));
- NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
- return Changed(node);
+ return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
}
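
Both reducers above now share a two-phase walk: first hop up whatever part of the context chain is statically visible in the graph, then finish the remaining depth on the concrete context object if one is available. A self-contained sketch of that walk, with a plain linked list standing in for both the graph nodes and the concrete Context chain:

#include <cstddef>
#include <iostream>

struct Context {
  int slot;
  Context* previous;
};

// Walks up to *depth links; stops early when the chain runs out, leaving the
// remaining depth in *depth (the analogue of NodeProperties::GetOuterContext).
Context* WalkOuter(Context* c, size_t* depth) {
  while (*depth > 0 && c->previous != nullptr) {
    c = c->previous;
    --*depth;
  }
  return c;
}

int main() {
  Context c2{2, nullptr}, c1{1, &c2}, c0{0, &c1};
  size_t depth = 2;
  Context* outer = WalkOuter(&c0, &depth);
  // depth is now 0 and outer points at c2: the access is fully reduced.
  std::cout << outer->slot << " remaining depth " << depth << "\n";
  return 0;
}
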
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index ef784fc442..99172af446 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -30,8 +30,10 @@ class JSContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSStoreContext(Node* node);
- // Returns the {Context} to specialize {node} to (if any).
- MaybeHandle<Context> GetSpecializationContext(Node* node);
+ Reduction SimplifyJSStoreContext(Node* node, Node* new_context,
+ size_t new_depth);
+ Reduction SimplifyJSLoadContext(Node* node, Node* new_context,
+ size_t new_depth);
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index c54b76b6cb..9a3cbd7894 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -38,6 +38,7 @@ class AllocationBuilder final {
// Primitive allocation of static size.
void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
Type* type = Type::Any()) {
+ DCHECK_LE(size, kMaxRegularHeapObjectSize);
effect_ = graph()->NewNode(
common()->BeginRegion(RegionObservability::kNotObservable), effect_);
allocation_ =
@@ -161,7 +162,9 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
}
}
}
- } else if (!boilerplate->HasFastDoubleElements()) {
+ } else if (boilerplate->HasFastDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
return false;
}
}
@@ -176,7 +179,8 @@ bool IsFastLiteral(Handle<JSObject> boilerplate, int max_depth,
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -294,46 +298,130 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
if (outer_state->opcode() != IrOpcode::kFrameState) {
switch (type) {
case CreateArgumentsType::kMappedArguments: {
- // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ // TODO(bmeurer): Make deoptimization mandatory for the various
+ // arguments objects, so that we always have a shared_info here.
Handle<SharedFunctionInfo> shared_info;
- if (!state_info.shared_info().ToHandle(&shared_info) ||
- shared_info->has_duplicate_parameters()) {
- return NoChange();
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ // TODO(mstarzinger): Duplicate parameters are not handled yet.
+ if (shared_info->has_duplicate_parameters()) return NoChange();
+ // If there is no aliasing, the arguments object elements are not
+ // special in any way, so we can just return an unmapped backing store.
+ if (shared_info->internal_formal_parameter_count() == 0) {
+ Node* const callee = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewUnmappedArgumentsElements(0), effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(native_context()->sloppy_arguments_map(), isolate()));
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+ a.Allocate(JSSloppyArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), length);
+ a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->RemoveInput(3); // Remove the frame state.
+ NodeProperties::ChangeOp(node, new_op);
+ }
+ return Changed(node);
}
- Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- node->RemoveInput(3); // Remove the frame state.
- NodeProperties::ChangeOp(node, new_op);
- return Changed(node);
+ return NoChange();
}
case CreateArgumentsType::kUnmappedArguments: {
- Callable callable = CodeFactory::FastNewStrictArguments(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, new_op);
+ Handle<SharedFunctionInfo> shared_info;
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewUnmappedArgumentsElements(
+ shared_info->internal_formal_parameter_count()),
+ effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the arguments object map.
+ Node* const arguments_map = jsgraph()->HeapConstant(
+ handle(native_context()->strict_arguments_map(), isolate()));
+ // Actually allocate and initialize the arguments object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+ a.Allocate(JSStrictArgumentsObject::kSize);
+ a.Store(AccessBuilder::ForMap(), arguments_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForArgumentsLength(), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ }
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- Callable callable = CodeFactory::FastNewRestParameter(isolate());
- Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNeedsFrameState, properties);
- const Operator* new_op = common()->Call(desc);
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(graph()->zone(), 0, stub_code);
- NodeProperties::ChangeOp(node, new_op);
+ Handle<SharedFunctionInfo> shared_info;
+ if (state_info.shared_info().ToHandle(&shared_info)) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ // Allocate the elements backing store.
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewRestParameterElements(
+ shared_info->internal_formal_parameter_count()),
+ effect);
+ Node* const length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ elements, effect, control);
+ // Load the JSArray object map.
+ Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+ native_context()->js_array_fast_elements_map_index(), isolate()));
+ // Actually allocate and initialize the jsarray.
+ AllocationBuilder a(jsgraph(), effect, control);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+ a.Allocate(JSArray::kSize);
+ a.Store(AccessBuilder::ForMap(), jsarray_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ } else {
+ Callable callable = CodeFactory::FastNewRestParameter(isolate());
+ Operator::Properties properties = node->op()->properties();
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState, properties);
+ const Operator* new_op = common()->Call(desc);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ NodeProperties::ChangeOp(node, new_op);
+ }
return Changed(node);
}
}
@@ -663,17 +751,19 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
}
Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
+ if (!FLAG_turbo_lower_create_closure) return NoChange();
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
Handle<SharedFunctionInfo> shared = p.shared_info();
-
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
Node* context = NodeProperties::GetContextInput(node);
+
int const function_map_index =
Context::FunctionMapIndex(shared->language_mode(), shared->kind());
Node* function_map = jsgraph()->HeapConstant(
handle(Map::cast(native_context()->get(function_map_index)), isolate()));
+
// Note that it is only safe to embed the raw entry point of the compile
// lazy stub into the code, because that stub is immortal and immovable.
Node* compile_entry = jsgraph()->PointerConstant(
@@ -785,7 +875,10 @@ Reduction JSCreateLowering::ReduceJSCreateLiteral(Node* node) {
Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
- int slot_count = OpParameter<int>(node->op());
+ const CreateFunctionContextParameters& parameters =
+ CreateFunctionContextParametersOf(node->op());
+ int slot_count = parameters.slot_count();
+ ScopeType scope_type = parameters.scope_type();
Node* const closure = NodeProperties::GetValueInput(node, 0);
// Use inline allocation for function contexts up to a size limit.
@@ -798,7 +891,18 @@ Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
AllocationBuilder a(jsgraph(), effect, control);
STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4); // Ensure fully covered.
int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
- a.AllocateArray(context_length, factory()->function_context_map());
+ Handle<Map> map;
+ switch (scope_type) {
+ case EVAL_SCOPE:
+ map = factory()->eval_context_map();
+ break;
+ case FUNCTION_SCOPE:
+ map = factory()->function_context_map();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ a.AllocateArray(context_length, map);
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -929,6 +1033,7 @@ Node* JSCreateLowering::AllocateArguments(Node* effect, Node* control,
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(argument_count, factory()->fixed_array_map());
for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
return a.Finish();
@@ -958,6 +1063,7 @@ Node* JSCreateLowering::AllocateRestArguments(Node* effect, Node* control,
AllocationBuilder a(jsgraph(), effect, control);
a.AllocateArray(num_elements, factory()->fixed_array_map());
for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
return a.Finish();
@@ -987,18 +1093,19 @@ Node* JSCreateLowering::AllocateAliasedArguments(
// Prepare an iterator over argument values recorded in the frame state.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
StateValuesAccess parameters_access(parameters);
- auto paratemers_it = ++parameters_access.begin();
+ auto parameters_it = ++parameters_access.begin();
// The unmapped argument values recorded in the frame state are stored yet
// another indirection away and then linked into the parameter map below,
// whereas mapped argument values are replaced with a hole instead.
AllocationBuilder aa(jsgraph(), effect, control);
aa.AllocateArray(argument_count, factory()->fixed_array_map());
- for (int i = 0; i < mapped_count; ++i, ++paratemers_it) {
+ for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
}
- for (int i = mapped_count; i < argument_count; ++i, ++paratemers_it) {
- aa.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
+ for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+ DCHECK_NOT_NULL((*parameters_it).node);
+ aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
}
Node* arguments = aa.Finish();
@@ -1081,13 +1188,15 @@ Node* JSCreateLowering::AllocateFastLiteral(
for (int i = 0; i < boilerplate_nof; ++i) {
PropertyDetails const property_details =
boilerplate_map->instance_descriptors()->GetDetails(i);
- if (property_details.type() != DATA) continue;
+ if (property_details.location() != kField) continue;
+ DCHECK_EQ(kData, property_details.kind());
Handle<Name> property_name(
boilerplate_map->instance_descriptors()->GetKey(i), isolate());
FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
- FieldAccess access = {
- kTaggedBase, index.offset(), property_name,
- Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+ FieldAccess access = {kTaggedBase, index.offset(),
+ property_name, MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
Node* value;
if (boilerplate->IsUnboxedDoubleField(index)) {
access.machine_type = MachineType::Float64();
@@ -1104,23 +1213,15 @@ Node* JSCreateLowering::AllocateFastLiteral(
boilerplate_object, site_context);
site_context->ExitScope(current_site, boilerplate_object);
} else if (property_details.representation().IsDouble()) {
+ double number = Handle<HeapNumber>::cast(boilerplate_value)->value();
// Allocate a mutable HeapNumber box and store the value into it.
- effect = graph()->NewNode(
- common()->BeginRegion(RegionObservability::kNotObservable), effect);
- value = effect = graph()->NewNode(
- simplified()->Allocate(pretenure),
- jsgraph()->Constant(HeapNumber::kSize), effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForMap()), value,
- jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
- effect, control);
- effect = graph()->NewNode(
- simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
- value, jsgraph()->Constant(
- Handle<HeapNumber>::cast(boilerplate_value)->value()),
- effect, control);
- value = effect =
- graph()->NewNode(common()->FinishRegion(), value, effect);
+ AllocationBuilder builder(jsgraph(), effect, control);
+ builder.Allocate(HeapNumber::kSize, pretenure);
+ builder.Store(AccessBuilder::ForMap(),
+ factory()->mutable_heap_number_map());
+ builder.Store(AccessBuilder::ForHeapNumberValue(),
+ jsgraph()->Constant(number));
+ value = effect = builder.Finish();
} else if (property_details.representation().IsSmi()) {
// Ensure that value is stored as smi.
value = boilerplate_value->IsUninitialized(isolate())
@@ -1156,7 +1257,7 @@ Node* JSCreateLowering::AllocateFastLiteral(
AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
handle(boilerplate_array->length(), isolate()));
}
- for (auto const inobject_field : inobject_fields) {
+ for (auto const& inobject_field : inobject_fields) {
builder.Store(inobject_field.first, inobject_field.second);
}
return builder.Finish();
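
The STATIC_ASSERTs in the new inline-allocation paths above encode a layout assumption that is easy to restate in isolation: a sloppy arguments object is exactly five tagged fields (map, properties, elements, length, callee) and the strict variant drops callee. A sketch, assuming pointer-sized tagged fields and no padding:

#include <cstddef>

using Tagged = void*;  // stand-in for a tagged V8 pointer

struct SloppyArgumentsObject {
  Tagged map;
  Tagged properties;
  Tagged elements;
  Tagged length;
  Tagged callee;
};

struct StrictArgumentsObject {
  Tagged map;
  Tagged properties;
  Tagged elements;
  Tagged length;
};

static_assert(sizeof(SloppyArgumentsObject) == 5 * sizeof(Tagged),
              "mirrors JSSloppyArgumentsObject::kSize == 5 * kPointerSize");
static_assert(sizeof(StrictArgumentsObject) == 4 * sizeof(Tagged),
              "mirrors JSStrictArgumentsObject::kSize == 4 * kPointerSize");

int main() { return 0; }
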
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 55ec1bf41d..73e1b7dd24 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -27,6 +27,9 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
}
Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+ // JSFrameSpecialization should never run on interpreted frames, since the
+ // code below assumes standard stack frame layouts.
+ DCHECK(!frame()->is_interpreted());
DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
Handle<Object> value;
int index = OsrValueIndexOf(node->op());
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 250a9c26f6..ee844e9ee2 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -5,6 +5,7 @@
#include "src/compiler/js-generic-lowering.h"
#include "src/ast/ast.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
@@ -153,75 +154,37 @@ void JSGenericLowering::LowerJSTypeOf(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const PropertyAccess& p = PropertyAccessOf(node->op());
Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 1);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(3, vector);
- node->ReplaceInput(6, effect);
+ node->InsertInput(zone(), 3, vector);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
- Node* closure = NodeProperties::GetValueInput(node, 0);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
Callable callable =
CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
- node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
- node->ReplaceInput(1, vector);
- node->ReplaceInput(4, effect);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+ node->InsertInput(zone(), 2, vector);
ReplaceWithStubCall(node, callable, flags);
}
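
The three hunks above share one simplification: the feedback vector is now known at compile time, so the two dependent loads that walked closure -> literals -> feedback vector (and their effect/control rewiring) disappear. A sketch of the shared shape, assuming the JSGenericLowering members used here (jsgraph(), zone()) and a parameter object p carrying the feedback:

    // Sketch: the common post-change form of these Lower* methods. The
    // vector is embedded as a heap constant; the slot index and vector
    // become plain value inputs to the IC stub call, and no effect or
    // control edges need to be rewired.
    Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
    node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
    node->InsertInput(zone(), 3, vector);
    ReplaceWithStubCall(node, callable, flags);
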
@@ -230,33 +193,20 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* key = NodeProperties::GetValueInput(node, 1);
Node* value = NodeProperties::GetValueInput(node, 2);
- Node* closure = NodeProperties::GetValueInput(node, 3);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
PropertyAccess const& p = PropertyAccessOf(node->op());
LanguageMode language_mode = p.language_mode();
Callable callable =
CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 1);
+ node->InsertInputs(zone(), 0, 2);
node->ReplaceInput(Descriptor::kReceiver, receiver);
node->ReplaceInput(Descriptor::kName, key);
node->ReplaceInput(Descriptor::kValue, value);
node->ReplaceInput(Descriptor::kSlot,
jsgraph()->SmiConstant(p.feedback().index()));
node->ReplaceInput(Descriptor::kVector, vector);
- node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
@@ -264,39 +214,25 @@ void JSGenericLowering::LowerJSStoreProperty(Node* node) {
void JSGenericLowering::LowerJSStoreNamed(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
- Node* closure = NodeProperties::GetValueInput(node, 2);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
NamedAccess const& p = NamedAccessOf(node->op());
Callable callable =
CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 2);
+ node->InsertInputs(zone(), 0, 3);
node->ReplaceInput(Descriptor::kReceiver, receiver);
node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
node->ReplaceInput(Descriptor::kValue, value);
node->ReplaceInput(Descriptor::kSlot,
jsgraph()->SmiConstant(p.feedback().index()));
node->ReplaceInput(Descriptor::kVector, vector);
- node->ReplaceInput(7, effect);
ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 0);
- Node* closure = NodeProperties::GetValueInput(node, 1);
Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@@ -304,16 +240,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
Callable callable =
CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
- // Load the type feedback vector from the closure.
- Node* literals = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), closure,
- jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
- effect, control);
- Node* vector = effect = graph()->NewNode(
- machine()->Load(MachineType::AnyTagged()), literals,
- jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
- kHeapObjectTag),
- effect, control);
+ Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
// Load global object from the context.
Node* native_context = effect =
graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
@@ -325,7 +252,7 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
effect, control);
typedef StoreWithVectorDescriptor Descriptor;
- node->InsertInputs(zone(), 0, 3);
+ node->InsertInputs(zone(), 0, 4);
node->ReplaceInput(Descriptor::kReceiver, global);
node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
node->ReplaceInput(Descriptor::kValue, value);
@@ -336,6 +263,13 @@ void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
ReplaceWithStubCall(node, callable, flags);
}
+void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
+ DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+ node->InsertInputs(zone(), 4, 2);
+ node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
+ node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
+ ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
+}
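
For orientation, a hedged sketch of the argument layout this lowering produces; the leading inputs are an assumption based on how the two InsertInputs slots at index 4 are filled, not something spelled out in this hunk:

    // Assumed value-input layout of JSStoreDataPropertyInLiteral:
    //   0: object   1: name   2: value   3: flags
    // InsertInputs(zone(), 4, 2) opens two slots which are filled so the
    // runtime call receives:
    //   Runtime::kDefineDataPropertyInLiteral(object, name, value, flags,
    //                                         feedback_vector, slot_index)
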
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
LanguageMode language_mode = OpParameter<LanguageMode>(node);
@@ -344,6 +278,11 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
: Runtime::kDeleteProperty_Sloppy);
}
+void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::GetSuperConstructor(isolate());
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -358,40 +297,12 @@ void JSGenericLowering::LowerJSOrdinaryHasInstance(Node* node) {
}
void JSGenericLowering::LowerJSLoadContext(Node* node) {
- const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
- NodeProperties::GetValueInput(node, 0),
- jsgraph()->Int32Constant(
- Context::SlotOffset(Context::PREVIOUS_INDEX)),
- NodeProperties::GetEffectInput(node),
- graph()->start()));
- }
- node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
- static_cast<int>(access.index()))));
- node->AppendInput(zone(), graph()->start());
- NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSStoreContext(Node* node) {
- const ContextAccess& access = ContextAccessOf(node->op());
- for (size_t i = 0; i < access.depth(); ++i) {
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
- NodeProperties::GetValueInput(node, 0),
- jsgraph()->Int32Constant(
- Context::SlotOffset(Context::PREVIOUS_INDEX)),
- NodeProperties::GetEffectInput(node),
- graph()->start()));
- }
- node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
- node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
- static_cast<int>(access.index()))));
- NodeProperties::ChangeOp(
- node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
- kFullWriteBarrier)));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
@@ -438,11 +349,18 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
Handle<SharedFunctionInfo> const shared_info = p.shared_info();
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
- // Use the FastNewClosureStub only for functions allocated in new space.
+ // Use the FastNewClosure builtin only for functions allocated in new
+ // space.
if (p.pretenure() == NOT_TENURED) {
Callable callable = CodeFactory::FastNewClosure(isolate());
+ node->InsertInput(zone(), 1,
+ jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithStubCall(node, callable, flags);
} else {
+ node->InsertInput(zone(), 1,
+ jsgraph()->HeapConstant(p.feedback().vector()));
+ node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
? Runtime::kNewClosure_Tenured
: Runtime::kNewClosure);
@@ -451,14 +369,20 @@ void JSGenericLowering::LowerJSCreateClosure(Node* node) {
void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
- int const slot_count = OpParameter<int>(node->op());
+ const CreateFunctionContextParameters& parameters =
+ CreateFunctionContextParametersOf(node->op());
+ int slot_count = parameters.slot_count();
+ ScopeType scope_type = parameters.scope_type();
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
- if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
- Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+ if (slot_count <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable =
+ CodeFactory::FastNewFunctionContext(isolate(), scope_type);
node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
ReplaceWithStubCall(node, callable, flags);
} else {
+ node->InsertInput(zone(), 1, jsgraph()->SmiConstant(scope_type));
ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
}
}
@@ -478,11 +402,13 @@ void JSGenericLowering::LowerJSCreateLiteralArray(Node* node) {
node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
- // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
- // initial length limit for arrays with "fast" elements kind.
+ // Use the FastCloneShallowArray builtin only for shallow boilerplates
+ // without properties, up to the number of elements that the stub can handle.
if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
- p.length() < JSArray::kInitialMaxFastElementArray) {
- Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+ p.length() <
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements) {
+ Callable callable = CodeFactory::FastCloneShallowArray(
+ isolate(), DONT_TRACK_ALLOCATION_SITE);
ReplaceWithStubCall(node, callable, flags);
} else {
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -498,10 +424,11 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
- // Use the FastCloneShallowObjectStub only for shallow boilerplates without
- // elements up to the number of properties that the stubs can handle.
+ // Use the FastCloneShallowObject builtin only for shallow boilerplates
+ // without elements, up to the number of properties that the stub can handle.
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
- p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ p.length() <=
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties) {
Callable callable =
CodeFactory::FastCloneShallowObject(isolate(), p.length());
ReplaceWithStubCall(node, callable, flags);
@@ -574,6 +501,12 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
NodeProperties::ChangeOp(node, common()->Call(desc));
}
+void JSGenericLowering::LowerJSCallConstructWithSpread(Node* node) {
+ CallConstructWithSpreadParameters const& p =
+ CallConstructWithSpreadParametersOf(node->op());
+ ReplaceWithRuntimeCall(node, Runtime::kNewWithSpread,
+ static_cast<int>(p.arity()));
+}
void JSGenericLowering::LowerJSCallFunction(Node* node) {
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
@@ -613,24 +546,12 @@ void JSGenericLowering::LowerJSForInPrepare(Node* node) {
}
void JSGenericLowering::LowerJSLoadMessage(Node* node) {
- ExternalReference message_address =
- ExternalReference::address_of_pending_message_obj(isolate());
- node->RemoveInput(NodeProperties::FirstContextIndex(node));
- node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
- node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSStoreMessage(Node* node) {
- ExternalReference message_address =
- ExternalReference::address_of_pending_message_obj(isolate());
- node->RemoveInput(NodeProperties::FirstContextIndex(node));
- node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
- node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
- StoreRepresentation representation(MachineRepresentation::kTagged,
- kNoWriteBarrier);
- NodeProperties::ChangeOp(node, machine()->Store(representation));
+ UNREACHABLE(); // Eliminated in typed lowering.
}
void JSGenericLowering::LowerJSLoadModule(Node* node) {
diff --git a/deps/v8/src/compiler/js-global-object-specialization.cc b/deps/v8/src/compiler/js-global-object-specialization.cc
index e9ff060dd8..2fe5cabc22 100644
--- a/deps/v8/src/compiler/js-global-object-specialization.cc
+++ b/deps/v8/src/compiler/js-global-object-specialization.cc
@@ -49,7 +49,8 @@ Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
namespace {
FieldAccess ForPropertyCellValue(MachineRepresentation representation,
- Type* type, Handle<Name> name) {
+ Type* type, MaybeHandle<Map> map,
+ Handle<Name> name) {
WriteBarrierKind kind = kFullWriteBarrier;
if (representation == MachineRepresentation::kTaggedSigned) {
kind = kNoWriteBarrier;
@@ -57,8 +58,8 @@ FieldAccess ForPropertyCellValue(MachineRepresentation representation,
kind = kPointerWriteBarrier;
}
MachineType r = MachineType::TypeForRepresentation(representation);
- FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
- kind};
+ FieldAccess access = {
+ kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
return access;
}
} // namespace
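
FieldAccess now carries an optional map between the name and the type. A short sketch of the widened initializer, mirroring ForPropertyCellValue above; callers that cannot name a stable map pass an empty MaybeHandle<Map>() and behave exactly as before:

    FieldAccess access = {
        kTaggedBase,                 // base tagging
        PropertyCell::kValueOffset,  // field offset
        name,                        // property name (debugging aid)
        map,                         // MaybeHandle<Map>: lets LoadElimination
                                     // drop redundant map checks downstream
        type,                        // value type
        MachineType::TypeForRepresentation(representation),
        kind};                       // write barrier kind
    return access;
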
@@ -76,7 +77,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
Node* context = jsgraph()->HeapConstant(result.context);
Node* value = effect = graph()->NewNode(
javascript()->LoadContext(0, result.index, result.immutable), context,
- context, effect);
+ effect);
ReplaceWithValue(node, value, effect);
return Replace(value);
}
@@ -115,6 +116,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
}
// Load from constant type cell can benefit from type feedback.
+ MaybeHandle<Map> map;
Type* property_cell_value_type = Type::NonInternal();
MachineRepresentation representation = MachineRepresentation::kTagged;
if (property_details.cell_type() == PropertyCellType::kConstantType) {
@@ -126,18 +128,24 @@ Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
property_cell_value_type = Type::Number();
representation = MachineRepresentation::kTaggedPointer;
} else {
- // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
- // below and use it in LoadElimination to eliminate map checks.
Handle<Map> property_cell_value_map(
Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
property_cell_value_type = Type::For(property_cell_value_map);
representation = MachineRepresentation::kTaggedPointer;
+
+ // We can only use the property cell value map for map check elimination
+ // if it's stable, i.e. the HeapObject wasn't mutated without the cell
+ // state being updated.
+ if (property_cell_value_map->is_stable()) {
+ dependencies()->AssumeMapStable(property_cell_value_map);
+ map = property_cell_value_map;
+ }
}
}
- Node* value = effect =
- graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
- representation, property_cell_value_type, name)),
- jsgraph()->HeapConstant(property_cell), effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->LoadField(ForPropertyCellValue(
+ representation, property_cell_value_type, map, name)),
+ jsgraph()->HeapConstant(property_cell), effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -157,7 +165,7 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
if (result.immutable) return NoChange();
Node* context = jsgraph()->HeapConstant(result.context);
effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
- context, value, context, effect, control);
+ value, context, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -206,9 +214,11 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
value, effect, control);
// Check the {value} map against the {property_cell} map.
- effect = graph()->NewNode(
- simplified()->CheckMaps(1), value,
- jsgraph()->HeapConstant(property_cell_value_map), effect, control);
+ effect =
+ graph()->NewNode(simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(property_cell_value_map)),
+ value, effect, control);
property_cell_value_type = Type::OtherInternal();
representation = MachineRepresentation::kTaggedPointer;
} else {
@@ -218,24 +228,21 @@ Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
property_cell_value_type = Type::SignedSmall();
representation = MachineRepresentation::kTaggedSigned;
}
- effect = graph()->NewNode(
- simplified()->StoreField(ForPropertyCellValue(
- representation, property_cell_value_type, name)),
- jsgraph()->HeapConstant(property_cell), value, effect, control);
+ effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
+ representation, property_cell_value_type,
+ MaybeHandle<Map>(), name)),
+ jsgraph()->HeapConstant(property_cell), value,
+ effect, control);
break;
}
case PropertyCellType::kMutable: {
- // Store to non-configurable, data property on the global can be lowered
- // to a field store, even without recording a code dependency on the cell,
- // because the property cannot be deleted or reconfigured to an accessor
- // or interceptor property.
- if (property_details.IsConfigurable()) {
- // Protect lowering by recording a code dependency on the cell.
- dependencies()->AssumePropertyCell(property_cell);
- }
+ // Record a code dependency on the cell, and just deoptimize if the
+ // property ever becomes read-only.
+ dependencies()->AssumePropertyCell(property_cell);
effect = graph()->NewNode(
simplified()->StoreField(ForPropertyCellValue(
- MachineRepresentation::kTagged, Type::NonInternal(), name)),
+ MachineRepresentation::kTagged, Type::NonInternal(),
+ MaybeHandle<Map>(), name)),
jsgraph()->HeapConstant(property_cell), value, effect, control);
break;
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 8626cd1821..1fa7861d49 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -31,11 +31,26 @@ Node* JSGraph::ToNumberBuiltinConstant() {
Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
ArgvMode argv_mode, bool builtin_exit_frame) {
- if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
- result_size == 1) {
+ if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack) {
+ DCHECK(result_size >= 1 && result_size <= 3);
+ if (!builtin_exit_frame) {
+ CachedNode key;
+ if (result_size == 1) {
+ key = kCEntryStub1Constant;
+ } else if (result_size == 2) {
+ key = kCEntryStub2Constant;
+ } else {
+ DCHECK(result_size == 3);
+ key = kCEntryStub3Constant;
+ }
+ return CACHED(
+ key, HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+ argv_mode, builtin_exit_frame)
+ .GetCode()));
+ }
CachedNode key = builtin_exit_frame
- ? kCEntryStubWithBuiltinExitFrameConstant
- : kCEntryStubConstant;
+ ? kCEntryStub1WithBuiltinExitFrameConstant
+ : kCEntryStub1Constant;
return CACHED(key,
HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
argv_mode, builtin_exit_frame)
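
The cache above now distinguishes CEntry stubs by result size (1 to 3). A compact sketch of the key selection, assuming the CachedNode enum values added to js-graph.h further below:

    // Sketch: pick the cache slot for a CEntryStub constant by result size,
    // so stubs returning 1, 2, or 3 values each get a shared cached node.
    CachedNode KeyForResultSize(int result_size) {
      switch (result_size) {
        case 1: return kCEntryStub1Constant;
        case 2: return kCEntryStub2Constant;
        case 3: return kCEntryStub3Constant;
        default: UNREACHABLE();  // DCHECKed to be in [1, 3] by the caller
      }
      return kCEntryStub1Constant;  // unreachable; silences compilers
    }
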
@@ -264,7 +279,8 @@ Node* JSGraph::ExternalConstant(Runtime::FunctionId function_id) {
}
Node* JSGraph::EmptyStateValues() {
- return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
+ return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(
+ 0, SparseInputMask::Dense())));
}
Node* JSGraph::Dead() {
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index c2c0c77f42..e10591998c 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -162,8 +162,10 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
kToNumberBuiltinConstant,
- kCEntryStubConstant,
- kCEntryStubWithBuiltinExitFrameConstant,
+ kCEntryStub1Constant,
+ kCEntryStub2Constant,
+ kCEntryStub3Constant,
+ kCEntryStub1WithBuiltinExitFrameConstant,
kEmptyFixedArrayConstant,
kEmptyLiteralsArrayConstant,
kEmptyStringConstant,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index d6229c2d64..672d322a24 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -46,8 +46,8 @@ bool CanInlineFunction(Handle<JSFunction> function) {
// Built-in functions are handled by the JSBuiltinReducer.
if (function->shared()->HasBuiltinFunctionId()) return false;
- // Don't inline builtins.
- if (function->shared()->IsBuiltin()) return false;
+ // Only choose user code for inlining.
+ if (!function->shared()->IsUserJavaScript()) return false;
// Quick check on the size of the AST to avoid parsing large candidates.
if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 0e122a6c14..1717d4118a 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -4,25 +4,21 @@
#include "src/compiler/js-inlining.h"
-#include "src/ast/ast-numbering.h"
#include "src/ast/ast.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
-#include "src/compiler/ast-graph-builder.h"
-#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/rewriter.h"
namespace v8 {
namespace internal {
@@ -235,14 +231,14 @@ Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
NodeVector params(local_zone_);
for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
params.push_back(node->InputAt(1 + parameter));
}
- const Operator* op_param =
- common()->StateValues(static_cast<int>(params.size()));
+ const Operator* op_param = common()->StateValues(
+ static_cast<int>(params.size()), SparseInputMask::Dense());
Node* params_node = graph()->NewNode(
op_param, static_cast<int>(params.size()), &params.front());
return graph()->NewNode(op, params_node, node0, node0,
@@ -273,7 +269,7 @@ Node* JSInliner::CreateTailCallerFrameState(Node* node, Node* frame_state) {
const Operator* op = common()->FrameState(
BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
- const Operator* op0 = common()->StateValues(0);
+ const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
Node* node0 = graph()->NewNode(op0);
return graph()->NewNode(op, node0, node0, node0,
jsgraph()->UndefinedConstant(), function,
@@ -311,11 +307,10 @@ bool NeedsConvertReceiver(Node* receiver, Node* effect) {
if (dominator->opcode() == IrOpcode::kCheckMaps &&
IsSame(dominator->InputAt(0), receiver)) {
// Check if all maps of the {receiver} are JSReceiver maps.
- for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
- HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
- if (!m.HasValue()) return true;
- Handle<Map> const map = Handle<Map>::cast(m.Value());
- if (!map->IsJSReceiverMap()) return true;
+ ZoneHandleSet<Map> const& maps =
+ CheckMapsParametersOf(dominator->op()).maps();
+ for (size_t i = 0; i < maps.size(); ++i) {
+ if (!maps[i]->IsJSReceiverMap()) return true;
}
return false;
}
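
CheckMaps now stores its maps in the operator's parameters as a ZoneHandleSet<Map> instead of as extra heap-constant value inputs. A sketch of how a consumer reads them back, as the hunk above does; no HeapObjectMatcher unwrapping is needed any more:

    // Sketch: inspect the maps recorded on a dominating CheckMaps node.
    ZoneHandleSet<Map> const& maps =
        CheckMapsParametersOf(dominator->op()).maps();
    for (size_t i = 0; i < maps.size(); ++i) {
      // A single non-JSReceiver map means the receiver may still need the
      // ConvertReceiver conversion.
      if (!maps[i]->IsJSReceiverMap()) return true;
    }
    return false;
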
@@ -385,6 +380,14 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
JSCallAccessor call(node);
Handle<SharedFunctionInfo> shared_info(function->shared());
+ // Inlining is only supported in the bytecode pipeline.
+ if (!info_->is_optimizing_from_bytecode()) {
+ TRACE("Inlining %s into %s is not supported in the deprecated pipeline\n",
+ shared_info->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ return NoChange();
+ }
+
// Function must be inlineable.
if (!shared_info->IsInlineable()) {
TRACE("Not inlining %s into %s because callee is not inlineable\n",
@@ -486,12 +489,11 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
Zone zone(info_->isolate()->allocator(), ZONE_NAME);
ParseInfo parse_info(&zone, shared_info);
- CompilationInfo info(&parse_info, function);
+ CompilationInfo info(&parse_info, Handle<JSFunction>::null());
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
- if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
- if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
+ info.MarkAsOptimizeFromBytecode();
- if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+ if (!Compiler::EnsureBytecode(&info)) {
TRACE("Not inlining %s into %s because bytecode generation failed\n",
shared_info->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
@@ -501,25 +503,6 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
return NoChange();
}
- if (!info.is_optimizing_from_bytecode() &&
- !Compiler::ParseAndAnalyze(info.parse_info())) {
- TRACE("Not inlining %s into %s because parsing failed\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- if (info_->isolate()->has_pending_exception()) {
- info_->isolate()->clear_pending_exception();
- }
- return NoChange();
- }
-
- if (!info.is_optimizing_from_bytecode() &&
- !Compiler::EnsureDeoptimizationSupport(&info)) {
- TRACE("Not inlining %s into %s because deoptimization support failed\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// Remember that we inlined this function. This needs to be called right
// after we ensure deoptimization support so that the code flusher
// does not remove the code with the deoptimization support.
@@ -540,33 +523,13 @@ Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
// Create the subgraph for the inlinee.
Node* start;
Node* end;
- if (info.is_optimizing_from_bytecode()) {
+ {
// Run the BytecodeGraphBuilder to create the subgraph.
Graph::SubgraphScope scope(graph());
- BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
- call.frequency(), source_positions_,
- inlining_id);
- graph_builder.CreateGraph(false);
-
- // Extract the inlinee start/end nodes.
- start = graph()->start();
- end = graph()->end();
- } else {
- // Run the loop assignment analyzer on the inlinee.
- AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
- LoopAssignmentAnalysis* loop_assignment =
- loop_assignment_analyzer.Analyze();
-
- // Run the type hint analyzer on the inlinee.
- TypeHintAnalyzer type_hint_analyzer(&zone);
- TypeHintAnalysis* type_hint_analysis =
- type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
-
- // Run the AstGraphBuilder to create the subgraph.
- Graph::SubgraphScope scope(graph());
- AstGraphBuilderWithPositions graph_builder(
- &zone, &info, jsgraph(), call.frequency(), loop_assignment,
- type_hint_analysis, source_positions_, inlining_id);
+ BytecodeGraphBuilder graph_builder(
+ &zone, shared_info, handle(function->feedback_vector()),
+ BailoutId::None(), jsgraph(), call.frequency(), source_positions_,
+ inlining_id);
graph_builder.CreateGraph(false);
// Extract the inlinee start/end nodes.
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index 52903232d7..2a7a3a3896 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -32,6 +32,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
switch (f->function_id) {
case Runtime::kInlineCreateIterResultObject:
return ReduceCreateIterResultObject(node);
+ case Runtime::kInlineDebugIsActive:
+ return ReduceDebugIsActive(node);
case Runtime::kInlineDeoptimizeNow:
return ReduceDeoptimizeNow(node);
case Runtime::kInlineGeneratorClose:
@@ -40,12 +42,12 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceGeneratorGetInputOrDebugPos(node);
case Runtime::kInlineGeneratorGetResumeMode:
return ReduceGeneratorGetResumeMode(node);
+ case Runtime::kInlineGeneratorGetContext:
+ return ReduceGeneratorGetContext(node);
case Runtime::kInlineIsArray:
return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsTypedArray:
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
- case Runtime::kInlineIsRegExp:
- return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -70,8 +72,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceToString(node);
case Runtime::kInlineCall:
return ReduceCall(node);
- case Runtime::kInlineNewObject:
- return ReduceNewObject(node);
case Runtime::kInlineGetSuperConstructor:
return ReduceGetSuperConstructor(node);
default:
@@ -90,6 +90,15 @@ Reduction JSIntrinsicLowering::ReduceCreateIterResultObject(Node* node) {
context, effect);
}
+Reduction JSIntrinsicLowering::ReduceDebugIsActive(Node* node) {
+ Node* const value = jsgraph()->ExternalConstant(
+ ExternalReference::debug_is_active_address(isolate()));
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForExternalUint8Value());
+ return Change(node, op, value, effect, control);
+}
Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
if (mode() != kDeoptimizationEnabled) return NoChange();
@@ -133,6 +142,16 @@ Reduction JSIntrinsicLowering::ReduceGeneratorGetInputOrDebugPos(Node* node) {
return Change(node, op, generator, effect, control);
}
+Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
+ Node* const generator = NodeProperties::GetValueInput(node, 0);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op =
+ simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+ return Change(node, op, generator, effect, control);
+}
+
Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -277,10 +296,6 @@ Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
return Changed(node);
}
-Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
- return Change(node, CodeFactory::FastNewObject(isolate()), 0);
-}
-
Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
Node* active_function = NodeProperties::GetValueInput(node, 0);
Node* effect = NodeProperties::GetEffectInput(node);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 6e984ff496..2bc7cafa3d 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -40,8 +40,10 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
private:
Reduction ReduceCreateIterResultObject(Node* node);
+ Reduction ReduceDebugIsActive(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
Reduction ReduceGeneratorClose(Node* node);
+ Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
Reduction ReduceGeneratorGetResumeMode(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
@@ -57,7 +59,6 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceToObject(Node* node);
Reduction ReduceToString(Node* node);
Reduction ReduceCall(Node* node);
- Reduction ReduceNewObject(Node* node);
Reduction ReduceGetSuperConstructor(Node* node);
Reduction Change(Node* node, const Operator* op);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index a849fec5aa..4ea15c10a2 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -71,6 +71,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kJSInstanceOf:
return ReduceJSInstanceOf(node);
+ case IrOpcode::kJSOrdinaryHasInstance:
+ return ReduceJSOrdinaryHasInstance(node);
case IrOpcode::kJSLoadContext:
return ReduceJSLoadContext(node);
case IrOpcode::kJSLoadNamed:
@@ -81,6 +83,8 @@ Reduction JSNativeContextSpecialization::Reduce(Node* node) {
return ReduceJSLoadProperty(node);
case IrOpcode::kJSStoreProperty:
return ReduceJSStoreProperty(node);
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ return ReduceJSStoreDataPropertyInLiteral(node);
default:
break;
}
@@ -125,15 +129,16 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
// Monomorphic property access.
- effect =
- BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+ effect = BuildCheckMaps(constructor, effect, control,
+ access_info.receiver_maps());
// Lower to OrdinaryHasInstance(C, O).
NodeProperties::ReplaceValueInput(node, constructor, 0);
NodeProperties::ReplaceValueInput(node, object, 1);
NodeProperties::ReplaceEffectInput(node, effect);
NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
- return Changed(node);
+ Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
+ return reduction.Changed() ? reduction : Changed(node);
}
} else if (access_info.IsDataConstant()) {
DCHECK(access_info.constant()->IsCallable());
@@ -145,8 +150,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
}
// Monomorphic property access.
- effect =
- BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+ effect = BuildCheckMaps(constructor, effect, control,
+ access_info.receiver_maps());
// Call the @@hasInstance handler.
Node* target = jsgraph()->Constant(access_info.constant());
@@ -174,6 +179,31 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
return NoChange();
}
+Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+ Node* constructor = NodeProperties::GetValueInput(node, 0);
+ Node* object = NodeProperties::GetValueInput(node, 1);
+
+ // Check if the {constructor} is a JSBoundFunction.
+ HeapObjectMatcher m(constructor);
+ if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+ // OrdinaryHasInstance on a bound function unfolds into a recursive
+ // invocation of the instanceof operator on the bound target function.
+ // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
+ Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
+ Handle<JSReceiver> bound_target_function(function->bound_target_function());
+ NodeProperties::ReplaceValueInput(node, object, 0);
+ NodeProperties::ReplaceValueInput(
+ node, jsgraph()->HeapConstant(bound_target_function), 1);
+ NodeProperties::ChangeOp(node, javascript()->InstanceOf());
+ Reduction const reduction = ReduceJSInstanceOf(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+
+ return NoChange();
+}
+
Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
@@ -217,7 +247,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// TODO(turbofan): Add support for inlining into try blocks.
bool is_exceptional = NodeProperties::IsExceptionalCall(node);
- for (auto access_info : access_infos) {
+ for (const auto& access_info : access_infos) {
if (access_info.IsAccessorConstant()) {
// Accessors in try-blocks are not supported yet.
if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
@@ -260,8 +290,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiver, effect, control);
} else {
// Monomorphic property access.
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
effect = BuildCheckMaps(receiver, effect, control,
access_info.receiver_maps());
}
@@ -299,8 +328,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
receiverissmi_effect = effect;
} else {
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
}
// Load the {receiver} map. The resulting effect is the dominating effect
@@ -547,12 +575,9 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
length, effect, control);
- // Load the character from the {receiver}.
- value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+ // Return the character from the {receiver} as a single-character string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
control);
-
- // Return it as a single character string.
- value = graph()->NewNode(simplified()->StringFromCharCode(), value);
} else {
// Retrieve the native context from the given {node}.
// Compute element access infos for the receiver maps.
@@ -609,8 +634,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
// Ensure that {receiver} is a heap object.
- receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- receiver, effect, control);
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
// Check for the monomorphic case.
if (access_infos.size() == 1) {
@@ -621,13 +645,13 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Handle<Map> const transition_source = transition.first;
Handle<Map> const transition_target = transition.second;
effect = graph()->NewNode(
- simplified()->TransitionElementsKind(
+ simplified()->TransitionElementsKind(ElementsTransition(
IsSimpleMapChangeTransition(transition_source->elements_kind(),
transition_target->elements_kind())
? ElementsTransition::kFastTransition
- : ElementsTransition::kSlowTransition),
- receiver, jsgraph()->HeapConstant(transition_source),
- jsgraph()->HeapConstant(transition_target), effect, control);
+ : ElementsTransition::kSlowTransition,
+ transition_source, transition_target)),
+ receiver, effect, control);
}
// TODO(turbofan): The effect/control linearization will not find a
@@ -672,14 +696,13 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
Handle<Map> const transition_target = transition.second;
this_effect = graph()->NewNode(
simplified()->TransitionElementsKind(
- IsSimpleMapChangeTransition(
- transition_source->elements_kind(),
- transition_target->elements_kind())
- ? ElementsTransition::kFastTransition
- : ElementsTransition::kSlowTransition),
- receiver, jsgraph()->HeapConstant(transition_source),
- jsgraph()->HeapConstant(transition_target), this_effect,
- this_control);
+ ElementsTransition(IsSimpleMapChangeTransition(
+ transition_source->elements_kind(),
+ transition_target->elements_kind())
+ ? ElementsTransition::kFastTransition
+ : ElementsTransition::kSlowTransition,
+ transition_source, transition_target)),
+ receiver, this_effect, this_control);
}
// Load the {receiver} map.
@@ -806,12 +829,9 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
length, effect, control);
- // Load the character from the {receiver}.
- value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- index, control);
-
- // Return it as a single character string.
- value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+ // Return the character from the {receiver} as a single-character string.
+ value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+ control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
}
@@ -948,6 +968,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Determine actual holder and perform prototype chain checks.
Handle<JSObject> holder;
if (access_info.holder().ToHandle(&holder)) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
AssumePrototypesStable(access_info.receiver_maps(), holder);
}
@@ -981,7 +1002,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
common()->FrameState(BailoutId::None(),
OutputFrameStateCombine::Ignore(),
frame_info0),
- graph()->NewNode(common()->StateValues(1), receiver),
+ graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+ receiver),
jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
context, target, frame_state);
@@ -998,16 +1020,16 @@ JSNativeContextSpecialization::BuildPropertyAccess(
Handle<FunctionTemplateInfo> function_template_info(
Handle<FunctionTemplateInfo>::cast(access_info.constant()));
DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ZoneVector<Node*> stack_parameters(graph()->zone());
ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, &stack_parameters,
- effect, control, shared_info, function_template_info);
+ receiver, context, target, frame_state0, nullptr, effect, control,
+ shared_info, function_template_info);
value = value_effect_control.value();
effect = value_effect_control.effect();
control = value_effect_control.control();
}
break;
}
+ case AccessMode::kStoreInLiteral:
case AccessMode::kStore: {
// We need a FrameState for the setter stub to restore the correct
// context and return the appropriate value to fullcodegen.
@@ -1018,7 +1040,8 @@ JSNativeContextSpecialization::BuildPropertyAccess(
common()->FrameState(BailoutId::None(),
OutputFrameStateCombine::Ignore(),
frame_info0),
- graph()->NewNode(common()->StateValues(2), receiver, value),
+ graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+ receiver, value),
jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
context, target, frame_state);
@@ -1035,11 +1058,9 @@ JSNativeContextSpecialization::BuildPropertyAccess(
Handle<FunctionTemplateInfo> function_template_info(
Handle<FunctionTemplateInfo>::cast(access_info.constant()));
DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
- ZoneVector<Node*> stack_parameters(graph()->zone());
- stack_parameters.push_back(value);
ValueEffectControl value_effect_control = InlineApiCall(
- receiver, context, target, frame_state0, &stack_parameters,
- effect, control, shared_info, function_template_info);
+ receiver, context, target, frame_state0, value, effect, control,
+ shared_info, function_template_info);
value = value_effect_control.value();
effect = value_effect_control.effect();
control = value_effect_control.control();
@@ -1059,12 +1080,21 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Optimize immutable property loads.
HeapObjectMatcher m(receiver);
if (m.HasValue() && m.Value()->IsJSObject()) {
+ // TODO(ishell): Use something simpler like
+ //
+ // Handle<Object> value =
+ // JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+ // Representation::Tagged(), field_index);
+ //
+ // here, once we have the immutable bit in the access_info.
+
// TODO(turbofan): Given that we already have the field_index here, we
// might be smarter in the future and not rely on the LookupIterator,
// but for now let's just do what Crankshaft does.
LookupIterator it(m.Value(), name,
LookupIterator::OWN_SKIP_INTERCEPTOR);
- if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+ if (it.state() == LookupIterator::DATA && it.IsReadOnly() &&
+ !it.IsConfigurable()) {
Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
return ValueEffectControl(value, effect, control);
}
@@ -1080,6 +1110,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier};
@@ -1090,6 +1121,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
FieldAccess const storage_access = {kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -1099,9 +1131,18 @@ JSNativeContextSpecialization::BuildPropertyAccess(
field_access.offset = HeapNumber::kValueOffset;
field_access.name = MaybeHandle<Name>();
}
+ } else if (field_representation ==
+ MachineRepresentation::kTaggedPointer) {
+ // Remember the map of the field value, if its map is stable. This is
+ // used by the LoadElimination to eliminate map checks on the result.
+ Handle<Map> field_map;
+ if (access_info.field_map().ToHandle(&field_map)) {
+ if (field_map->is_stable()) {
+ dependencies()->AssumeMapStable(field_map);
+ field_access.map = field_map;
+ }
+ }
}
- // TODO(turbofan): Track the field_map (if any) on the {field_access} and
- // use it in LoadElimination to eliminate map checks.
value = effect = graph()->NewNode(simplified()->LoadField(field_access),
storage, effect, control);
} else {
@@ -1138,6 +1179,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
FieldAccess const storage_access = {kTaggedBase,
field_index.offset(),
name,
+ MaybeHandle<Map>(),
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -1159,14 +1201,14 @@ JSNativeContextSpecialization::BuildPropertyAccess(
}
case MachineRepresentation::kTaggedPointer: {
// Ensure that {value} is a HeapObject.
- value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
- value, effect, control);
+ value = BuildCheckHeapObject(value, &effect, control);
Handle<Map> field_map;
if (access_info.field_map().ToHandle(&field_map)) {
// Emit a map check for the value.
- effect = graph()->NewNode(simplified()->CheckMaps(1), value,
- jsgraph()->HeapConstant(field_map),
- effect, control);
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(field_map)),
+ value, effect, control);
}
field_access.write_barrier_kind = kPointerWriteBarrier;
break;
@@ -1226,6 +1268,79 @@ JSNativeContextSpecialization::BuildPropertyAccess(
return ValueEffectControl(value, effect, control);
}
+Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
+ Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
+
+ // If deoptimization is disabled, we cannot optimize.
+ if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+ DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+
+ if (!p.feedback().IsValid()) return NoChange();
+
+ StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
+ p.feedback().slot());
+ if (nexus.IsUninitialized()) {
+ return NoChange();
+ }
+
+ if (nexus.ic_state() == MEGAMORPHIC) {
+ return NoChange();
+ }
+
+ DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
+
+ Handle<Map> receiver_map(nexus.FindFirstMap(), isolate());
+ Handle<Name> cached_name =
+ handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
+
+ PropertyAccessInfo access_info;
+ AccessInfoFactory access_info_factory(dependencies(), native_context(),
+ graph()->zone());
+ if (!access_info_factory.ComputePropertyAccessInfo(
+ receiver_map, cached_name, AccessMode::kStoreInLiteral,
+ &access_info)) {
+ return NoChange();
+ }
+
+ if (access_info.IsGeneric()) {
+ return NoChange();
+ }
+
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Monomorphic property access.
+ receiver = BuildCheckHeapObject(receiver, &effect, control);
+
+ effect =
+ BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+
+ // Ensure that {name} matches the cached name.
+ Node* name = NodeProperties::GetValueInput(node, 1);
+ Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
+ jsgraph()->HeapConstant(cached_name));
+ effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+
+ Node* value = NodeProperties::GetValueInput(node, 2);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+
+ // Generate the actual property access.
+ ValueEffectControl continuation = BuildPropertyAccess(
+ receiver, value, context, frame_state_lazy, effect, control, cached_name,
+ access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY,
+ p.feedback().vector(), p.feedback().slot());
+ value = continuation.value();
+ effect = continuation.effect();
+ control = continuation.control();
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
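
The reduction above only fires in a narrow monomorphic window. A comment-only sketch of the precondition ladder and the guards it emits once the feedback checks pass:

    // Preconditions, in order: deoptimization enabled, a valid feedback
    // slot, and a nexus that is neither UNINITIALIZED nor MEGAMORPHIC
    // (hence MONOMORPHIC, as the DCHECK asserts). Only then does the
    // reducer emit:
    //   1. a heap-object check on the receiver,
    //   2. a map check against the single feedback map,
    //   3. a ReferenceEqual + CheckIf guard that the runtime {name} is the
    //      cached name (deoptimizing otherwise),
    //   4. the specialized property store itself.
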
+
namespace {
ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
@@ -1249,42 +1364,79 @@ JSNativeContextSpecialization::BuildElementAccess(
Node* receiver, Node* index, Node* value, Node* effect, Node* control,
ElementAccessInfo const& access_info, AccessMode access_mode,
KeyedAccessStoreMode store_mode) {
+ DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
MapList const& receiver_maps = access_info.receiver_maps();
- // Load the elements for the {receiver}.
- Node* elements = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
- effect, control);
-
- // Don't try to store to a copy-on-write backing store.
- if (access_mode == AccessMode::kStore &&
- IsFastSmiOrObjectElementsKind(elements_kind) &&
- store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
- effect =
- graph()->NewNode(simplified()->CheckMaps(1), elements,
- jsgraph()->FixedArrayMapConstant(), effect, control);
- }
-
if (IsFixedTypedArrayElementsKind(elements_kind)) {
- // Load the {receiver}s length.
- Node* length = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
- receiver, effect, control);
-
- // Check if the {receiver}s buffer was neutered.
- Node* buffer = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
- receiver, effect, control);
- Node* check = effect = graph()->NewNode(
- simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ Node* buffer;
+ Node* length;
+ Node* base_pointer;
+ Node* external_pointer;
+
+ // Check if we can constant-fold information about the {receiver} (i.e.
+ // for asm.js-like code patterns).
+ HeapObjectMatcher m(receiver);
+ if (m.HasValue() && m.Value()->IsJSTypedArray()) {
+ Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
+
+ // Determine the {receiver}s (known) length.
+ length = jsgraph()->Constant(typed_array->length_value());
+
+ // Check if the {receiver}s buffer was neutered.
+ buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
+
+ // Load the (known) base and external pointer for the {receiver}. The
+ // {external_pointer} might be invalid if the {buffer} was neutered, so
+ // we need to make sure that any access is properly guarded.
+ base_pointer = jsgraph()->ZeroConstant();
+ external_pointer = jsgraph()->PointerConstant(
+ FixedTypedArrayBase::cast(typed_array->elements())
+ ->external_pointer());
+ } else {
+ // Load the {receiver}s length.
+ length = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+ receiver, effect, control);
+
+ // Load the buffer for the {receiver}.
+ buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+ receiver, effect, control);
+
+ // Load the base and external pointer for the {receiver}s {elements}.
+ base_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+ elements, effect, control);
+ external_pointer = effect = graph()->NewNode(
+ simplified()->LoadField(
+ AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+ elements, effect, control);
+ }
- // Default to zero if the {receiver}s buffer was neutered.
- length = graph()->NewNode(
- common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
- check, jsgraph()->ZeroConstant(), length);
+ // See if we can skip the neutering check.
+ if (isolate()->IsArrayBufferNeuteringIntact()) {
+ // Add a code dependency so we are deoptimized in case an ArrayBuffer
+ // gets neutered.
+ dependencies()->AssumePropertyCell(
+ factory()->array_buffer_neutering_protector());
+ } else {
+ // Default to zero if the {receiver}s buffer was neutered.
+ Node* check = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+ length = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+ check, jsgraph()->ZeroConstant(), length);
+ }
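
[Sketch, not part of the commit.] The branch just above trades a per-access dynamic check for a global invariant: while the array-buffer-neutering protector cell is intact, no check is emitted, and AssumePropertyCell registers a code dependency so the optimized code deoptimizes if any buffer is later neutered. A self-contained sketch of the protector idea (names are illustrative, not the V8 API):

    #include <cstdio>
    #include <vector>

    struct ProtectorCell { bool intact = true; };
    struct CompiledCode { bool deoptimized = false; };

    // At compile time: if the protector holds, elide the check and record a
    // dependency; otherwise emit the dynamic "was neutered?" test.
    bool CompileAccess(ProtectorCell& cell, CompiledCode& code,
                       std::vector<CompiledCode*>& dependents) {
      if (cell.intact) {
        dependents.push_back(&code);  // invalidated with the invariant
        return false;                 // no runtime check needed
      }
      return true;
    }

    // At run time: neutering any buffer breaks the invariant for everyone.
    void NeuterAnyBuffer(ProtectorCell& cell,
                         std::vector<CompiledCode*>& deps) {
      cell.intact = false;
      for (CompiledCode* c : deps) c->deoptimized = true;
    }

    int main() {
      ProtectorCell protector;
      CompiledCode code;
      std::vector<CompiledCode*> deps;
      std::printf("check emitted: %d\n", CompileAccess(protector, code, deps));
      NeuterAnyBuffer(protector, deps);
      std::printf("deoptimized: %d\n", code.deoptimized);
    }
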
if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
// Check that the {index} is a valid array index, we do the actual
@@ -1300,16 +1452,6 @@ JSNativeContextSpecialization::BuildElementAccess(
length, effect, control);
}
- // Load the base and external pointer for the {receiver}.
- Node* base_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
- elements, effect, control);
- Node* external_pointer = effect = graph()->NewNode(
- simplified()->LoadField(
- AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
- elements, effect, control);
-
// Access the actual element.
ExternalArrayType external_array_type =
GetArrayTypeFromElementsKind(elements_kind);
@@ -1320,6 +1462,9 @@ JSNativeContextSpecialization::BuildElementAccess(
base_pointer, external_pointer, index, effect, control);
break;
}
+ case AccessMode::kStoreInLiteral:
+ UNREACHABLE();
+ break;
case AccessMode::kStore: {
// Ensure that the {value} is actually a Number.
value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1369,6 +1514,22 @@ JSNativeContextSpecialization::BuildElementAccess(
}
}
} else {
+ // Load the elements for the {receiver}.
+ Node* elements = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+ effect, control);
+
+ // Don't try to store to a copy-on-write backing store.
+ if (access_mode == AccessMode::kStore &&
+ IsFastSmiOrObjectElementsKind(elements_kind) &&
+ store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+ effect = graph()->NewNode(
+ simplified()->CheckMaps(
+ CheckMapsFlag::kNone,
+ ZoneHandleSet<Map>(factory()->fixed_array_map())),
+ elements, effect, control);
+ }
+
// Check if the {receiver} is a JSArray.
bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
@@ -1500,25 +1661,25 @@ JSNativeContextSpecialization::BuildElementAccess(
JSNativeContextSpecialization::ValueEffectControl
JSNativeContextSpecialization::InlineApiCall(
- Node* receiver, Node* context, Node* target, Node* frame_state,
- ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
- Handle<SharedFunctionInfo> shared_info,
+ Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
+ Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info) {
Handle<CallHandlerInfo> call_handler_info = handle(
CallHandlerInfo::cast(function_template_info->call_code()), isolate());
Handle<Object> call_data_object(call_handler_info->data(), isolate());
+ // Only setters have a value.
+ int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
CallApiCallbackStub stub(
- isolate(), static_cast<int>(stack_parameters->size()),
- call_data_object->IsUndefined(isolate()),
- true /* TODO(epertoso): similar to CallOptimization */);
+ isolate(), argc, call_data_object->IsUndefined(isolate()),
+ true /* FunctionTemplateInfo doesn't have an associated context. */);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), call_interface_descriptor,
- call_interface_descriptor.GetStackParameterCount() +
- static_cast<int>(stack_parameters->size()) + 1,
+ call_interface_descriptor.GetStackParameterCount() + argc +
+ 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
@@ -1529,42 +1690,62 @@ JSNativeContextSpecialization::InlineApiCall(
&function, ExternalReference::DIRECT_API_CALL, isolate())));
Node* code = jsgraph()->HeapConstant(stub.GetCode());
- ZoneVector<Node*> inputs(zone());
- inputs.push_back(code);
-
- // CallApiCallbackStub's register arguments.
- inputs.push_back(target);
- inputs.push_back(data);
- inputs.push_back(receiver);
- inputs.push_back(function_reference);
-
- // Stack parameters: CallApiCallbackStub expects the first one to be the
- // receiver.
- inputs.push_back(receiver);
- for (Node* node : *stack_parameters) {
- inputs.push_back(node);
+  // Add CallApiCallbackStub's register arguments as well.
+ Node* inputs[11] = {
+ code, target, data, receiver /* holder */, function_reference, receiver};
+ int index = 6 + argc;
+ inputs[index++] = context;
+ inputs[index++] = frame_state;
+ inputs[index++] = effect;
+ inputs[index++] = control;
+ // This needs to stay here because of the edge case described in
+ // http://crbug.com/675648.
+ if (value != nullptr) {
+ inputs[6] = value;
}
- inputs.push_back(context);
- inputs.push_back(frame_state);
- inputs.push_back(effect);
- inputs.push_back(control);
Node* effect0;
Node* value0 = effect0 =
- graph()->NewNode(common()->Call(call_descriptor),
- static_cast<int>(inputs.size()), inputs.data());
+ graph()->NewNode(common()->Call(call_descriptor), index, inputs);
Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
return ValueEffectControl(value0, effect0, control0);
}
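
[Sketch, not part of the commit.] The fixed inputs array above encodes the stub's expected operand order: code, target, call data, holder, C function reference, then the receiver as first stack parameter, an optional value slot for setters, and finally context, frame state, effect and control. Writing the tail at index 6 + argc keeps the layout valid for both getters (argc == 0) and setters (argc == 1); the value goes into slot 6 last because of the crbug.com/675648 edge case noted above. A standalone model of the same packing (plain C++, not the V8 node API):

    #include <cassert>
    #include <cstdio>

    // Packs operands the way InlineApiCall does; returns the input count.
    int PackInputs(const char* inputs[11], bool is_setter) {
      const char* fixed[6] = {"code",   "target",       "data",
                              "holder", "function_ref", "receiver"};
      for (int i = 0; i < 6; ++i) inputs[i] = fixed[i];
      int index = 6 + (is_setter ? 1 : 0);
      inputs[index + 0] = "context";
      inputs[index + 1] = "frame_state";
      inputs[index + 2] = "effect";
      inputs[index + 3] = "control";
      if (is_setter) inputs[6] = "value";  // filled in last, see above
      return index + 4;
    }

    int main() {
      const char* inputs[11];
      assert(PackInputs(inputs, /*is_setter=*/false) == 10);  // getter
      int n = PackInputs(inputs, /*is_setter=*/true);         // setter
      assert(n == 11);
      for (int i = 0; i < n; ++i) std::printf("%d: %s\n", i, inputs[i]);
    }
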
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+ Node** effect,
+ Node* control) {
+ switch (receiver->opcode()) {
+ case IrOpcode::kHeapConstant:
+ case IrOpcode::kJSCreate:
+ case IrOpcode::kJSCreateArguments:
+ case IrOpcode::kJSCreateArray:
+ case IrOpcode::kJSCreateClosure:
+ case IrOpcode::kJSCreateIterResultObject:
+ case IrOpcode::kJSCreateLiteralArray:
+ case IrOpcode::kJSCreateLiteralObject:
+ case IrOpcode::kJSCreateLiteralRegExp:
+ case IrOpcode::kJSConvertReceiver:
+ case IrOpcode::kJSToName:
+ case IrOpcode::kJSToString:
+ case IrOpcode::kJSToObject:
+ case IrOpcode::kJSTypeOf: {
+ return receiver;
+ }
+ default: {
+ return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+ receiver, *effect, control);
+ }
+ }
+}
+
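
[Sketch, not part of the commit.] BuildCheckHeapObject only inserts a CheckHeapObject node in the default case: every opcode listed in the switch is known to produce a heap object (constants, allocations, and the To* conversions), so no Smi check is needed there. The shape of the dispatch, reduced to a runnable toy (abridged opcode set, not the real IrOpcode enum):

    #include <cstdio>

    enum class Op { kHeapConstant, kJSCreate, kJSToObject, kParameter };

    // True when a dynamic heap-object (non-Smi) check is still required.
    bool NeedsHeapObjectCheck(Op op) {
      switch (op) {
        case Op::kHeapConstant:  // embedded constants are not Smis here
        case Op::kJSCreate:      // allocations always yield heap objects
        case Op::kJSToObject:    // conversions guarantee a JSReceiver
          return false;
        default:
          return true;           // e.g. a plain parameter could be a Smi
      }
    }

    int main() {
      std::printf("%d %d\n", NeedsHeapObjectCheck(Op::kJSCreate),
                  NeedsHeapObjectCheck(Op::kParameter));  // prints: 0 1
    }
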
Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& maps) {
+ std::vector<Handle<Map>> const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
Handle<Map> receiver_map(m.Value()->map(), isolate());
if (receiver_map->is_stable()) {
- for (Handle<Map> map : maps) {
+ for (Handle<Map> map : receiver_maps) {
if (map.is_identical_to(receiver_map)) {
dependencies()->AssumeMapStable(receiver_map);
return effect;
@@ -1572,17 +1753,16 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
}
}
}
- int const map_input_count = static_cast<int>(maps.size());
- int const input_count = 1 + map_input_count + 1 + 1;
- Node** inputs = zone()->NewArray<Node*>(input_count);
- inputs[0] = receiver;
- for (int i = 0; i < map_input_count; ++i) {
- inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+ ZoneHandleSet<Map> maps;
+ CheckMapsFlags flags = CheckMapsFlag::kNone;
+ for (Handle<Map> map : receiver_maps) {
+ maps.insert(map, graph()->zone());
+ if (map->is_migration_target()) {
+ flags |= CheckMapsFlag::kTryMigrateInstance;
+ }
}
- inputs[input_count - 2] = effect;
- inputs[input_count - 1] = control;
- return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
- inputs);
+ return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+ effect, control);
}
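
[Sketch, not part of the commit.] With this change the map check becomes a single node carrying a deduplicated ZoneHandleSet<Map> plus a flags word; if any expected map is a migration target, kTryMigrateInstance asks the generated code to attempt an in-place instance migration before giving up and deoptimizing. How the set and flags accumulate, modeled with std::set (illustrative only):

    #include <cstdio>
    #include <set>

    struct Map {
      int id;
      bool is_migration_target;
    };

    enum CheckMapsFlags { kNone = 0, kTryMigrateInstance = 1 << 0 };

    // Mirrors the loop in BuildCheckMaps: insert every map, OR the flags.
    int CollectMaps(const Map* maps, int n, std::set<int>* out) {
      int flags = kNone;
      for (int i = 0; i < n; ++i) {
        out->insert(maps[i].id);  // deduplicated, like ZoneHandleSet::insert
        if (maps[i].is_migration_target) flags |= kTryMigrateInstance;
      }
      return flags;
    }

    int main() {
      Map maps[] = {{1, false}, {2, true}, {1, false}};
      std::set<int> set;
      int flags = CollectMaps(maps, 3, &set);
      std::printf("maps=%zu flags=%d\n", set.size(), flags);  // maps=2 flags=1
    }
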
void JSNativeContextSpecialization::AssumePrototypesStable(
@@ -1671,11 +1851,11 @@ MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
HeapObjectMatcher mtarget(m.InputAt(0));
HeapObjectMatcher mnewtarget(m.InputAt(1));
if (mtarget.HasValue() && mnewtarget.HasValue()) {
- Handle<JSFunction> constructor =
- Handle<JSFunction>::cast(mtarget.Value());
- if (constructor->has_initial_map()) {
- Handle<Map> initial_map(constructor->initial_map(), isolate());
- if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
+ Handle<JSFunction> original_constructor =
+ Handle<JSFunction>::cast(mnewtarget.Value());
+ if (original_constructor->has_initial_map()) {
+ Handle<Map> initial_map(original_constructor->initial_map(), isolate());
+ if (initial_map->constructor_or_backpointer() == *mtarget.Value()) {
// Walk up the {effect} chain to see if the {receiver} is the
// dominating effect and there's no other observable write in
// between.
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index 2d07061d11..7fedf32e92 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -54,11 +54,13 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
private:
Reduction ReduceJSInstanceOf(Node* node);
+ Reduction ReduceJSOrdinaryHasInstance(Node* node);
Reduction ReduceJSLoadContext(Node* node);
Reduction ReduceJSLoadNamed(Node* node);
Reduction ReduceJSStoreNamed(Node* node);
Reduction ReduceJSLoadProperty(Node* node);
Reduction ReduceJSStoreProperty(Node* node);
+ Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
MapHandleList const& receiver_maps,
@@ -116,6 +118,9 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
KeyedAccessStoreMode store_mode);
+ // Construct an appropriate heap object check.
+ Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+
// Construct an appropriate map check.
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
std::vector<Handle<Map>> const& maps);
@@ -146,7 +151,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
ValueEffectControl InlineApiCall(
Node* receiver, Node* context, Node* target, Node* frame_state,
- ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+ Node* parameter, Node* effect, Node* control,
Handle<SharedFunctionInfo> shared_info,
Handle<FunctionTemplateInfo> function_template_info);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index f64630c589..d9674c1bed 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -10,6 +10,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/type-feedback-vector.h"
namespace v8 {
@@ -20,7 +21,7 @@ VectorSlotPair::VectorSlotPair() {}
int VectorSlotPair::index() const {
- return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
+ return vector_.is_null() ? -1 : TypeFeedbackVector::GetIndex(slot_);
}
@@ -80,6 +81,30 @@ CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
return OpParameter<CallConstructParameters>(op);
}
+bool operator==(CallConstructWithSpreadParameters const& lhs,
+ CallConstructWithSpreadParameters const& rhs) {
+ return lhs.arity() == rhs.arity();
+}
+
+bool operator!=(CallConstructWithSpreadParameters const& lhs,
+ CallConstructWithSpreadParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CallConstructWithSpreadParameters const& p) {
+ return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CallConstructWithSpreadParameters const& p) {
+ return os << p.arity();
+}
+
+CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCallConstructWithSpread, op->opcode());
+ return OpParameter<CallConstructWithSpreadParameters>(op);
+}
std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
@@ -191,6 +216,60 @@ CreateCatchContextParameters const& CreateCatchContextParametersOf(
return OpParameter<CreateCatchContextParameters>(op);
}
+CreateFunctionContextParameters::CreateFunctionContextParameters(
+ int slot_count, ScopeType scope_type)
+ : slot_count_(slot_count), scope_type_(scope_type) {}
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs) {
+ return lhs.slot_count() == rhs.slot_count() &&
+ lhs.scope_type() == rhs.scope_type();
+}
+
+bool operator!=(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CreateFunctionContextParameters const& parameters) {
+ return base::hash_combine(parameters.slot_count(),
+ static_cast<int>(parameters.scope_type()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+ CreateFunctionContextParameters const& parameters) {
+ return os << parameters.slot_count() << ", " << parameters.scope_type();
+}
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, op->opcode());
+ return OpParameter<CreateFunctionContextParameters>(op);
+}
+
+bool operator==(DataPropertyParameters const& lhs,
+ DataPropertyParameters const& rhs) {
+ return lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(DataPropertyParameters const& lhs,
+ DataPropertyParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(DataPropertyParameters const& p) {
+ return base::hash_combine(p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, DataPropertyParameters const& p) {
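+  // Nothing to print: the feedback vector/slot pair has no textual form.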
+ return os;
+}
+
+DataPropertyParameters const& DataPropertyParametersOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+ return OpParameter<DataPropertyParameters>(op);
+}
+
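
[Sketch, not part of the commit.] The block above follows the standard js-operator pattern: the parameter object rides on an Operator1<T>, the *Of() accessor recovers it after a DCHECK on the opcode, and the ==, hash_value and << overloads let identical operators be shared and value-numbered. Reduced to its essentials (a model, not the real Operator class):

    #include <cassert>

    struct VectorSlotPairLike { int slot; };
    struct DataPropertyParams { VectorSlotPairLike feedback; };

    // Operator1<T> carries its parameter by value...
    template <typename T>
    struct Operator1 {
      int opcode;
      T parameter;
    };

    // ...and the *Of() accessor hands it back after checking the opcode.
    const DataPropertyParams& ParamsOf(
        const Operator1<DataPropertyParams>& op) {
      assert(op.opcode == 1);  // stands in for the DCHECK on the IrOpcode
      return op.parameter;
    }

    int main() {
      Operator1<DataPropertyParams> op{1, {{7}}};
      assert(ParamsOf(op).feedback.slot == 7);
    }
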
bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
@@ -350,6 +429,7 @@ const CreateArrayParameters& CreateArrayParametersOf(const Operator* op) {
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
+ lhs.feedback() == rhs.feedback() &&
lhs.shared_info().location() == rhs.shared_info().location();
}
@@ -361,7 +441,8 @@ bool operator!=(CreateClosureParameters const& lhs,
size_t hash_value(CreateClosureParameters const& p) {
- return base::hash_combine(p.pretenure(), p.shared_info().location());
+ return base::hash_combine(p.pretenure(), p.shared_info().location(),
+ p.feedback());
}
@@ -436,26 +517,27 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
return OpParameter<CompareOperationHint>(op);
}
-#define CACHED_OP_LIST(V) \
- V(ToInteger, Operator::kNoProperties, 1, 1) \
- V(ToLength, Operator::kNoProperties, 1, 1) \
- V(ToName, Operator::kNoProperties, 1, 1) \
- V(ToNumber, Operator::kNoProperties, 1, 1) \
- V(ToObject, Operator::kFoldable, 1, 1) \
- V(ToString, Operator::kNoProperties, 1, 1) \
- V(Create, Operator::kEliminatable, 2, 1) \
- V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
- V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
- V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
- V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
- V(ForInNext, Operator::kNoProperties, 4, 1) \
- V(ForInPrepare, Operator::kNoProperties, 1, 3) \
- V(LoadMessage, Operator::kNoThrow, 0, 1) \
- V(StoreMessage, Operator::kNoThrow, 1, 0) \
- V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
- V(StackCheck, Operator::kNoWrite, 0, 0)
+#define CACHED_OP_LIST(V) \
+ V(ToInteger, Operator::kNoProperties, 1, 1) \
+ V(ToLength, Operator::kNoProperties, 1, 1) \
+ V(ToName, Operator::kNoProperties, 1, 1) \
+ V(ToNumber, Operator::kNoProperties, 1, 1) \
+ V(ToObject, Operator::kFoldable, 1, 1) \
+ V(ToString, Operator::kNoProperties, 1, 1) \
+ V(Create, Operator::kEliminatable, 2, 1) \
+ V(CreateIterResultObject, Operator::kEliminatable, 2, 1) \
+ V(CreateKeyValueArray, Operator::kEliminatable, 2, 1) \
+ V(HasProperty, Operator::kNoProperties, 2, 1) \
+ V(TypeOf, Operator::kPure, 1, 1) \
+ V(InstanceOf, Operator::kNoProperties, 2, 1) \
+ V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1) \
+ V(ForInNext, Operator::kNoProperties, 4, 1) \
+ V(ForInPrepare, Operator::kNoProperties, 1, 3) \
+ V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+ V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+ V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
+ V(StackCheck, Operator::kNoWrite, 0, 0) \
+ V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
#define BINARY_OP_LIST(V) \
V(BitwiseOr) \
@@ -527,6 +609,9 @@ struct JSOperatorGlobalCache final {
Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator; \
Name##Operator<CompareOperationHint::kNumberOrOddball> \
k##Name##NumberOrOddballOperator; \
+ Name##Operator<CompareOperationHint::kString> k##Name##StringOperator; \
+ Name##Operator<CompareOperationHint::kInternalizedString> \
+ k##Name##InternalizedStringOperator; \
Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
@@ -578,6 +663,10 @@ BINARY_OP_LIST(BINARY_OP)
return &cache_.k##Name##NumberOperator; \
case CompareOperationHint::kNumberOrOddball: \
return &cache_.k##Name##NumberOrOddballOperator; \
+ case CompareOperationHint::kInternalizedString: \
+ return &cache_.k##Name##InternalizedStringOperator; \
+ case CompareOperationHint::kString: \
+ return &cache_.k##Name##StringOperator; \
case CompareOperationHint::kAny: \
return &cache_.k##Name##AnyOperator; \
} \
@@ -587,6 +676,17 @@ BINARY_OP_LIST(BINARY_OP)
COMPARE_OP_LIST(COMPARE_OP)
#undef COMPARE_OP
+const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
+ const VectorSlotPair& feedback) {
+ DataPropertyParameters parameters(feedback);
+ return new (zone()) Operator1<DataPropertyParameters>( // --
+ IrOpcode::kJSStoreDataPropertyInLiteral,
+ Operator::kNoThrow, // opcode
+ "JSStoreDataPropertyInLiteral", // name
+ 4, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
+}
+
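
[Sketch, not part of the commit.] The six-number "// counts" comment in these builders follows the Operator constructor order: value inputs, effect inputs, control inputs, then value, effect and control outputs. So "4, 1, 1, 0, 1, 0" above declares four value inputs plus one effect and one control input, produces no value, and exports a single effect edge. A small decoder for the convention (illustrative):

    #include <cstdio>

    // Field order matches the counts tuples passed to Operator1 here.
    struct OperatorCounts {
      int value_in, effect_in, control_in;
      int value_out, effect_out, control_out;
    };

    int main() {
      OperatorCounts store_in_literal{4, 1, 1, 0, 1, 0};
      std::printf("inputs=%d outputs=%d\n",
                  store_in_literal.value_in + store_in_literal.effect_in +
                      store_in_literal.control_in,
                  store_in_literal.value_out + store_in_literal.effect_out +
                      store_in_literal.control_out);  // inputs=6 outputs=1
    }
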
const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
// TODO(turbofan): Cache most important versions of this operator.
return new (zone()) Operator1<ToBooleanHints>( //--
@@ -643,6 +743,14 @@ const Operator* JSOperatorBuilder::CallConstruct(
parameters); // parameter
}
+const Operator* JSOperatorBuilder::CallConstructWithSpread(uint32_t arity) {
+ CallConstructWithSpreadParameters parameters(arity);
+ return new (zone()) Operator1<CallConstructWithSpreadParameters>( // --
+ IrOpcode::kJSCallConstructWithSpread, Operator::kNoProperties, // opcode
+ "JSCallConstructWithSpread", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
const Operator* JSOperatorBuilder::ConvertReceiver(
ConvertReceiverMode convert_mode) {
@@ -659,7 +767,7 @@ const Operator* JSOperatorBuilder::LoadNamed(Handle<Name> name,
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
- 2, 1, 1, 1, 1, 2, // counts
+ 1, 1, 1, 1, 1, 2, // counts
access); // parameter
}
@@ -669,7 +777,7 @@ const Operator* JSOperatorBuilder::LoadProperty(
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
- 3, 1, 1, 1, 1, 2, // counts
+ 2, 1, 1, 1, 1, 2, // counts
access); // parameter
}
@@ -696,7 +804,7 @@ const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
return new (zone()) Operator1<NamedAccess>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
- 3, 1, 1, 0, 1, 2, // counts
+ 2, 1, 1, 0, 1, 2, // counts
access); // parameter
}
@@ -707,7 +815,7 @@ const Operator* JSOperatorBuilder::StoreProperty(
return new (zone()) Operator1<PropertyAccess>( // --
IrOpcode::kJSStoreProperty, Operator::kNoProperties, // opcode
"JSStoreProperty", // name
- 4, 1, 1, 0, 1, 2, // counts
+ 3, 1, 1, 0, 1, 2, // counts
access); // parameter
}
@@ -728,7 +836,7 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
return new (zone()) Operator1<LoadGlobalParameters>( // --
IrOpcode::kJSLoadGlobal, Operator::kNoProperties, // opcode
"JSLoadGlobal", // name
- 1, 1, 1, 1, 1, 2, // counts
+ 0, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
@@ -740,7 +848,7 @@ const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
return new (zone()) Operator1<StoreGlobalParameters>( // --
IrOpcode::kJSStoreGlobal, Operator::kNoProperties, // opcode
"JSStoreGlobal", // name
- 2, 1, 1, 0, 1, 2, // counts
+ 1, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
@@ -752,7 +860,7 @@ const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
IrOpcode::kJSLoadContext, // opcode
Operator::kNoWrite | Operator::kNoThrow, // flags
"JSLoadContext", // name
- 1, 1, 0, 1, 1, 0, // counts
+ 0, 1, 0, 1, 1, 0, // counts
access); // parameter
}
@@ -763,7 +871,7 @@ const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
IrOpcode::kJSStoreContext, // opcode
Operator::kNoRead | Operator::kNoThrow, // flags
"JSStoreContext", // name
- 2, 1, 1, 0, 1, 0, // counts
+ 1, 1, 1, 0, 1, 0, // counts
access); // parameter
}
@@ -806,10 +914,10 @@ const Operator* JSOperatorBuilder::CreateArray(size_t arity,
parameters); // parameter
}
-
const Operator* JSOperatorBuilder::CreateClosure(
- Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
- CreateClosureParameters parameters(shared_info, pretenure);
+ Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
+ PretenureFlag pretenure) {
+ CreateClosureParameters parameters(shared_info, feedback, pretenure);
return new (zone()) Operator1<CreateClosureParameters>( // --
IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
"JSCreateClosure", // name
@@ -818,8 +926,8 @@ const Operator* JSOperatorBuilder::CreateClosure(
}
const Operator* JSOperatorBuilder::CreateLiteralArray(
- Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
- int number_of_elements) {
+ Handle<ConstantElementsPair> constant_elements, int literal_flags,
+ int literal_index, int number_of_elements) {
CreateLiteralParameters parameters(constant_elements, number_of_elements,
literal_flags, literal_index);
return new (zone()) Operator1<CreateLiteralParameters>( // --
@@ -853,13 +961,14 @@ const Operator* JSOperatorBuilder::CreateLiteralRegExp(
parameters); // parameter
}
-
-const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
- return new (zone()) Operator1<int>( // --
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count,
+ ScopeType scope_type) {
+ CreateFunctionContextParameters parameters(slot_count, scope_type);
+ return new (zone()) Operator1<CreateFunctionContextParameters>( // --
IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties, // opcode
"JSCreateFunctionContext", // name
1, 1, 1, 1, 1, 2, // counts
- slot_count); // parameter
+ parameters); // parameter
}
const Operator* JSOperatorBuilder::CreateCatchContext(
@@ -882,22 +991,21 @@ const Operator* JSOperatorBuilder::CreateWithContext(
}
const Operator* JSOperatorBuilder::CreateBlockContext(
- const Handle<ScopeInfo>& scpope_info) {
+ const Handle<ScopeInfo>& scope_info) {
return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
"JSCreateBlockContext", // name
1, 1, 1, 1, 1, 2, // counts
- scpope_info); // parameter
+ scope_info); // parameter
}
-
const Operator* JSOperatorBuilder::CreateScriptContext(
- const Handle<ScopeInfo>& scpope_info) {
+ const Handle<ScopeInfo>& scope_info) {
return new (zone()) Operator1<Handle<ScopeInfo>>( // --
IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
"JSCreateScriptContext", // name
1, 1, 1, 1, 1, 2, // counts
- scpope_info); // parameter
+ scope_info); // parameter
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 9cdd30594a..b9902931fc 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -80,6 +80,31 @@ std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
CallConstructParameters const& CallConstructParametersOf(Operator const*);
+// Defines the arity for a JavaScript constructor call with a spread as the
+// last parameter. This is used as a parameter by JSCallConstructWithSpread
+// operators.
+class CallConstructWithSpreadParameters final {
+ public:
+ explicit CallConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+ uint32_t arity() const { return arity_; }
+
+ private:
+ uint32_t const arity_;
+};
+
+bool operator==(CallConstructWithSpreadParameters const&,
+ CallConstructWithSpreadParameters const&);
+bool operator!=(CallConstructWithSpreadParameters const&,
+ CallConstructWithSpreadParameters const&);
+
+size_t hash_value(CallConstructWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&,
+ CallConstructWithSpreadParameters const&);
+
+CallConstructWithSpreadParameters const& CallConstructWithSpreadParametersOf(
+ Operator const*);
// Defines the arity and the call flags for a JavaScript function call. This is
// used as a parameter by JSCallFunction operators.
@@ -216,6 +241,56 @@ std::ostream& operator<<(std::ostream& os,
CreateCatchContextParameters const& CreateCatchContextParametersOf(
Operator const*);
+// Defines the slot count and ScopeType for a new function or eval context. This
+// is used as a parameter by the JSCreateFunctionContext operator.
+class CreateFunctionContextParameters final {
+ public:
+ CreateFunctionContextParameters(int slot_count, ScopeType scope_type);
+
+ int slot_count() const { return slot_count_; }
+ ScopeType scope_type() const { return scope_type_; }
+
+ private:
+ int const slot_count_;
+ ScopeType const scope_type_;
+};
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
+bool operator!=(CreateFunctionContextParameters const& lhs,
+ CreateFunctionContextParameters const& rhs);
+
+size_t hash_value(CreateFunctionContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+ CreateFunctionContextParameters const& parameters);
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+ Operator const*);
+
+// Defines the feedback, i.e., vector and index, for storing a data property
+// in an object literal. This is used as a parameter by the
+// JSStoreDataPropertyInLiteral operator.
+class DataPropertyParameters final {
+ public:
+ explicit DataPropertyParameters(VectorSlotPair const& feedback)
+ : feedback_(feedback) {}
+
+ VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+ VectorSlotPair const feedback_;
+};
+
+bool operator==(DataPropertyParameters const&, DataPropertyParameters const&);
+bool operator!=(DataPropertyParameters const&, DataPropertyParameters const&);
+
+size_t hash_value(DataPropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, DataPropertyParameters const&);
+
+const DataPropertyParameters& DataPropertyParametersOf(const Operator* op);
+
// Defines the property of an object for a named access. This is
// used as a parameter by the JSLoadNamed and JSStoreNamed operators.
class NamedAccess final {
@@ -361,14 +436,17 @@ const CreateArrayParameters& CreateArrayParametersOf(const Operator* op);
class CreateClosureParameters final {
public:
CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
+ VectorSlotPair const& feedback,
PretenureFlag pretenure)
- : shared_info_(shared_info), pretenure_(pretenure) {}
+ : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+ VectorSlotPair const& feedback() const { return feedback_; }
PretenureFlag pretenure() const { return pretenure_; }
private:
const Handle<SharedFunctionInfo> shared_info_;
+ VectorSlotPair const feedback_;
const PretenureFlag pretenure_;
};
@@ -456,10 +534,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateArguments(CreateArgumentsType type);
const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
+ VectorSlotPair const& feedback,
PretenureFlag pretenure);
const Operator* CreateIterResultObject();
const Operator* CreateKeyValueArray();
- const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+ const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
int literal_flags, int literal_index,
int number_of_elements);
const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
@@ -478,6 +557,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
const Operator* CallConstruct(uint32_t arity, float frequency,
VectorSlotPair const& feedback);
+ const Operator* CallConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -489,10 +569,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
VectorSlotPair const& feedback);
+ const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
+
const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
+ const Operator* GetSuperConstructor();
+
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -525,7 +609,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* StackCheck();
- const Operator* CreateFunctionContext(int slot_count);
+ const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
const Operator* CreateCatchContext(const Handle<String>& name,
const Handle<ScopeInfo>& scope_info);
const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index dbbeca6e96..54c8713578 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -69,12 +69,24 @@ class JSBinopReduction final {
return true;
case CompareOperationHint::kAny:
case CompareOperationHint::kNone:
+ case CompareOperationHint::kString:
+ case CompareOperationHint::kInternalizedString:
break;
}
}
return false;
}
+ bool IsInternalizedStringCompareOperation() {
+ if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+ DCHECK_EQ(1, node_->op()->EffectOutputCount());
+ return (CompareOperationHintOf(node_->op()) ==
+ CompareOperationHint::kInternalizedString) &&
+ BothInputsMaybe(Type::InternalizedString());
+ }
+ return false;
+ }
+
// Check if a string addition will definitely result in creating a ConsString,
// i.e. if the combined length of the resulting string exceeds the ConsString
// minimum length.
@@ -103,6 +115,25 @@ class JSBinopReduction final {
return false;
}
+  // Checks that both inputs are InternalizedString; if we don't know
+  // statically that one side is already an InternalizedString, inserts a
+  // CheckInternalizedString node.
+ void CheckInputsToInternalizedString() {
+ if (!left_type()->Is(Type::UniqueName())) {
+ Node* left_input = graph()->NewNode(
+ simplified()->CheckInternalizedString(), left(), effect(), control());
+ node_->ReplaceInput(0, left_input);
+ update_effect(left_input);
+ }
+ if (!right_type()->Is(Type::UniqueName())) {
+ Node* right_input =
+ graph()->NewNode(simplified()->CheckInternalizedString(), right(),
+ effect(), control());
+ node_->ReplaceInput(1, right_input);
+ update_effect(right_input);
+ }
+ }
+
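
[Sketch, not part of the commit.] The fast path this enables rests on a single invariant: the heap keeps at most one copy of every internalized string, so two internalized strings are equal exactly when they are the same object. Once CheckInternalizedString has guarded both inputs, the comparison can lower to ReferenceEqual. The same idea in standalone C++, with an intern pool making pointer identity equivalent to content equality:

    #include <cassert>
    #include <string>
    #include <unordered_set>

    // Returns the canonical object for the given contents; equal contents
    // always map to one pointer (unordered_set nodes are pointer-stable).
    const std::string* Intern(std::unordered_set<std::string>& pool,
                              const std::string& s) {
      return &*pool.insert(s).first;
    }

    int main() {
      std::unordered_set<std::string> pool;
      const std::string* a = Intern(pool, "foo");
      const std::string* b = Intern(pool, std::string("fo") + "o");
      const std::string* c = Intern(pool, "bar");
      assert(a == b);  // same contents, same canonical object
      assert(a != c);  // different contents, different objects
    }
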
void ConvertInputsToNumber() {
// To convert the inputs to numbers, we have to provide frame states
// for lazy bailouts in the ToNumber conversions.
@@ -316,6 +347,10 @@ class JSBinopReduction final {
bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
+ bool BothInputsMaybe(Type* t) {
+ return left_type()->Maybe(t) && right_type()->Maybe(t);
+ }
+
bool OneInputCannotBe(Type* t) {
return !left_type()->Maybe(t) || !right_type()->Maybe(t);
}
@@ -459,8 +494,6 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- the_hole_type_(
- Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
@@ -850,6 +883,13 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
JSBinopReduction r(this, node);
+ if (r.BothInputsAre(Type::UniqueName())) {
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ }
+ if (r.IsInternalizedStringCompareOperation()) {
+ r.CheckInputsToInternalizedString();
+ return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+ }
if (r.BothInputsAre(Type::String())) {
return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
}
@@ -912,25 +952,14 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
if (reduction.Changed()) return reduction;
- if (r.OneInputIs(the_hole_type_)) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Undefined())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Null())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.OneInputIs(Type::Boolean())) {
+ if (r.BothInputsAre(Type::Unique())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
- if (r.OneInputIs(Type::Object())) {
+ if (r.OneInputIs(Type::NonStringUniqueOrHole())) {
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
- if (r.OneInputIs(Type::Receiver())) {
- return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
- }
- if (r.BothInputsAre(Type::Unique())) {
+ if (r.IsInternalizedStringCompareOperation()) {
+ r.CheckInputsToInternalizedString();
return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
@@ -958,7 +987,6 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
- RelaxEffectsAndControls(node);
node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
jsgraph()->ZeroConstant()));
node->TrimInputCount(1);
@@ -966,10 +994,25 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
return Changed(node);
} else if (input_type->Is(Type::Number())) {
// JSToBoolean(x:number) => NumberToBoolean(x)
- RelaxEffectsAndControls(node);
node->TrimInputCount(1);
NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
return Changed(node);
+ } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
+ // JSToBoolean(x:detectable receiver \/ null)
+ // => BooleanNot(ReferenceEqual(x,#null))
+ node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
+ input, jsgraph()->NullConstant()));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
+ } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
+ // JSToBoolean(x:receiver \/ null \/ undefined)
+ // => BooleanNot(ObjectIsUndetectable(x))
+ node->ReplaceInput(
+ 0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+ node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+ return Changed(node);
}
return NoChange();
}
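
[Sketch, not part of the commit.] The two new branches avoid the generic ToBoolean call by exploiting the input type: a detectable receiver or null is truthy iff it is not null, and for the wider receiver/null/undefined type the test becomes "not undetectable", which is only correct because null and undefined report as undetectable just like document.all-style receivers. Both rules as plain predicates (a model of the lowering's semantics, not graph code):

    #include <cassert>

    enum class Kind { kReceiver, kUndetectableReceiver, kNull, kUndefined };

    bool IsUndetectable(Kind k) {  // null/undefined carry the bit too
      return k == Kind::kUndetectableReceiver || k == Kind::kNull ||
             k == Kind::kUndefined;
    }

    // x : detectable receiver or null  =>  ToBoolean(x) == (x != null)
    bool ToBooleanReceiverOrNull(Kind k) { return k != Kind::kNull; }

    // x : receiver/null/undefined  =>  ToBoolean(x) == !IsUndetectable(x)
    bool ToBooleanReceiverOrNullOrUndefined(Kind k) {
      return !IsUndetectable(k);
    }

    int main() {
      assert(ToBooleanReceiverOrNull(Kind::kReceiver));
      assert(!ToBooleanReceiverOrNull(Kind::kNull));
      assert(ToBooleanReceiverOrNullOrUndefined(Kind::kReceiver));
      assert(!ToBooleanReceiverOrNullOrUndefined(Kind::kUndefined));
    }
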
@@ -1239,6 +1282,9 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* value = NodeProperties::GetValueInput(node, 2);
Type* key_type = NodeProperties::GetType(key);
Type* value_type = NodeProperties::GetType(value);
+
+ if (!value_type->Is(Type::PlainPrimitive())) return NoChange();
+
HeapObjectMatcher mbase(base);
if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
@@ -1257,7 +1303,6 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
Node* length = jsgraph()->Constant(byte_length);
- Node* context = NodeProperties::GetContextInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// Convert to a number first.
@@ -1266,12 +1311,8 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
- Node* frame_state_for_to_number =
- NodeProperties::FindFrameStateBefore(node);
- value = effect =
- graph()->NewNode(javascript()->ToNumber(), value, context,
- frame_state_for_to_number, effect, control);
- control = graph()->NewNode(common()->IfSuccess(), value);
+ value =
+ graph()->NewNode(simplified()->PlainPrimitiveToNumber(), value);
}
}
// Check if we can avoid the bounds check.
@@ -1316,11 +1357,30 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Node* constructor = NodeProperties::GetValueInput(node, 0);
Type* constructor_type = NodeProperties::GetType(constructor);
Node* object = NodeProperties::GetValueInput(node, 1);
+ Type* object_type = NodeProperties::GetType(object);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Check if the {constructor} cannot be callable.
+ // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
+ if (!constructor_type->Maybe(Type::Callable())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+  // If the {constructor} cannot be a JSBoundFunction and the {object}
+  // cannot be a JSReceiver, then this can be constant-folded to false.
+  // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) steps 2 and 3.
+ if (!object_type->Maybe(Type::Receiver()) &&
+ !constructor_type->Maybe(Type::BoundFunction())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
// Check if the {constructor} is a (known) JSFunction.
if (!constructor_type->IsHeapConstant() ||
!constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -1473,16 +1533,17 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
Node* control = graph()->start();
for (size_t i = 0; i < access.depth(); ++i) {
- Node* previous = effect = graph()->NewNode(
+ context = effect = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control);
- node->ReplaceInput(0, previous);
+ context, effect, control);
}
+ node->ReplaceInput(0, context);
node->ReplaceInput(1, effect);
- node->ReplaceInput(2, control);
+ node->AppendInput(jsgraph()->zone(), control);
NodeProperties::ChangeOp(
node,
simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
@@ -1493,15 +1554,17 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
ContextAccess const& access = ContextAccessOf(node->op());
Node* effect = NodeProperties::GetEffectInput(node);
+ Node* context = NodeProperties::GetContextInput(node);
Node* control = graph()->start();
+ Node* value = NodeProperties::GetValueInput(node, 0);
for (size_t i = 0; i < access.depth(); ++i) {
- Node* previous = effect = graph()->NewNode(
+ context = effect = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
- NodeProperties::GetValueInput(node, 0), effect, control);
- node->ReplaceInput(0, previous);
+ context, effect, control);
}
- node->RemoveInput(2);
+ node->ReplaceInput(0, context);
+ node->ReplaceInput(1, value);
node->ReplaceInput(2, effect);
NodeProperties::ChangeOp(
node,
@@ -1614,10 +1677,10 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
} else {
Node* native_context = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, effect);
+ context, effect);
receiver = effect = graph()->NewNode(
javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, effect);
+ native_context, effect);
}
ReplaceWithValue(node, receiver, effect, control);
return Replace(receiver);
@@ -1719,10 +1782,10 @@ Reduction JSTypedLowering::ReduceJSConvertReceiver(Node* node) {
} else {
Node* native_context = eglobal = graph()->NewNode(
javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
- context, context, eglobal);
+ context, eglobal);
rglobal = eglobal = graph()->NewNode(
javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
- native_context, native_context, eglobal);
+ native_context, eglobal);
}
}
@@ -2031,6 +2094,15 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // We know that the {index} is in Unsigned32 range here, otherwise executing
+ // the JSForInNext wouldn't be valid. Unfortunately due to OSR and generators
+ // this is not always reflected in the types, hence we might need to rename
+ // the {index} here.
+ if (!NodeProperties::GetType(index)->Is(Type::Unsigned32())) {
+ index = graph()->NewNode(common()->TypeGuard(Type::Unsigned32()), index,
+ control);
+ }
+
// Load the next {key} from the {cache_array}.
Node* key = effect = graph()->NewNode(
simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
@@ -2085,6 +2157,28 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
return Changed(node);
}
+Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
+ ExternalReference const ref =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+ NodeProperties::ChangeOp(
+ node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+ return Changed(node);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode());
+ ExternalReference const ref =
+ ExternalReference::address_of_pending_message_obj(isolate());
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+ node->ReplaceInput(1, value);
+ NodeProperties::ChangeOp(
+ node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+ return Changed(node);
+}
+
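
[Sketch, not part of the commit.] Both reducers rewrite a JS-level operator into a raw field access at a fixed address: the pending message object lives in a known isolate slot, so the graph loads and stores through an ExternalConstant rather than calling into the runtime. The standalone analogue is just indirection through a constant pointer (names illustrative):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t pending_message_slot = 0;  // the isolate-internal slot

    // What the lowered LoadMessage/StoreMessage boil down to.
    uintptr_t LoadMessage(uintptr_t* slot) { return *slot; }
    void StoreMessage(uintptr_t* slot, uintptr_t value) { *slot = value; }

    int main() {
      uintptr_t* addr = &pending_message_slot;  // ExternalConstant analogue
      StoreMessage(addr, 42);
      std::printf("%lu\n", static_cast<unsigned long>(LoadMessage(addr)));
    }
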
Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2095,7 +2189,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
int register_count = OpParameter<int>(node);
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
FieldAccess continuation_field =
AccessBuilder::ForJSGeneratorObjectContinuation();
@@ -2149,7 +2243,7 @@ Reduction JSTypedLowering::ReduceJSGeneratorRestoreRegister(Node* node) {
Node* control = NodeProperties::GetControlInput(node);
int index = OpParameter<int>(node);
- FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+ FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
@@ -2235,6 +2329,10 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSCallFunction(node);
case IrOpcode::kJSForInNext:
return ReduceJSForInNext(node);
+ case IrOpcode::kJSLoadMessage:
+ return ReduceJSLoadMessage(node);
+ case IrOpcode::kJSStoreMessage:
+ return ReduceJSStoreMessage(node);
case IrOpcode::kJSGeneratorStore:
return ReduceJSGeneratorStore(node);
case IrOpcode::kJSGeneratorRestoreContinuation:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 3e710226b4..20f35f1fe1 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -73,6 +73,8 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSCallConstruct(Node* node);
Reduction ReduceJSCallFunction(Node* node);
Reduction ReduceJSForInNext(Node* node);
+ Reduction ReduceJSLoadMessage(Node* node);
+ Reduction ReduceJSStoreMessage(Node* node);
Reduction ReduceJSGeneratorStore(Node* node);
Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
Reduction ReduceJSGeneratorRestoreRegister(Node* node);
@@ -96,7 +98,6 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Flags flags_;
JSGraph* jsgraph_;
Type* shifted_int32_ranges_[4];
- Type* const the_hole_type_;
TypeCache const& type_cache_;
};
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 971ea7212d..2458f65867 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -5,7 +5,6 @@
#include "src/compiler/linkage.h"
#include "src/ast/scopes.h"
-#include "src/builtins/builtins-utils.h"
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler/common-operator.h"
@@ -152,7 +151,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kDefineGetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kDefineSetterPropertyUnchecked: // TODO(jarin): Is it safe?
case Runtime::kGeneratorGetContinuation:
- case Runtime::kGetSuperConstructor:
case Runtime::kIsFunction:
case Runtime::kNewClosure:
case Runtime::kNewClosure_Tenured:
@@ -179,7 +177,6 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineGeneratorClose:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
- case Runtime::kInlineGetSuperConstructor:
case Runtime::kInlineIsArray:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index e50ebe1919..6c2935f7ca 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -8,6 +8,8 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -320,6 +322,42 @@ void LoadElimination::AbstractField::Print() const {
}
}
+bool LoadElimination::AbstractMaps::Lookup(
+ Node* object, ZoneHandleSet<Map>* object_maps) const {
+ for (auto pair : info_for_node_) {
+ if (MustAlias(object, pair.first)) {
+ *object_maps = pair.second;
+ return true;
+ }
+ }
+ return false;
+}
+
+LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
+ Node* object, Zone* zone) const {
+ for (auto pair : this->info_for_node_) {
+ if (MayAlias(object, pair.first)) {
+ AbstractMaps* that = new (zone) AbstractMaps(zone);
+ for (auto pair : this->info_for_node_) {
+ if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+ }
+ return that;
+ }
+ }
+ return this;
+}
+
+void LoadElimination::AbstractMaps::Print() const {
+ for (auto pair : info_for_node_) {
+ PrintF(" #%d:%s\n", pair.first->id(), pair.first->op()->mnemonic());
+ OFStream os(stdout);
+ ZoneHandleSet<Map> const& maps = pair.second;
+ for (size_t i = 0; i < maps.size(); ++i) {
+ os << " - " << Brief(*maps[i]) << "\n";
+ }
+ }
+}
+
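
[Sketch, not part of the commit.] AbstractMaps is a side table from IR nodes to the set of maps each node is known to have. Note the asymmetry above: Lookup answers only through MustAlias (a definite alias), while Kill conservatively drops every entry that MayAlias the clobbered object. A compact model, with plain identity standing in for the alias queries:

    #include <cassert>
    #include <map>
    #include <set>

    using Object = int;  // node identity models MustAlias in this sketch
    using Maps = std::set<int>;

    bool Lookup(const std::map<Object, Maps>& table, Object o, Maps* out) {
      auto it = table.find(o);  // definite alias only
      if (it == table.end()) return false;
      *out = it->second;
      return true;
    }

    // A real MayAlias would also erase entries it cannot tell apart from o.
    void Kill(std::map<Object, Maps>& table, Object o) { table.erase(o); }

    int main() {
      std::map<Object, Maps> table{{1, {10}}, {2, {20}}};
      Maps maps;
      assert(Lookup(table, 1, &maps) && maps.count(10) == 1);
      Kill(table, 1);
      assert(!Lookup(table, 1, &maps));
    }
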
bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
if (this->checks_) {
if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
@@ -344,6 +382,13 @@ bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
return false;
}
}
+ if (this->maps_) {
+ if (!that->maps_ || !that->maps_->Equals(this->maps_)) {
+ return false;
+ }
+ } else if (that->maps_) {
+ return false;
+ }
return true;
}
@@ -372,6 +417,11 @@ void LoadElimination::AbstractState::Merge(AbstractState const* that,
}
}
}
+
+ // Merge the information we have about the maps.
+ if (this->maps_) {
+ this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
+ }
}
Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -389,6 +439,35 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::AddCheck(
return that;
}
+bool LoadElimination::AbstractState::LookupMaps(
+ Node* object, ZoneHandleSet<Map>* object_map) const {
+ return this->maps_ && this->maps_->Lookup(object, object_map);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
+ Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
+ AbstractState* that = new (zone) AbstractState(*this);
+ if (that->maps_) {
+ that->maps_ = that->maps_->Extend(object, maps, zone);
+ } else {
+ that->maps_ = new (zone) AbstractMaps(object, maps, zone);
+ }
+ return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
+ Node* object, Zone* zone) const {
+ if (this->maps_) {
+ AbstractMaps const* that_maps = this->maps_->Kill(object, zone);
+ if (this->maps_ != that_maps) {
+ AbstractState* that = new (zone) AbstractState(*this);
+ that->maps_ = that_maps;
+ return that;
+ }
+ }
+ return this;
+}
+
Node* LoadElimination::AbstractState::LookupElement(Node* object,
Node* index) const {
if (this->elements_) {
@@ -456,7 +535,7 @@ LoadElimination::AbstractState::KillFields(Node* object, Zone* zone) const {
AbstractField const* that_field = this_field->Kill(object, zone);
if (that_field != this_field) {
AbstractState* that = new (zone) AbstractState(*this);
- that->fields_[i] = this_field;
+ that->fields_[i] = that_field;
while (++i < arraysize(fields_)) {
if (this->fields_[i] != nullptr) {
that->fields_[i] = this->fields_[i]->Kill(object, zone);
@@ -481,6 +560,10 @@ void LoadElimination::AbstractState::Print() const {
PrintF(" checks:\n");
checks_->Print();
}
+ if (maps_) {
+ PrintF(" maps:\n");
+ maps_->Print();
+ }
if (elements_) {
PrintF(" elements:\n");
elements_->Print();
@@ -520,23 +603,18 @@ Reduction LoadElimination::ReduceArrayBufferWasNeutered(Node* node) {
}
Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+ ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
Node* const object = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int const map_input_count = node->op()->ValueInputCount() - 1;
- if (Node* const object_map =
- state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
- for (int i = 0; i < map_input_count; ++i) {
- Node* map = NodeProperties::GetValueInput(node, 1 + i);
- if (map == object_map) return Replace(effect);
- }
- }
- if (map_input_count == 1) {
- Node* const map0 = NodeProperties::GetValueInput(node, 1);
- state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
- zone());
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ if (maps.contains(object_maps)) return Replace(effect);
+ state = state->KillMaps(object, zone());
+ // TODO(turbofan): Compute the intersection.
}
+ state = state->AddMaps(object, maps, zone());
return UpdateState(node, state);
}
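
[Sketch, not part of the commit.] ReduceCheckMaps is now a subset test over map sets: if everything the state already proves about {object} is contained in the maps this check accepts, the check folds away into its incoming effect; otherwise the stale map information is killed and the checked set recorded (the TODO notes that the precise result would be the intersection). A minimal model of the subset logic:

    #include <algorithm>
    #include <cassert>
    #include <set>

    using Maps = std::set<int>;

    // Does the checked set accept every map the object is known to have?
    bool Contains(const Maps& checked, const Maps& known) {
      return std::includes(checked.begin(), checked.end(), known.begin(),
                           known.end());
    }

    // Returns true when the CheckMaps can be elided; updates the known maps.
    bool ReduceCheckMaps(const Maps& checked, Maps* known) {
      if (!known->empty() && Contains(checked, *known)) return true;
      *known = checked;  // afterwards the object has one of the checked maps
      return false;
    }

    int main() {
      Maps known{1};
      assert(ReduceCheckMaps({1, 2}, &known));     // {1} within {1,2}: elided
      Maps unknown;                                // no prior information
      assert(!ReduceCheckMaps({1, 2}, &unknown));  // check must stay
      assert(unknown == Maps({1, 2}));
    }
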
@@ -546,18 +624,16 @@ Reduction LoadElimination::ReduceEnsureWritableFastElements(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- if (Node* const elements_map =
- state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
// Check if the {elements} already have the fixed array map.
- if (elements_map == fixed_array_map) {
- ReplaceWithValue(node, elements, effect);
- return Replace(elements);
- }
+ ZoneHandleSet<Map> elements_maps;
+ ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+ if (state->LookupMaps(elements, &elements_maps) &&
+ fixed_array_maps.contains(elements_maps)) {
+ ReplaceWithValue(node, elements, effect);
+ return Replace(elements);
}
// We know that the resulting elements have the fixed array map.
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_array_map, zone());
+ state = state->AddMaps(node, fixed_array_maps, zone());
// Kill the previous elements on {object}.
state =
state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
@@ -575,14 +651,12 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
if (state == nullptr) return NoChange();
if (flags & GrowFastElementsFlag::kDoubleElements) {
// We know that the resulting elements have the fixed double array map.
- Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_double_array_map, zone());
+ state = state->AddMaps(
+ node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
} else {
// We know that the resulting elements have the fixed array map.
- Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
- state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
- fixed_array_map, zone());
+ state = state->AddMaps(
+ node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
}
if (flags & GrowFastElementsFlag::kArrayObject) {
// Kill the previous Array::length on {object}.
@@ -599,31 +673,30 @@ Reduction LoadElimination::ReduceMaybeGrowFastElements(Node* node) {
}
Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+ ElementsTransition transition = ElementsTransitionOf(node->op());
Node* const object = NodeProperties::GetValueInput(node, 0);
- Node* const source_map = NodeProperties::GetValueInput(node, 1);
- Node* const target_map = NodeProperties::GetValueInput(node, 2);
+ Handle<Map> source_map(transition.source());
+ Handle<Map> target_map(transition.target());
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- if (Node* const object_map =
- state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
- if (target_map == object_map) {
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps)) {
+ if (ZoneHandleSet<Map>(target_map).contains(object_maps)) {
// The {object} already has the {target_map}, so this TransitionElements
// {node} is fully redundant (independent of what {source_map} is).
return Replace(effect);
}
- state =
- state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
- if (source_map == object_map) {
- state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
- target_map, zone());
+ if (object_maps.contains(ZoneHandleSet<Map>(source_map))) {
+ object_maps.remove(source_map, zone());
+ object_maps.insert(target_map, zone());
+ state = state->KillMaps(object, zone());
+ state = state->AddMaps(object, object_maps, zone());
}
} else {
- state =
- state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
+ state = state->KillMaps(object, zone());
}
- ElementsTransition transition = ElementsTransitionOf(node->op());
- switch (transition) {
+ switch (transition.mode()) {
case ElementsTransition::kFastTransition:
break;
case ElementsTransition::kSlowTransition:
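// Worked example for the map-set update above (hedged; assumes the usual
// ZoneHandleSet value semantics):
//
//   state maps(object) = {source_map, other_map}
//   TransitionElementsKind(source_map -> target_map)
//     => source_map is removed and target_map inserted, giving
//        {target_map, other_map}; KillMaps + AddMaps re-records the set.
//
//   If maps(object) == {target_map}, the transition already happened and
//   the node is replaced by its effect input.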
@@ -642,23 +715,40 @@ Reduction LoadElimination::ReduceLoadField(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int field_index = FieldIndexOf(access);
- if (field_index >= 0) {
- if (Node* replacement = state->LookupField(object, field_index)) {
- // Make sure we don't resurrect dead {replacement} nodes.
- if (!replacement->IsDead()) {
- // We might need to guard the {replacement} if the type of the
- // {node} is more precise than the type of the {replacement}.
- Type* const node_type = NodeProperties::GetType(node);
- if (!NodeProperties::GetType(replacement)->Is(node_type)) {
- replacement = graph()->NewNode(common()->TypeGuard(node_type),
- replacement, control);
+ if (access.offset == HeapObject::kMapOffset &&
+ access.base_is_tagged == kTaggedBase) {
+ DCHECK(IsAnyTagged(access.machine_type.representation()));
+ ZoneHandleSet<Map> object_maps;
+ if (state->LookupMaps(object, &object_maps) && object_maps.size() == 1) {
+ Node* value = jsgraph()->HeapConstant(object_maps[0]);
+ NodeProperties::SetType(value, Type::OtherInternal());
+ ReplaceWithValue(node, value, effect);
+ return Replace(value);
+ }
+ } else {
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ if (Node* replacement = state->LookupField(object, field_index)) {
+ // Make sure we don't resurrect dead {replacement} nodes.
+ if (!replacement->IsDead()) {
+ // We might need to guard the {replacement} if the type of the
+ // {node} is more precise than the type of the {replacement}.
+ Type* const node_type = NodeProperties::GetType(node);
+ if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+ replacement = graph()->NewNode(common()->TypeGuard(node_type),
+ replacement, control);
+ NodeProperties::SetType(replacement, node_type);
+ }
+ ReplaceWithValue(node, replacement, effect);
+ return Replace(replacement);
}
- ReplaceWithValue(node, replacement, effect);
- return Replace(replacement);
}
+ state = state->AddField(object, field_index, node, zone());
}
- state = state->AddField(object, field_index, node, zone());
+ }
+ Handle<Map> field_map;
+ if (access.map.ToHandle(&field_map)) {
+ state = state->AddMaps(node, ZoneHandleSet<Map>(field_map), zone());
}
return UpdateState(node, state);
}
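// Sketch of the map-load fast path above: when the abstract state knows a
// unique map for {object}, a tagged load of HeapObject::kMapOffset folds to
// a HeapConstant. Illustrative only:
//
//   state maps(o) = {MapA}
//   v = LoadField[Map](o)   =>   v = HeapConstant(MapA) : OtherInternal
//
// With two or more possible maps the load is left alone; non-map fields go
// through the existing LookupField/AddField machinery instead.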
@@ -670,19 +760,33 @@ Reduction LoadElimination::ReduceStoreField(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- int field_index = FieldIndexOf(access);
- if (field_index >= 0) {
- Node* const old_value = state->LookupField(object, field_index);
- if (old_value == new_value) {
- // This store is fully redundant.
- return Replace(effect);
+ if (access.offset == HeapObject::kMapOffset &&
+ access.base_is_tagged == kTaggedBase) {
+ DCHECK(IsAnyTagged(access.machine_type.representation()));
+ // Kill all potential knowledge about the {object}s map.
+ state = state->KillMaps(object, zone());
+ Type* const new_value_type = NodeProperties::GetType(new_value);
+ if (new_value_type->IsHeapConstant()) {
+ // Record the new {object} map information.
+ ZoneHandleSet<Map> object_maps(
+ Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+ state = state->AddMaps(object, object_maps, zone());
}
- // Kill all potentially aliasing fields and record the new value.
- state = state->KillField(object, field_index, zone());
- state = state->AddField(object, field_index, new_value, zone());
} else {
- // Unsupported StoreField operator.
- state = state->KillFields(object, zone());
+ int field_index = FieldIndexOf(access);
+ if (field_index >= 0) {
+ Node* const old_value = state->LookupField(object, field_index);
+ if (old_value == new_value) {
+ // This store is fully redundant.
+ return Replace(effect);
+ }
+ // Kill all potentially aliasing fields and record the new value.
+ state = state->KillField(object, field_index, zone());
+ state = state->AddField(object, field_index, new_value, zone());
+ } else {
+ // Unsupported StoreField operator.
+ state = state->KillFields(object, zone());
+ }
}
return UpdateState(node, state);
}
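// Sketch of the map-store handling above (a hedged reading of the code):
//
//   StoreField[Map](o, m)
//     => KillMaps(o)                       // any prior knowledge is stale
//     => if Type(m) is HeapConstant(MapA)
//          AddMaps(o, {MapA})              // re-learn the exact map
//
// Stores to other offsets keep the old value-numbering path: a store of the
// value already recorded for the field is dropped as fully redundant.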
@@ -703,6 +807,7 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
if (!NodeProperties::GetType(replacement)->Is(node_type)) {
replacement = graph()->NewNode(common()->TypeGuard(node_type),
replacement, control);
+ NodeProperties::SetType(replacement, node_type);
}
ReplaceWithValue(node, replacement, effect);
return Replace(replacement);
@@ -865,21 +970,31 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
break;
}
case IrOpcode::kTransitionElementsKind: {
+ ElementsTransition transition = ElementsTransitionOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
- state = state->KillField(
- object, FieldIndexOf(HeapObject::kMapOffset), zone());
- state = state->KillField(
- object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ ZoneHandleSet<Map> object_maps;
+ if (!state->LookupMaps(object, &object_maps) ||
+ !ZoneHandleSet<Map>(transition.target())
+ .contains(object_maps)) {
+ state = state->KillMaps(object, zone());
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ }
break;
}
case IrOpcode::kStoreField: {
FieldAccess const& access = FieldAccessOf(current->op());
Node* const object = NodeProperties::GetValueInput(current, 0);
- int field_index = FieldIndexOf(access);
- if (field_index < 0) {
- state = state->KillFields(object, zone());
+ if (access.offset == HeapObject::kMapOffset) {
+ // Invalidate what we know about the {object}s map.
+ state = state->KillMaps(object, zone());
} else {
- state = state->KillField(object, field_index, zone());
+ int field_index = FieldIndexOf(access);
+ if (field_index < 0) {
+ state = state->KillFields(object, zone());
+ } else {
+ state = state->KillField(object, field_index, zone());
+ }
}
break;
}
@@ -911,7 +1026,8 @@ int LoadElimination::FieldIndexOf(int offset) {
DCHECK_EQ(0, offset % kPointerSize);
int field_index = offset / kPointerSize;
if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
- return field_index;
+ DCHECK_LT(0, field_index);
+ return field_index - 1;
}
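// Hedged illustration of the new indexing: offset 0 (the map word) is now
// owned by AbstractMaps, so tracked field slots shift down by one.
//
//   FieldIndexOf(1 * kPointerSize)                 // == 0
//   FieldIndexOf(2 * kPointerSize)                 // == 1
//   FieldIndexOf(kMaxTrackedFields * kPointerSize) // == -1 (not tracked)
//
// Passing HeapObject::kMapOffset would trip the DCHECK_LT above; map
// accesses are expected to go through the map-specific paths instead.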
// static
@@ -957,6 +1073,8 @@ CommonOperatorBuilder* LoadElimination::common() const {
Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
+Factory* LoadElimination::factory() const { return jsgraph()->factory(); }
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index 50979e4da8..cd486a2cd7 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -8,9 +8,14 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class Factory;
+
namespace compiler {
// Forward declarations.
@@ -152,6 +157,49 @@ class V8_EXPORT_PRIVATE LoadElimination final
static size_t const kMaxTrackedFields = 32;
+ // Abstract state to approximate the current map of an object along the
+ // effect paths through the graph.
+ class AbstractMaps final : public ZoneObject {
+ public:
+ explicit AbstractMaps(Zone* zone) : info_for_node_(zone) {}
+ AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone)
+ : info_for_node_(zone) {
+ info_for_node_.insert(std::make_pair(object, maps));
+ }
+
+ AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
+ Zone* zone) const {
+ AbstractMaps* that = new (zone) AbstractMaps(zone);
+ that->info_for_node_ = this->info_for_node_;
+ that->info_for_node_.insert(std::make_pair(object, maps));
+ return that;
+ }
+ bool Lookup(Node* object, ZoneHandleSet<Map>* object_maps) const;
+ AbstractMaps const* Kill(Node* object, Zone* zone) const;
+ bool Equals(AbstractMaps const* that) const {
+ return this == that || this->info_for_node_ == that->info_for_node_;
+ }
+ AbstractMaps const* Merge(AbstractMaps const* that, Zone* zone) const {
+ if (this->Equals(that)) return this;
+ AbstractMaps* copy = new (zone) AbstractMaps(zone);
+ for (auto this_it : this->info_for_node_) {
+ Node* this_object = this_it.first;
+ ZoneHandleSet<Map> this_maps = this_it.second;
+ auto that_it = that->info_for_node_.find(this_object);
+ if (that_it != that->info_for_node_.end() &&
+ that_it->second == this_maps) {
+ copy->info_for_node_.insert(this_it);
+ }
+ }
+ return copy;
+ }
+
+ void Print() const;
+
+ private:
+ ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_;
+ };
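// Usage sketch for AbstractMaps (assumptions marked): Extend() copies the
// node->maps mapping and adds one entry; Merge() keeps only entries on which
// both states agree exactly, a conservative intersection:
//
//   this: {o1 -> {MapA}, o2 -> {MapB}}
//   that: {o1 -> {MapA}, o2 -> {MapC}}
//   Merge(this, that) == {o1 -> {MapA}}   // o2 disagrees and is dropped
//
// Lookup() and Kill() are declared here and defined in the .cc file;
// presumably Kill() drops the entry for the node and any may-aliases.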
+
class AbstractState final : public ZoneObject {
public:
AbstractState() {
@@ -163,6 +211,11 @@ class V8_EXPORT_PRIVATE LoadElimination final
bool Equals(AbstractState const* that) const;
void Merge(AbstractState const* that, Zone* zone);
+ AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
+ Zone* zone) const;
+ AbstractState const* KillMaps(Node* object, Zone* zone) const;
+ bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
+
AbstractState const* AddField(Node* object, size_t index, Node* value,
Zone* zone) const;
AbstractState const* KillField(Node* object, size_t index,
@@ -185,6 +238,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
AbstractChecks const* checks_ = nullptr;
AbstractElements const* elements_ = nullptr;
AbstractField const* fields_[kMaxTrackedFields];
+ AbstractMaps const* maps_ = nullptr;
};
class AbstractStateForEffectNodes final : public ZoneObject {
@@ -223,6 +277,7 @@ class V8_EXPORT_PRIVATE LoadElimination final
CommonOperatorBuilder* common() const;
AbstractState const* empty_state() const { return &empty_state_; }
+ Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Zone* zone() const { return node_states_.zone(); }
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index a8f7a25e1f..ecabbe0575 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -30,6 +30,10 @@ class MachineRepresentationInferrer {
Run();
}
+ CallDescriptor* call_descriptor() const {
+ return linkage_->GetIncomingDescriptor();
+ }
+
MachineRepresentation GetRepresentation(Node const* node) const {
return representation_vector_.at(node->id());
}
@@ -66,6 +70,18 @@ class MachineRepresentationInferrer {
}
}
+ MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return MachineRepresentation::kWord32;
+ default:
+ break;
+ }
+ return rep;
+ }
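// The promotion above reflects that sub-word integer loads and stores
// produce/consume full 32-bit registers, so for checking purposes kWord8,
// kWord16 and kWord32 are all treated as kWord32. For example (sketch):
//
//   PromoteRepresentation(MachineRepresentation::kWord8)    // => kWord32
//   PromoteRepresentation(MachineRepresentation::kFloat64)  // => kFloat64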
+
void Run() {
auto blocks = schedule_->all_blocks();
for (BasicBlock* block : *blocks) {
@@ -82,6 +98,11 @@ class MachineRepresentationInferrer {
linkage_->GetParameterType(ParameterIndexOf(node->op()))
.representation();
break;
+ case IrOpcode::kReturn: {
+ representation_vector_[node->id()] = PromoteRepresentation(
+ linkage_->GetReturnType().representation());
+ break;
+ }
case IrOpcode::kProjection: {
representation_vector_[node->id()] = GetProjectionType(node);
} break;
@@ -91,12 +112,12 @@ class MachineRepresentationInferrer {
case IrOpcode::kAtomicLoad:
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
- representation_vector_[node->id()] =
- LoadRepresentationOf(node->op()).representation();
+ representation_vector_[node->id()] = PromoteRepresentation(
+ LoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kCheckedLoad:
- representation_vector_[node->id()] =
- CheckedLoadRepresentationOf(node->op()).representation();
+ representation_vector_[node->id()] = PromoteRepresentation(
+ CheckedLoadRepresentationOf(node->op()).representation());
break;
case IrOpcode::kLoadStackPointer:
case IrOpcode::kLoadFramePointer:
@@ -104,6 +125,10 @@ class MachineRepresentationInferrer {
representation_vector_[node->id()] =
MachineType::PointerRepresentation();
break;
+ case IrOpcode::kUnalignedLoad:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ UnalignedLoadRepresentationOf(node->op()).representation());
+ break;
case IrOpcode::kPhi:
representation_vector_[node->id()] =
PhiRepresentationOf(node->op());
@@ -119,9 +144,22 @@ class MachineRepresentationInferrer {
}
break;
}
- case IrOpcode::kUnalignedLoad:
+ case IrOpcode::kAtomicStore:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ break;
+ case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ StoreRepresentationOf(node->op()).representation());
+ break;
+ case IrOpcode::kCheckedStore:
representation_vector_[node->id()] =
- UnalignedLoadRepresentationOf(node->op()).representation();
+ PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
+ break;
+ case IrOpcode::kUnalignedStore:
+ representation_vector_[node->id()] = PromoteRepresentation(
+ UnalignedStoreRepresentationOf(node->op()));
break;
case IrOpcode::kHeapConstant:
case IrOpcode::kNumberConstant:
@@ -237,8 +275,12 @@ class MachineRepresentationChecker {
public:
MachineRepresentationChecker(
Schedule const* const schedule,
- MachineRepresentationInferrer const* const inferrer)
- : schedule_(schedule), inferrer_(inferrer) {}
+ MachineRepresentationInferrer const* const inferrer, bool is_stub,
+ const char* name)
+ : schedule_(schedule),
+ inferrer_(inferrer),
+ is_stub_(is_stub),
+ name_(name) {}
void Run() {
BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -290,9 +332,17 @@ class MachineRepresentationChecker {
CheckValueInputForFloat64Op(node, 0);
break;
case IrOpcode::kWord64Equal:
- CheckValueInputIsTaggedOrPointer(node, 0);
- CheckValueInputRepresentationIs(
- node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ if (Is64()) {
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputIsTaggedOrPointer(node, 1);
+ if (!is_stub_) {
+ CheckValueInputRepresentationIs(
+ node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ }
+ } else {
+ CheckValueInputForInt64Op(node, 0);
+ CheckValueInputForInt64Op(node, 1);
+ }
break;
case IrOpcode::kInt64LessThan:
case IrOpcode::kInt64LessThanOrEqual:
@@ -317,6 +367,19 @@ class MachineRepresentationChecker {
MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
break;
case IrOpcode::kWord32Equal:
+ if (Is32()) {
+ CheckValueInputIsTaggedOrPointer(node, 0);
+ CheckValueInputIsTaggedOrPointer(node, 1);
+ if (!is_stub_) {
+ CheckValueInputRepresentationIs(
+ node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+ }
+ } else {
+ CheckValueInputForInt32Op(node, 0);
+ CheckValueInputForInt32Op(node, 1);
+ }
+ break;
+
case IrOpcode::kInt32LessThan:
case IrOpcode::kInt32LessThanOrEqual:
case IrOpcode::kUint32LessThan:
@@ -374,7 +437,7 @@ class MachineRepresentationChecker {
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
- switch (StoreRepresentationOf(node->op()).representation()) {
+ switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -382,15 +445,14 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
- node, 2,
- StoreRepresentationOf(node->op()).representation());
+ node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kAtomicStore:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
- switch (AtomicStoreRepresentationOf(node->op())) {
+ switch (inferrer_->GetRepresentation(node)) {
case MachineRepresentation::kTagged:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
@@ -398,7 +460,7 @@ class MachineRepresentationChecker {
break;
default:
CheckValueInputRepresentationIs(
- node, 2, AtomicStoreRepresentationOf(node->op()));
+ node, 2, inferrer_->GetRepresentation(node));
}
break;
case IrOpcode::kPhi:
@@ -410,6 +472,11 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, i);
}
break;
+ case MachineRepresentation::kWord32:
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+ CheckValueInputForInt32Op(node, i);
+ }
+ break;
default:
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
CheckValueInputRepresentationIs(
@@ -422,10 +489,33 @@ class MachineRepresentationChecker {
case IrOpcode::kSwitch:
CheckValueInputForInt32Op(node, 0);
break;
- case IrOpcode::kReturn:
- // TODO(epertoso): use the linkage to determine which type we
- // should have here.
+ case IrOpcode::kReturn: {
+ // TODO(ishell): enable once the pop count parameter type becomes
+ // MachineType::PointerRepresentation(). Currently it's int32 or
+ // word-size.
+ // CheckValueInputRepresentationIs(
+ // node, 0, MachineType::PointerRepresentation()); // Pop count
+ size_t return_count = inferrer_->call_descriptor()->ReturnCount();
+ for (size_t i = 0; i < return_count; i++) {
+ MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
+ int input_index = static_cast<int>(i + 1);
+ switch (type.representation()) {
+ case MachineRepresentation::kTagged:
+ case MachineRepresentation::kTaggedPointer:
+ case MachineRepresentation::kTaggedSigned:
+ CheckValueInputIsTagged(node, input_index);
+ break;
+ case MachineRepresentation::kWord32:
+ CheckValueInputForInt32Op(node, input_index);
+ break;
+ default:
+ CheckValueInputRepresentationIs(
+ node, input_index, inferrer_->GetRepresentation(node));
+ }
+ break;
+ }
break;
+ }
case IrOpcode::kTypedStateValues:
case IrOpcode::kFrameState:
break;
@@ -434,6 +524,7 @@ class MachineRepresentationChecker {
std::stringstream str;
str << "Node #" << node->id() << ":" << *node->op()
<< " in the machine graph is not being checked.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
break;
@@ -443,6 +534,15 @@ class MachineRepresentationChecker {
}
private:
+ static bool Is32() {
+ return MachineType::PointerRepresentation() ==
+ MachineRepresentation::kWord32;
+ }
+ static bool Is64() {
+ return MachineType::PointerRepresentation() ==
+ MachineRepresentation::kWord64;
+ }
+
void CheckValueInputRepresentationIs(Node const* node, int index,
MachineRepresentation representation) {
Node const* input = node->InputAt(index);
@@ -450,10 +550,11 @@ class MachineRepresentationChecker {
inferrer_->GetRepresentation(input);
if (input_representation != representation) {
std::stringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
- << MachineReprToString(input_representation) << " uses node #"
- << input->id() << ":" << *input->op() << " which doesn't have a "
- << MachineReprToString(representation) << " representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op() << ":"
+ << input_representation << " which doesn't have a " << representation
+ << " representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -472,6 +573,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -482,6 +584,19 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
return;
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ if (Is32()) {
+ return;
+ }
+ break;
+ case MachineRepresentation::kWord64:
+ if (Is64()) {
+ return;
+ }
+ break;
default:
break;
}
@@ -491,6 +606,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a tagged or pointer representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -507,6 +623,7 @@ class MachineRepresentationChecker {
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
break;
}
@@ -517,6 +634,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have an int32-compatible representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -531,6 +649,7 @@ class MachineRepresentationChecker {
std::ostringstream str;
str << "TypeError: node #" << input->id() << ":" << *input->op()
<< " is untyped.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
break;
}
@@ -539,9 +658,11 @@ class MachineRepresentationChecker {
break;
}
std::ostringstream str;
- str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
- << input_representation << " uses node #" << input->id() << ":"
- << *input->op() << " which doesn't have a kWord64 representation.";
+ str << "TypeError: node #" << node->id() << ":" << *node->op()
+ << " uses node #" << input->id() << ":" << *input->op() << ":"
+ << input_representation
+ << " which doesn't have a kWord64 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -555,6 +676,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat32 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -568,6 +690,7 @@ class MachineRepresentationChecker {
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< " uses node #" << input->id() << ":" << *input->op()
<< " which doesn't have a kFloat64 representation.";
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
@@ -590,11 +713,11 @@ class MachineRepresentationChecker {
str << std::endl;
}
str << " * input " << i << " (" << input->id() << ":" << *input->op()
- << ") doesn't have a " << MachineReprToString(expected_input_type)
- << " representation.";
+ << ") doesn't have a " << expected_input_type << " representation.";
}
}
if (should_log_error) {
+ PrintDebugHelp(str, node);
FATAL(str.str().c_str());
}
}
@@ -657,17 +780,28 @@ class MachineRepresentationChecker {
return false;
}
+ void PrintDebugHelp(std::ostream& out, Node const* node) {
+ if (DEBUG_BOOL) {
+ out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
+ << node->id() << " for debugging.";
+ }
+ }
+
Schedule const* const schedule_;
MachineRepresentationInferrer const* const inferrer_;
+ bool is_stub_;
+ const char* name_;
};
} // namespace
void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
- Linkage* linkage, Zone* temp_zone) {
+ Linkage* linkage, bool is_stub, const char* name,
+ Zone* temp_zone) {
MachineRepresentationInferrer representation_inferrer(schedule, graph,
linkage, temp_zone);
- MachineRepresentationChecker checker(schedule, &representation_inferrer);
+ MachineRepresentationChecker checker(schedule, &representation_inferrer,
+ is_stub, name);
checker.Run();
}
diff --git a/deps/v8/src/compiler/machine-graph-verifier.h b/deps/v8/src/compiler/machine-graph-verifier.h
index b7d7b6166c..26e5d772c2 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.h
+++ b/deps/v8/src/compiler/machine-graph-verifier.h
@@ -21,7 +21,8 @@ class Schedule;
class MachineGraphVerifier {
public:
static void Run(Graph* graph, Schedule const* const schedule,
- Linkage* linkage, Zone* temp_zone);
+ Linkage* linkage, bool is_stub, const char* name,
+ Zone* temp_zone);
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 0ad20f0684..f7fe19d494 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -17,9 +17,9 @@ namespace v8 {
namespace internal {
namespace compiler {
-MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
- : jsgraph_(jsgraph) {}
-
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph,
+ bool allow_signalling_nan)
+ : jsgraph_(jsgraph), allow_signalling_nan_(allow_signalling_nan) {}
MachineOperatorReducer::~MachineOperatorReducer() {}
@@ -50,12 +50,12 @@ Node* MachineOperatorReducer::Float64Mul(Node* lhs, Node* rhs) {
Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
value =
graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
- return graph()->NewNode(
- common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
- graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
- Float64Constant(-V8_INFINITY)),
- Float64Constant(V8_INFINITY),
- graph()->NewNode(machine()->Float64Sqrt(), value));
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+ Float64Constant(-V8_INFINITY)),
+ BranchHint::kFalse);
+ return d.Phi(MachineRepresentation::kFloat64, Float64Constant(V8_INFINITY),
+ graph()->NewNode(machine()->Float64Sqrt(), value));
}
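// The Diamond helper used above is shorthand for the Branch/IfTrue/IfFalse/
// Merge subgraph that the old Select-based code built by hand; d.Phi(rep, t, f)
// then selects t on the true edge and f on the false edge. Behavior is
// unchanged: pow(x, 0.5) stays +Infinity for x == -Infinity and is
// sqrt(x + 0.0) otherwise.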
Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
@@ -316,14 +316,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat32Sub: {
Float32BinopMatcher m(node);
- if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+ if (allow_signalling_nan_ && m.right().Is(0) &&
+ (copysign(1.0, m.right().Value()) > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat32(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN - x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat32(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat32(m.left().Value() - m.right().Value());
@@ -350,7 +353,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Add: {
Float64BinopMatcher m(node);
if (m.right().IsNaN()) { // x + NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.IsFoldable()) { // K + K => K
return ReplaceFloat64(m.left().Value() + m.right().Value());
@@ -359,14 +363,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Sub: {
Float64BinopMatcher m(node);
- if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+ if (allow_signalling_nan_ && m.right().Is(0) &&
+ (Double(m.right().Value()).Sign() > 0)) {
return Replace(m.left().node()); // x - 0 => x
}
if (m.right().IsNaN()) { // x - NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN - x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // L - R => (L - R)
return ReplaceFloat64(m.left().Value() - m.right().Value());
@@ -392,15 +399,17 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Mul: {
Float64BinopMatcher m(node);
+ if (allow_signalling_nan_ && m.right().Is(1))
+ return Replace(m.left().node()); // x * 1.0 => x
if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
node->ReplaceInput(0, Float64Constant(-0.0));
node->ReplaceInput(1, m.left().node());
NodeProperties::ChangeOp(node, machine()->Float64Sub());
return Changed(node);
}
- if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x
if (m.right().IsNaN()) { // x * NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.IsFoldable()) { // K * K => K
return ReplaceFloat64(m.left().Value() * m.right().Value());
@@ -414,12 +423,16 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kFloat64Div: {
Float64BinopMatcher m(node);
- if (m.right().Is(1)) return Replace(m.left().node()); // x / 1.0 => x
+ if (allow_signalling_nan_ && m.right().Is(1))
+ return Replace(m.left().node()); // x / 1.0 => x
+ // TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
if (m.right().IsNaN()) { // x / NaN => NaN
- return Replace(m.right().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.right().Value() - m.right().Value());
}
if (m.left().IsNaN()) { // NaN / x => NaN
- return Replace(m.left().node());
+ // Do some calculation to make a signalling NaN quiet.
+ return ReplaceFloat64(m.left().Value() - m.left().Value());
}
if (m.IsFoldable()) { // K / K => K
return ReplaceFloat64(m.left().Value() / m.right().Value());
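// Note on the "Do some calculation" pattern above: once signalling NaNs are
// allowed, a NaN operand can no longer be forwarded as-is, because the real
// machine instruction would have produced the *quiet* form. Subtracting the
// NaN from itself is any arithmetic that forces the quieting (sketch):
//
//   double snan = ...;             // hypothetical signalling-NaN input
//   double result = snan - snan;   // quiet NaN, invalid flag raised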
@@ -664,6 +677,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return ReduceFloat64Compare(node);
+ case IrOpcode::kFloat64RoundDown:
+ return ReduceFloat64RoundDown(node);
default:
break;
}
@@ -841,14 +856,13 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
if (base::bits::IsPowerOfTwo32(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
- node->ReplaceInput(
- 0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
- node->ReplaceInput(
- 1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
- node->ReplaceInput(2, Word32And(dividend, mask));
- NodeProperties::ChangeOp(
- node,
- common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
+ Diamond d(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(), dividend, zero),
+ BranchHint::kFalse);
+ return Replace(
+ d.Phi(MachineRepresentation::kWord32,
+ Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)),
+ Word32And(dividend, mask)));
} else {
Node* quotient = Int32Div(dividend, divisor);
DCHECK_EQ(dividend, node->InputAt(0));
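// Worked example for the power-of-two path above (divisor 8, mask 7),
// matching C-style truncated modulus:
//
//   dividend =  35: condition (35 < 0) is false, phi yields 35 & 7 ==  3
//   dividend = -35: condition is true, phi yields -((-(-35)) & 7)
//                   == -(35 & 7) == -3
//
// The Diamond replaces the previous Select node with an explicit branch
// hinted as unlikely (BranchHint::kFalse) for the negative-dividend case.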
@@ -1392,6 +1406,14 @@ Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
return NoChange();
}
+Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue()) {
+ return ReplaceFloat64(Floor(m.Value()));
+ }
+ return NoChange();
+}
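// Constant-folding sketch for the new rule: Float64RoundDown of a constant
// input is replaced by its floor at compile time, e.g.
//
//   Float64RoundDown(Float64Constant(3.7))   // => Float64Constant(3.0)
//   Float64RoundDown(Float64Constant(-3.7))  // => Float64Constant(-4.0)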
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index d0845d9fab..593f7f2d22 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -24,7 +24,8 @@ class JSGraph;
class V8_EXPORT_PRIVATE MachineOperatorReducer final
: public NON_EXPORTED_BASE(Reducer) {
public:
- explicit MachineOperatorReducer(JSGraph* jsgraph);
+ explicit MachineOperatorReducer(JSGraph* jsgraph,
+ bool allow_signalling_nan = true);
~MachineOperatorReducer();
Reduction Reduce(Node* node) override;
@@ -96,6 +97,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
Reduction ReduceFloat64InsertLowWord32(Node* node);
Reduction ReduceFloat64InsertHighWord32(Node* node);
Reduction ReduceFloat64Compare(Node* node);
+ Reduction ReduceFloat64RoundDown(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -103,6 +105,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final
MachineOperatorBuilder* machine() const;
JSGraph* jsgraph_;
+ bool allow_signalling_nan_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index e36a61e733..80310e1f5a 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -43,7 +43,8 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kStore, op->opcode());
+ DCHECK(IrOpcode::kStore == op->opcode() ||
+ IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
@@ -69,9 +70,9 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
-MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+int StackSlotSizeOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
- return OpParameter<MachineRepresentation>(op);
+ return OpParameter<int>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -241,9 +242,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Float32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1) \
V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1) \
V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1) \
@@ -263,9 +261,6 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1) \
V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
- V(Int32x4Select, Operator::kNoProperties, 3, 0, 1) \
- V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
- V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1) \
V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1) \
V(Uint32x4Min, Operator::kCommutative, 2, 0, 1) \
V(Uint32x4Max, Operator::kCommutative, 2, 0, 1) \
@@ -390,7 +385,10 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
- V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
+ V(Simd128Not, Operator::kNoProperties, 1, 0, 1) \
+ V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1) \
+ V(Simd32x4Swizzle, Operator::kNoProperties, 5, 0, 1) \
+ V(Simd32x4Shuffle, Operator::kNoProperties, 6, 0, 1)
#define PURE_OPTIONAL_OP_LIST(V) \
V(Word32Ctz, Operator::kNoProperties, 1, 0, 1) \
@@ -460,6 +458,15 @@ MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
V(kWord16) \
V(kWord32)
+#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+
+struct StackSlotOperator : public Operator1<int> {
+ explicit StackSlotOperator(int size)
+ : Operator1<int>(IrOpcode::kStackSlot,
+ Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
+ 0, 0, 1, 0, 0, size) {}
+};
+
struct MachineOperatorGlobalCache {
#define PURE(Name, properties, value_input_count, control_input_count, \
output_count) \
@@ -485,56 +492,51 @@ struct MachineOperatorGlobalCache {
OVERFLOW_OP_LIST(OVERFLOW_OP)
#undef OVERFLOW_OP
-#define LOAD(Type) \
- struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
- Load##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct UnalignedLoad##Type##Operator final \
- : public Operator1<UnalignedLoadRepresentation> { \
- UnalignedLoad##Type##Operator() \
- : Operator1<UnalignedLoadRepresentation>( \
- IrOpcode::kUnalignedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct CheckedLoad##Type##Operator final \
- : public Operator1<CheckedLoadRepresentation> { \
- CheckedLoad##Type##Operator() \
- : Operator1<CheckedLoadRepresentation>( \
- IrOpcode::kCheckedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- struct ProtectedLoad##Type##Operator final \
- : public Operator1<ProtectedLoadRepresentation> { \
- ProtectedLoad##Type##Operator() \
- : Operator1<ProtectedLoadRepresentation>( \
- IrOpcode::kProtectedLoad, \
- Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
- "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Load##Type##Operator kLoad##Type; \
- UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
- CheckedLoad##Type##Operator kCheckedLoad##Type; \
+#define LOAD(Type) \
+ struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
+ Load##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct UnalignedLoad##Type##Operator final \
+ : public Operator1<UnalignedLoadRepresentation> { \
+ UnalignedLoad##Type##Operator() \
+ : Operator1<UnalignedLoadRepresentation>( \
+ IrOpcode::kUnalignedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct CheckedLoad##Type##Operator final \
+ : public Operator1<CheckedLoadRepresentation> { \
+ CheckedLoad##Type##Operator() \
+ : Operator1<CheckedLoadRepresentation>( \
+ IrOpcode::kCheckedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite, \
+ "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ struct ProtectedLoad##Type##Operator final \
+ : public Operator1<LoadRepresentation> { \
+ ProtectedLoad##Type##Operator() \
+ : Operator1<LoadRepresentation>( \
+ IrOpcode::kProtectedLoad, \
+ Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 3, 1, \
+ 1, 1, 1, 0, MachineType::Type()) {} \
+ }; \
+ Load##Type##Operator kLoad##Type; \
+ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
+ CheckedLoad##Type##Operator kCheckedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type;
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Type) \
- struct StackSlot##Type##Operator final \
- : public Operator1<MachineRepresentation> { \
- StackSlot##Type##Operator() \
- : Operator1<MachineRepresentation>( \
- IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
- "StackSlot", 0, 0, 0, 1, 0, 0, \
- MachineType::Type().representation()) {} \
- }; \
- StackSlot##Type##Operator kStackSlot##Type;
- MACHINE_TYPE_LIST(STACKSLOT)
+#define STACKSLOT(Size) \
+ struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
+ StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {} \
+ }; \
+ StackSlotOfSize##Size##Operator kStackSlotSize##Size;
+ STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
#undef STACKSLOT
#define STORE(Type) \
@@ -585,13 +587,24 @@ struct MachineOperatorGlobalCache {
"CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
} \
}; \
+ struct ProtectedStore##Type##Operator \
+ : public Operator1<StoreRepresentation> { \
+ explicit ProtectedStore##Type##Operator() \
+ : Operator1<StoreRepresentation>( \
+ IrOpcode::kProtectedStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Store", 4, 1, 1, 0, 1, 0, \
+ StoreRepresentation(MachineRepresentation::Type, \
+ kNoWriteBarrier)) {} \
+ }; \
Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier; \
Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier; \
Store##Type##PointerWriteBarrier##Operator \
kStore##Type##PointerWriteBarrier; \
Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier; \
UnalignedStore##Type##Operator kUnalignedStore##Type; \
- CheckedStore##Type##Operator kCheckedStore##Type;
+ CheckedStore##Type##Operator kCheckedStore##Type; \
+ ProtectedStore##Type##Operator kProtectedStore##Type;
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
@@ -726,15 +739,21 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
return nullptr;
}
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
-#define STACKSLOT(Type) \
- if (rep == MachineType::Type().representation()) { \
- return &cache_.kStackSlot##Type; \
+const Operator* MachineOperatorBuilder::StackSlot(int size) {
+ DCHECK_LE(0, size);
+#define CASE_CACHED_SIZE(Size) \
+ case Size: \
+ return &cache_.kStackSlotSize##Size;
+ switch (size) {
+ STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
+ default:
+ return new (zone_) StackSlotOperator(size);
}
- MACHINE_TYPE_LIST(STACKSLOT)
-#undef STACKSLOT
- UNREACHABLE();
- return nullptr;
+#undef CASE_CACHED_SIZE
+}
+
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+ return StackSlot(1 << ElementSizeLog2Of(rep));
}
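// Sketch of the new byte-size-based API: the representation overload simply
// forwards the element size, and only the common sizes are cached:
//
//   StackSlot(MachineRepresentation::kWord32)   // -> StackSlot(4), cached
//   StackSlot(MachineRepresentation::kFloat64)  // -> StackSlot(8), cached
//   StackSlot(24)                               // zone-allocated operator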
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
@@ -762,6 +781,23 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
return nullptr;
}
+const Operator* MachineOperatorBuilder::ProtectedStore(
+ MachineRepresentation rep) {
+ switch (rep) {
+#define STORE(kRep) \
+ case MachineRepresentation::kRep: \
+ return &cache_.kProtectedStore##kRep; \
+ break;
+ MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+ case MachineRepresentation::kBit:
+ case MachineRepresentation::kNone:
+ break;
+ }
+ UNREACHABLE();
+ return nullptr;
+}
+
const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
return &cache_.kUnsafePointerAdd;
}
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 1cbec994a8..d226879521 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -43,7 +43,6 @@ class OptionalOperator final {
// A Load needs a MachineType.
typedef MachineType LoadRepresentation;
-typedef LoadRepresentation ProtectedLoadRepresentation;
LoadRepresentation LoadRepresentationOf(Operator const*);
@@ -94,7 +93,7 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+int StackSlotSizeOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
@@ -448,9 +447,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Float32x4LessThanOrEqual();
const Operator* Float32x4GreaterThan();
const Operator* Float32x4GreaterThanOrEqual();
- const Operator* Float32x4Select();
- const Operator* Float32x4Swizzle();
- const Operator* Float32x4Shuffle();
const Operator* Float32x4FromInt32x4();
const Operator* Float32x4FromUint32x4();
@@ -471,9 +467,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Int32x4LessThanOrEqual();
const Operator* Int32x4GreaterThan();
const Operator* Int32x4GreaterThanOrEqual();
- const Operator* Int32x4Select();
- const Operator* Int32x4Swizzle();
- const Operator* Int32x4Shuffle();
const Operator* Int32x4FromFloat32x4();
const Operator* Uint32x4Min();
@@ -608,6 +601,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* Simd128Or();
const Operator* Simd128Xor();
const Operator* Simd128Not();
+ const Operator* Simd32x4Select();
+ const Operator* Simd32x4Swizzle();
+ const Operator* Simd32x4Shuffle();
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -615,6 +611,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// store [base + index], value
const Operator* Store(StoreRepresentation rep);
+ const Operator* ProtectedStore(MachineRepresentation rep);
// unaligned load [base + index]
const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
@@ -622,6 +619,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
+ const Operator* StackSlot(int size);
const Operator* StackSlot(MachineRepresentation rep);
// Access to the machine stack.
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 66fcbb9362..7e9a522a70 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -20,7 +20,8 @@ MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone)
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
tokens_(zone),
- zone_(zone) {}
+ zone_(zone),
+ graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
void MemoryOptimizer::Optimize() {
EnqueueUses(graph()->start(), empty_state());
@@ -91,7 +92,9 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
case IrOpcode::kDeoptimizeUnless:
case IrOpcode::kIfException:
case IrOpcode::kLoad:
+ case IrOpcode::kProtectedLoad:
case IrOpcode::kStore:
+ case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
case IrOpcode::kUnsafePointerAdd:
return VisitOtherEffect(node, state);
@@ -101,12 +104,17 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
DCHECK_EQ(0, node->op()->EffectOutputCount());
}
+#define __ gasm()->
+
void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
Node* value;
Node* size = node->InputAt(0);
Node* effect = node->InputAt(1);
Node* control = node->InputAt(2);
+
+ gasm()->Reset(effect, control);
+
PretenureFlag pretenure = PretenureFlagOf(node->op());
// Propagate tenuring from outer allocations to inner allocations, i.e.
@@ -141,11 +149,11 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
}
// Determine the top/limit addresses.
- Node* top_address = jsgraph()->ExternalConstant(
+ Node* top_address = __ ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
- Node* limit_address = jsgraph()->ExternalConstant(
+ Node* limit_address = __ ExternalConstant(
pretenure == NOT_TENURED
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
@@ -171,89 +179,69 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
// Update the allocation top with the new object allocation.
// TODO(bmeurer): Defer writing back top as much as possible.
- Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
- jsgraph()->IntPtrConstant(object_size));
- effect = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+ Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
// Compute the effective inner allocated address.
- value = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), state->top(),
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ value = __ BitcastWordToTagged(
+ __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
// Extend the allocation {group}.
group->Add(value);
state = AllocationState::Open(group, state_size, top, zone());
} else {
+ auto call_runtime = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
+
// Set up a mutable reservation size node; will be patched as we fold
// additional allocations into this new group.
- Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+ Node* size = __ UniqueInt32Constant(object_size);
// Load allocation top and limit.
- Node* top = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
- jsgraph()->IntPtrConstant(0), effect, control);
- Node* limit = effect = graph()->NewNode(
- machine()->Load(MachineType::Pointer()), limit_address,
- jsgraph()->IntPtrConstant(0), effect, control);
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Check if we need to collect garbage before we can start bump pointer
// allocation (always done for folded allocations).
- Node* check = graph()->NewNode(
- machine()->UintLessThan(),
- graph()->NewNode(
- machine()->IntAdd(), top,
- machine()->Is64()
- ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
- : size),
+ Node* check = __ UintLessThan(
+ __ IntAdd(top,
+ machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
limit);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue = top;
+ __ GotoUnless(check, &call_runtime);
+ __ Goto(&done, top);
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
+ __ Bind(&call_runtime);
{
- Node* target = pretenure == NOT_TENURED
- ? jsgraph()->AllocateInNewSpaceStubConstant()
- : jsgraph()->AllocateInOldSpaceStubConstant();
+ Node* target =
+ pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
+ : __
+ AllocateInOldSpaceStubConstant();
if (!allocate_operator_.is_set()) {
CallDescriptor* descriptor =
Linkage::GetAllocateCallDescriptor(graph()->zone());
allocate_operator_.set(common()->Call(descriptor));
}
- vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
- size, efalse, if_false);
- vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
- jsgraph()->IntPtrConstant(kHeapObjectTag));
+ Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+ vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+ __ Goto(&done, vfalse);
}
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(
- common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
- control);
+ __ Bind(&done);
// Compute the new top and write it back.
- top = graph()->NewNode(machine()->IntAdd(), value,
- jsgraph()->IntPtrConstant(object_size));
- effect = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+ top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), top);
// Compute the initial object address.
- value = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), value,
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ value = __ BitcastWordToTagged(
+ __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
// Start a new allocation group.
AllocationGroup* group =
@@ -261,61 +249,42 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
+ auto call_runtime = __ MakeDeferredLabel<1>();
+ auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedPointer);
+
// Load allocation top and limit.
- Node* top = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
- jsgraph()->IntPtrConstant(0), effect, control);
- Node* limit = effect =
- graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
- jsgraph()->IntPtrConstant(0), effect, control);
+ Node* top =
+ __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+ Node* limit =
+ __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
// Compute the new top.
- Node* new_top = graph()->NewNode(
- machine()->IntAdd(), top,
- machine()->Is64()
- ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
- : size);
+ Node* new_top =
+ __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
// Check if we can do bump pointer allocation here.
- Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
- Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
- Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
- Node* etrue = effect;
- Node* vtrue;
- {
- etrue = graph()->NewNode(
- machine()->Store(StoreRepresentation(
- MachineType::PointerRepresentation(), kNoWriteBarrier)),
- top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
- vtrue = graph()->NewNode(
- machine()->BitcastWordToTagged(),
- graph()->NewNode(machine()->IntAdd(), top,
- jsgraph()->IntPtrConstant(kHeapObjectTag)));
+ Node* check = __ UintLessThan(new_top, limit);
+ __ GotoUnless(check, &call_runtime);
+ __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+ kNoWriteBarrier),
+ top_address, __ IntPtrConstant(0), new_top);
+ __ Goto(&done, __ BitcastWordToTagged(
+ __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
+
+ __ Bind(&call_runtime);
+ Node* target =
+ pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant()
+ : __
+ AllocateInOldSpaceStubConstant();
+ if (!allocate_operator_.is_set()) {
+ CallDescriptor* descriptor =
+ Linkage::GetAllocateCallDescriptor(graph()->zone());
+ allocate_operator_.set(common()->Call(descriptor));
}
+ __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
- Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- Node* efalse = effect;
- Node* vfalse;
- {
- Node* target = pretenure == NOT_TENURED
- ? jsgraph()->AllocateInNewSpaceStubConstant()
- : jsgraph()->AllocateInOldSpaceStubConstant();
- if (!allocate_operator_.is_set()) {
- CallDescriptor* descriptor =
- Linkage::GetAllocateCallDescriptor(graph()->zone());
- allocate_operator_.set(common()->Call(descriptor));
- }
- vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
- efalse, if_false);
- }
-
- control = graph()->NewNode(common()->Merge(2), if_true, if_false);
- effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
- value = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
- control);
+ __ Bind(&done);
+ value = done.PhiAt(0);
// Create an unfoldable allocation group.
AllocationGroup* group =
@@ -323,6 +292,10 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
state = AllocationState::Closed(group, zone());
}
+ effect = __ ExtractCurrentEffect();
+ control = __ ExtractCurrentControl();
+ USE(control); // Floating control, dropped on the floor.
+
// Replace all effect uses of {node} with the {effect}, enqueue the
// effect uses for further processing, and replace all value uses of
// {node} with the {value}.
@@ -340,6 +313,8 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
node->Kill();
}
+#undef __
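// The rewrite above replaces hand-built Branch/Merge/EffectPhi/Phi subgraphs
// with the GraphAssembler: labels carry the phi inputs, GotoUnless branches
// to the deferred slow path, and Bind(&done)/done.PhiAt(0) recovers the
// merged value. A condensed sketch of the pattern, using only calls that
// appear above (fast_value/slow_value stand in for the bump-pointer result
// and the runtime-call result):
//
//   auto call_runtime = __ MakeDeferredLabel<1>();
//   auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
//   __ GotoUnless(check, &call_runtime);
//   __ Goto(&done, fast_value);
//   __ Bind(&call_runtime);
//   __ Goto(&done, slow_value);
//   __ Bind(&done);
//   Node* value = done.PhiAt(0);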
+
void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
DCHECK_EQ(IrOpcode::kCall, node->opcode());
// If the call can allocate, we start with a fresh state.
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index ba1d6dd72b..1541d22896 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -5,6 +5,7 @@
#ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
#define V8_COMPILER_MEMORY_OPTIMIZER_H_
+#include "src/compiler/graph-assembler.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -131,6 +132,7 @@ class MemoryOptimizer final {
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
Zone* zone() const { return zone_; }
+ GraphAssembler* gasm() { return &graph_assembler_; }
SetOncePointer<const Operator> allocate_operator_;
JSGraph* const jsgraph_;
@@ -138,6 +140,7 @@ class MemoryOptimizer final {
ZoneMap<NodeId, AllocationStates> pending_;
ZoneQueue<Token> tokens_;
Zone* const zone_;
+ GraphAssembler graph_assembler_;
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
};
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 0a62b52d4f..60f634254c 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -270,6 +270,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
bool must_save_lr_;
};
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
+
+#undef CREATE_OOL_CLASS
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
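For readers skimming the macro above, this is what one instantiation expands to; it is a mechanical expansion of CREATE_OOL_CLASS, nothing assumed beyond the macro body:

    class OutOfLineFloat32Max final : public OutOfLineCode {
     public:
      OutOfLineFloat32Max(CodeGenerator* gen, FPURegister dst, FPURegister src1,
                          FPURegister src2)
          : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {}

      // Emitted out of line; handles the NaN/slow case for Float32Max.
      void Generate() final { __ Float32MaxOutOfLine(dst_, src1_, src2_); }

     private:
      FPURegister const dst_;
      FPURegister const src1_;
      FPURegister const src2_;
    };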
@@ -1132,36 +1152,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMipsMaddS:
- __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMaddD:
- __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMipsMaddfS:
- __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMipsMaddfD:
- __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMsubS:
- __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMsubD:
- __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMipsMsubfS:
- __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMipsMsubfD:
- __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMipsMulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1239,47 +1247,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMipsFloat32Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat64Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ DoubleRegister dst = i.OutputDoubleRegister();
+ DoubleRegister src1 = i.InputDoubleRegister(0);
+ DoubleRegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat32Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsFloat64Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ DoubleRegister dst = i.OutputDoubleRegister();
+ DoubleRegister src1 = i.InputDoubleRegister(0);
+ DoubleRegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMipsCvtSD: {
@@ -1628,12 +1628,12 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- MipsOperandConverter i(this, instr);
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
 // implemented differently than on the other architectures. The compare operations
@@ -1642,12 +1642,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
+ MipsOperandConverter i(gen, instr);
if (instr->arch_opcode() == kMipsTst) {
- cc = FlagsConditionToConditionTst(branch->condition);
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMipsAddOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1657,11 +1658,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+ UNSUPPORTED_COND(kMipsAddOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsSubOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1671,11 +1672,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+ UNSUPPORTED_COND(kMipsSubOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1685,15 +1686,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+ UNSUPPORTED_COND(kMipsMulOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
- cc = FlagsConditionToConditionCmp(branch->condition);
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMipsCmpS) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -1703,8 +1704,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMipsCmpD) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMipsCmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -1718,7 +1719,17 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
instr->arch_opcode());
UNIMPLEMENTED();
}
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
}
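Hoisting the branch assembly into a free function breaks the file's usual `__ masm()->` shorthand, since there is no CodeGenerator receiver inside AssembleBranchToLabels; the hunk therefore retargets the macro to the masm parameter for the helper's body and restores it on the way out. The pattern in isolation, as a sketch; `nop` stands in for any MacroAssembler call:

    #define __ masm()->  // file-wide default: goes through the member accessor

    void FreeStandingHelper(MacroAssembler* masm) {
    #undef __
    #define __ masm->    // inside the helper, target the parameter instead
      __ nop();          // every '__' call in the body now uses 'masm'
    #undef __
    #define __ masm()->  // restore the default for the members that follow
    }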
@@ -1726,6 +1737,66 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ MipsOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2080,9 +2151,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
__ li(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ li(dst, Operand(src.ToInt32()));
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 45ed041175..edff56f72b 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -71,12 +71,8 @@ namespace compiler {
V(MipsMulPair) \
V(MipsMaddS) \
V(MipsMaddD) \
- V(MipsMaddfS) \
- V(MipsMaddfD) \
V(MipsMsubS) \
V(MipsMsubD) \
- V(MipsMsubfS) \
- V(MipsMsubfD) \
V(MipsFloat32RoundDown) \
V(MipsFloat32RoundTruncate) \
V(MipsFloat32RoundUp) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index 1e4b996531..3dcf708349 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -188,6 +188,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.TempImmediate(cont->trap_id());
}
if (cont->IsDeoptimize()) {
@@ -368,6 +370,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitWord32And(Node* node) {
MipsOperandGenerator g(this);
@@ -652,7 +658,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -664,7 +670,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
 g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
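The two hunks above tighten the LSA (left-shift-add) pattern: `x + (y << imm)` folds into a single kMipsLsa, but now only when the plain addend is not itself a constant, since `g.UseRegister(...)` on a constant operand would force it into a register instead of letting the generic add path fold it as an immediate. Functionally the instruction computes the following; this is a sketch of the semantics as used here, not of the instruction encoding:

    #include <cassert>
    #include <cstdint>

    // Lsa(rt, rs, sa): rt + (rs << sa), the pattern matched above.
    int32_t Lsa(int32_t rt, int32_t rs, int sa) { return rt + (rs << sa); }

    int main() {
      assert(Lsa(100, 3, 2) == 112);  // 100 + (3 << 2)
    }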
@@ -900,35 +906,23 @@ void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
MipsOperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(z, x, y).
- Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(x, y, z).
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.S(x, y, z).
- Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMipsAddS, node);
@@ -937,35 +931,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
MipsOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
+ if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(z, x, y).
- Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- if (IsMipsArchVariant(kMips32r2)) { // Select Madd.D(x, y, z).
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (IsMipsArchVariant(kMips32r6)) { // Select Maddf.D(x, y, z).
- Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMipsAddD, node);
@@ -974,9 +956,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
MipsOperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r2)) { // Select Msub.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMipsMsubS, g.DefineAsRegister(node),
@@ -984,24 +966,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- if (IsMipsArchVariant(kMips32r6)) {
- // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
- Float32BinopMatcher mright(m.right().node());
- Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMipsSubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
MipsOperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- if (IsMipsArchVariant(kMips32r2)) {
+ if (IsMipsArchVariant(kMips32r2)) { // Select Msub.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
 // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMipsMsubD, g.DefineAsRegister(node),
@@ -1009,15 +982,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- if (IsMipsArchVariant(kMips32r6)) {
- // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
- Float64BinopMatcher mright(m.right().node());
- Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMipsSubD, node);
}
@@ -1406,9 +1370,12 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -1616,10 +1583,13 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -1643,6 +1613,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
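The trap id travels from the selector to the code generator as an ordinary instruction input: VisitCompare and VisitWordCompareZero append `g.TempImmediate(cont->trap_id())` as the last operand, and AssembleArchTrap above reads it back with `i.InputInt32(instr_->InputCount() - 1)`. A toy model of that round trip; illustrative only, these are not V8's actual classes:

    #include <cassert>
    #include <vector>

    // Toy stand-in for an Instruction whose last input carries the trap id.
    struct ToyInstruction {
      std::vector<int> inputs;
      int InputCount() const { return static_cast<int>(inputs.size()); }
      int InputInt32(int index) const { return inputs[index]; }
    };

    int main() {
      ToyInstruction instr;
      instr.inputs = {/*left*/ 7, /*right*/ 0, /*trap_id*/ 42};  // selector side
      // Code-generator side: recover the id from the last input slot.
      int trap_id = instr.InputInt32(instr.InputCount() - 1);
      assert(trap_id == 42);
    }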
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index a3bf433d4a..ba921e265b 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -270,6 +270,26 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
bool must_save_lr_;
};
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ masm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
switch (condition) {
@@ -366,85 +386,108 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
}
} // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds) \
do { \
- auto result = i.Output##width##Register(); \
- auto ool = new (zone()) OutOfLineLoad##width(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+ __ And(kScratchReg, offset, Operand(~(length.immediate() - 1))); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg, \
+ Operand(zero_reg)); \
} else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length); \
} \
- __ bind(ool->exit()); \
} while (0)
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds) \
do { \
- auto result = i.OutputRegister(); \
- auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
- __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+ __ Or(kScratchReg, zero_reg, Operand(offset)); \
+ __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1))); \
+ __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg)); \
} else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ __ Branch(out_of_bounds, ls, length.rm(), Operand(offset)); \
} \
- __ bind(ool->exit()); \
} while (0)
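Both new bounds-check macros share a small bit trick on their fast path: when the length is a constant power of two, `offset & ~(length - 1)` keeps exactly the bits that would make `offset >= length`, so a single AND plus a branch-if-nonzero replaces the unsigned compare. A standalone illustration in plain C++, not V8 code:

    #include <cassert>
    #include <cstdint>

    // For length == 2^k: (offset & ~(length - 1)) != 0  <=>  offset >= length.
    bool OutOfBounds(uint64_t offset, uint64_t pow2_length) {
      return (offset & ~(pow2_length - 1)) != 0;
    }

    int main() {
      assert(!OutOfBounds(0x0fff, 0x1000));  // in bounds
      assert(OutOfBounds(0x1000, 0x1000));   // exactly at the limit: out
      assert(OutOfBounds(0xffffffff, 0x1000));
    }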
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZero##width##Register(2); \
- if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
- __ Move(kDoubleRegZero, 0.0); \
- } \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr) \
+ do { \
+ auto result = i.Output##width##Register(); \
+ auto ool = new (zone()) OutOfLineLoad##width(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
+ ool->entry()); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), \
+ ool->entry()); \
+ __ asm_instr(result, MemOperand(i.InputRegister(2), offset)); \
+ } \
+ __ bind(ool->exit()); \
+ } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ auto value = i.InputOrZero##width##Register(2); \
+ if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) { \
+ __ Move(kDoubleRegZero, 0.0); \
+ } \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
} while (0)
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
- do { \
- Label done; \
- if (instr->InputAt(0)->IsRegister()) { \
- auto offset = i.InputRegister(0); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ And(kScratchReg, offset, Operand(0xffffffff)); \
- __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
- __ asm_instr(value, MemOperand(kScratchReg, 0)); \
- } else { \
- int offset = static_cast<int>(i.InputOperand(0).immediate()); \
- auto value = i.InputOrZeroRegister(2); \
- __ Branch(&done, ls, i.InputRegister(1), Operand(offset)); \
- __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
- } \
- __ bind(&done); \
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+ do { \
+ Label done; \
+ if (instr->InputAt(0)->IsRegister()) { \
+ auto offset = i.InputRegister(0); \
+ auto value = i.InputOrZeroRegister(2); \
+ ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done); \
+ __ And(kScratchReg, offset, Operand(0xffffffff)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
+ } else { \
+ int offset = static_cast<int>(i.InputOperand(0).immediate()); \
+ auto value = i.InputOrZeroRegister(2); \
+ ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+ __ asm_instr(value, MemOperand(i.InputRegister(3), offset)); \
+ } \
+ __ bind(&done); \
} while (0)
#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode) \
@@ -1326,36 +1369,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(1));
break;
case kMips64MaddS:
- __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MaddD:
- __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMips64MaddfS:
- __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMips64MaddfD:
- __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MsubS:
- __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
- i.InputFloatRegister(1), i.InputFloatRegister(2));
+ __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+ i.InputFloatRegister(1), i.InputFloatRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MsubD:
- __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), i.InputDoubleRegister(2));
- break;
- case kMips64MsubfS:
- __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
- i.InputFloatRegister(2));
- break;
- case kMips64MsubfD:
- __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
- i.InputDoubleRegister(2));
+ __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+ kScratchDoubleReg);
break;
case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1430,47 +1461,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64Float32Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64Max: {
- Label compare_nan, done_compare;
- __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float32Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
- i.InputSingleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputSingleRegister(),
- std::numeric_limits<float>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64Min: {
- Label compare_nan, done_compare;
- __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputDoubleRegister(1), &compare_nan);
- __ Branch(&done_compare);
- __ bind(&compare_nan);
- __ Move(i.OutputDoubleRegister(),
- std::numeric_limits<double>::quiet_NaN());
- __ bind(&done_compare);
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
break;
}
case kMips64Float64SilenceNaN:
@@ -1935,12 +1958,13 @@ static bool convertCondition(FlagsCondition condition, Condition& cc) {
return false;
}
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
+ MipsOperandConverter i(gen, instr);
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- MipsOperandConverter i(this, instr);
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
Condition cc = kNoCondition;
// MIPS does not have condition code flags, so compare and branch are
 // implemented differently than on the other architectures. The compare operations
@@ -1950,17 +1974,17 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
// they are tested here.
if (instr->arch_opcode() == kMips64Tst) {
- cc = FlagsConditionToConditionTst(branch->condition);
+ cc = FlagsConditionToConditionTst(condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
- cc = FlagsConditionToConditionOvf(branch->condition);
+ cc = FlagsConditionToConditionOvf(condition);
__ dsra32(kScratchReg, i.OutputRegister(), 0);
__ sra(at, i.OutputRegister(), 31);
__ Branch(tlabel, cc, at, Operand(kScratchReg));
} else if (instr->arch_opcode() == kMips64DaddOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1970,11 +1994,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64DaddOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64DsubOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow:
__ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel);
@@ -1984,11 +2008,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel);
break;
default:
- UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64DsubOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
- switch (branch->condition) {
+ switch (condition) {
case kOverflow: {
__ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
i.InputOperand(1), tlabel, flabel, kScratchReg);
@@ -1998,15 +2022,15 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
i.InputOperand(1), flabel, tlabel, kScratchReg);
} break;
default:
- UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+ UNSUPPORTED_COND(kMips64MulOvf, condition);
break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
- cc = FlagsConditionToConditionCmp(branch->condition);
+ cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kMips64CmpS) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpS, condition);
}
FPURegister left = i.InputOrZeroSingleRegister(0);
FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -2016,8 +2040,8 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
__ BranchF32(tlabel, nullptr, cc, left, right);
} else if (instr->arch_opcode() == kMips64CmpD) {
- if (!convertCondition(branch->condition, cc)) {
- UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+ if (!convertCondition(condition, cc)) {
+ UNSUPPORTED_COND(kMips64CmpD, condition);
}
FPURegister left = i.InputOrZeroDoubleRegister(0);
FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -2031,7 +2055,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
instr->arch_opcode());
UNIMPLEMENTED();
}
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
}
@@ -2039,6 +2074,63 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+ void Generate() final {
+ MipsOperandConverter i(gen_, instr_);
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2401,7 +2493,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
switch (src.type()) {
case Constant::kInt32:
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
__ li(dst, Operand(src.ToInt32(), src.rmode()));
} else {
__ li(dst, Operand(src.ToInt32()));
@@ -2411,11 +2503,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ li(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ li(dst, Operand(src.ToInt64()));
}
break;
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 8f68ced62e..0c0e1aa61e 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -87,12 +87,8 @@ namespace compiler {
V(Mips64MinD) \
V(Mips64MaddS) \
V(Mips64MaddD) \
- V(Mips64MaddfS) \
- V(Mips64MaddfD) \
V(Mips64MsubS) \
V(Mips64MsubD) \
- V(Mips64MsubfS) \
- V(Mips64MsubfD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
V(Mips64Float64RoundUp) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index fbf09d6ca2..d48007b858 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -92,9 +92,35 @@ class Mips64OperandGenerator final : public OperandGenerator {
case kMips64Tst:
case kMips64Xor:
return is_uint16(value);
+ case kMips64Lb:
+ case kMips64Lbu:
+ case kMips64Sb:
+ case kMips64Lh:
+ case kMips64Lhu:
+ case kMips64Sh:
+ case kMips64Lw:
+ case kMips64Sw:
+ case kMips64Ld:
+ case kMips64Sd:
+ case kMips64Lwc1:
+ case kMips64Swc1:
case kMips64Ldc1:
case kMips64Sdc1:
- return is_int16(value + kIntSize);
+ case kCheckedLoadInt8:
+ case kCheckedLoadUint8:
+ case kCheckedLoadInt16:
+ case kCheckedLoadUint16:
+ case kCheckedLoadWord32:
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord8:
+ case kCheckedStoreWord16:
+ case kCheckedStoreWord32:
+ case kCheckedStoreWord64:
+ case kCheckedLoadFloat32:
+ case kCheckedLoadFloat64:
+ case kCheckedStoreFloat32:
+ case kCheckedStoreFloat64:
+ return is_int32(value);
default:
return is_int16(value);
}
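The effect of the enlarged switch: loads, stores, and checked accesses on mips64 may now carry any 32-bit displacement (oversized offsets are materialized through a scratch register by the macro-assembler paths), while arithmetic opcodes keep the 16-bit limit of MIPS I-type immediates. The two range predicates spelled out, assumed to match V8's is_int16/is_int32 helpers:

    #include <cassert>
    #include <cstdint>

    bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }
    bool is_int32(int64_t v) {
      return v >= -2147483648LL && v <= 2147483647LL;
    }

    int main() {
      assert(is_int16(32767) && !is_int16(32768));
      assert(is_int32(2147483647LL) && !is_int32(2147483648LL));
    }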
@@ -169,6 +195,16 @@ struct ExtendingLoadMatcher {
DCHECK(m.IsWord64Sar());
if (m.left().IsLoad() && m.right().Is(32) &&
selector_->CanCover(m.node(), m.left().node())) {
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK(ElementSizeLog2Of(rep) == 3);
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
Mips64OperandGenerator g(selector_);
Node* load = m.left().node();
Node* offset = load->InputAt(1);
@@ -186,7 +222,8 @@ struct ExtendingLoadMatcher {
}
};
-bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
ExtendingLoadMatcher m(node, selector);
Mips64OperandGenerator g(selector);
if (m.Matches()) {
@@ -196,7 +233,7 @@ bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
m.opcode() | AddressingModeField::encode(kMode_MRI);
DCHECK(is_int32(m.immediate()));
inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
- InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
inputs);
return true;
@@ -247,6 +284,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
inputs[input_count++] = g.Label(cont->true_block());
inputs[input_count++] = g.Label(cont->false_block());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.TempImmediate(cont->trap_id());
}
if (cont->IsDeoptimize()) {
@@ -438,6 +477,10 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitWord32And(Node* node) {
Mips64OperandGenerator g(this);
@@ -748,7 +791,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
void InstructionSelector::VisitWord64Sar(Node* node) {
- if (TryEmitExtendingLoad(this, node)) return;
+ if (TryEmitExtendingLoad(this, node, node)) return;
VisitRRO(this, kMips64Dsar, node);
}
@@ -824,7 +867,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int32BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -836,7 +879,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord32Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMips64Lsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -856,7 +899,7 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.right().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
Int64BinopMatcher mright(m.right().node());
- if (mright.right().HasValue()) {
+ if (mright.right().HasValue() && !m.left().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mright.right().Value());
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
@@ -869,7 +912,7 @@ void InstructionSelector::VisitInt64Add(Node* node) {
if (m.left().opcode() == IrOpcode::kWord64Shl &&
CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
Int64BinopMatcher mleft(m.left().node());
- if (mleft.right().HasValue()) {
+ if (mleft.right().HasValue() && !m.right().HasValue()) {
int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
Emit(kMips64Dlsa, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -1318,13 +1361,17 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
if (CanCover(node, value)) {
switch (value->opcode()) {
case IrOpcode::kWord64Sar: {
- Int64BinopMatcher m(value);
- if (m.right().IsInRange(32, 63)) {
- // After smi untagging no need for truncate. Combine sequence.
- Emit(kMips64Dsar, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()),
- g.UseImmediate(m.right().node()));
+ if (TryEmitExtendingLoad(this, value, node)) {
return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After smi untagging no need for truncate. Combine sequence.
+ Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
}
break;
}
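TryEmitExtendingLoad now also fires from TruncateInt64ToInt32, with the new `output_node` parameter making the truncation, rather than the shift, the node that defines the result. The combine rests on a little-endian layout fact: the high word of a 64-bit load sits four bytes above its base, so `load64 >> 32` can become a plain 32-bit load. A standalone check of that fact in plain C++, assuming a little-endian host as MIPS64el is:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t value = 0x1234567890abcdefLL;
      unsigned char bytes[8];
      std::memcpy(bytes, &value, sizeof(value));    // little-endian layout assumed
      int32_t high;
      std::memcpy(&high, bytes + 4, sizeof(high));  // read only the upper word
      assert(high == static_cast<int32_t>(value >> 32));
    }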
@@ -1404,35 +1451,23 @@ void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
void InstructionSelector::VisitFloat32Add(Node* node) {
Mips64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- // For Add.S(Mul.S(x, y), z):
- Float32BinopMatcher mleft(m.left().node());
- if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
+ if (kArchVariant == kMips64r2) { // Select Madd.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+ // For Add.S(Mul.S(x, y), z):
+ Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.S(z, x, y).
- Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- // For Add.S(x, Mul.S(y, z)):
- Float32BinopMatcher mright(m.right().node());
- if (kArchVariant == kMips64r2) { // Select Madd.S(x, y, z).
+ if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+ // For Add.S(x, Mul.S(y, z)):
+ Float32BinopMatcher mright(m.right().node());
Emit(kMips64MaddS, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.S(x, y, z).
- Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMips64AddS, node);
@@ -1441,35 +1476,23 @@ void InstructionSelector::VisitFloat32Add(Node* node) {
void InstructionSelector::VisitFloat64Add(Node* node) {
Mips64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- // For Add.D(Mul.D(x, y), z):
- Float64BinopMatcher mleft(m.left().node());
- if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
+ if (kArchVariant == kMips64r2) { // Select Madd.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+ // For Add.D(Mul.D(x, y), z):
+ Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
g.UseRegister(mleft.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.D(z, x, y).
- Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
- g.UseRegister(mleft.right().node()));
- return;
}
- }
- if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- // For Add.D(x, Mul.D(y, z)):
- Float64BinopMatcher mright(m.right().node());
- if (kArchVariant == kMips64r2) { // Select Madd.D(x, y, z).
+ if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+ // For Add.D(x, Mul.D(y, z)):
+ Float64BinopMatcher mright(m.right().node());
Emit(kMips64MaddD, g.DefineAsRegister(node),
g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
g.UseRegister(mright.right().node()));
return;
- } else if (kArchVariant == kMips64r6) { // Select Maddf.D(x, y, z).
- Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
}
}
VisitRRR(this, kMips64AddD, node);
@@ -1478,9 +1501,9 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat32Sub(Node* node) {
Mips64OperandGenerator g(this);
- Float32BinopMatcher m(node);
- if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r2) { // Select Msub.S(z, x, y).
+ Float32BinopMatcher m(node);
+ if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
// For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
Float32BinopMatcher mleft(m.left().node());
Emit(kMips64MsubS, g.DefineAsRegister(node),
@@ -1488,24 +1511,15 @@ void InstructionSelector::VisitFloat32Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
- if (kArchVariant == kMips64r6) {
- // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
- Float32BinopMatcher mright(m.right().node());
- Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMips64SubS, node);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
Mips64OperandGenerator g(this);
- Float64BinopMatcher m(node);
- if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
- if (kArchVariant == kMips64r2) {
+ if (kArchVariant == kMips64r2) { // Select Msub.D(z, x, y).
+ Float64BinopMatcher m(node);
+ if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
 // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
Float64BinopMatcher mleft(m.left().node());
Emit(kMips64MsubD, g.DefineAsRegister(node),
@@ -1513,15 +1527,6 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
g.UseRegister(mleft.right().node()));
return;
}
- } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
- if (kArchVariant == kMips64r6) {
- // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
- Float64BinopMatcher mright(m.right().node());
- Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
- g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
- g.UseRegister(mright.right().node()));
- return;
- }
}
VisitRRR(this, kMips64SubD, node);
}
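The restructured selectors above hoist the kArchVariant test outward and retire the r6 Maddf/Msubf paths, so only r2 still fuses an Add/Sub with a covered multiply. In scalar terms the selected operations compute the following (a sketch of the instruction semantics, not V8 code; the rounding behavior of the r2 instructions is not modeled):

    // Madd.D(z, x, y), selected for Add.D(Mul.D(x, y), z):
    double MaddD(double z, double x, double y) { return x * y + z; }

    // Msub.D(z, x, y), selected for Sub.D(Mul.D(x, y), z):
    double MsubD(double z, double x, double y) { return x * y - z; }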
@@ -1849,6 +1854,15 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
: g.UseRegister(length)
: g.UseRegister(length);
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.DefineAsRegister(node), offset_operand,
+ g.UseImmediate(length), g.UseRegister(buffer));
+ return;
+ }
+ }
+
Emit(opcode | AddressingModeField::encode(kMode_MRI),
g.DefineAsRegister(node), offset_operand, length_operand,
g.UseRegister(buffer));
@@ -1901,6 +1915,15 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
: g.UseRegister(length)
: g.UseRegister(length);
+ if (length->opcode() == IrOpcode::kInt32Constant) {
+ Int32Matcher m(length);
+ if (m.IsPowerOf2()) {
+ Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
+ g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
+ return;
+ }
+ }
+
Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
g.UseRegister(buffer));
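Both the CheckedLoad and CheckedStore hunks special-case a constant power-of-two length so the bound can be passed as an immediate. One plausible reading: a bound of 2^k needs no register-register compare, since offset < 2^k exactly when every bit at or above position k is clear. A standalone illustration of that identity (not the emitted MIPS sequence):

    #include <cassert>
    #include <cstdint>

    // With length_pow2 == 1u << k, offset < length_pow2 iff no bit >= k is set.
    bool InBoundsPow2(uint32_t offset, uint32_t length_pow2) {
      assert((length_pow2 & (length_pow2 - 1)) == 0);  // power of two
      return (offset & ~(length_pow2 - 1)) == 0;
    }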
@@ -1921,9 +1944,12 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.TempImmediate(cont->trap_id()));
}
}
@@ -2135,6 +2161,9 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
g.TempImmediate(0), cont->reason(),
cont->frame_state());
+ } else if (cont->IsTrap()) {
+ selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+ g.TempImmediate(cont->trap_id()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -2280,6 +2309,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/node-marker.h b/deps/v8/src/compiler/node-marker.h
index 84666d5f07..e38105dd8b 100644
--- a/deps/v8/src/compiler/node-marker.h
+++ b/deps/v8/src/compiler/node-marker.h
@@ -20,11 +20,10 @@ class NodeMarkerBase {
public:
NodeMarkerBase(Graph* graph, uint32_t num_states);
- V8_INLINE Mark Get(Node* node) {
+ V8_INLINE Mark Get(const Node* node) {
Mark mark = node->mark();
if (mark < mark_min_) {
- mark = mark_min_;
- node->set_mark(mark_min_);
+ return 0;
}
DCHECK_LT(mark, mark_max_);
return mark - mark_min_;
@@ -52,9 +51,9 @@ class NodeMarkerBase {
// set to State(0) in constant time.
//
// In its current implementation, in debug mode NodeMarker will try to
-// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
-// or set a node with a NodeMarker, and then get or set that node
-// with an older NodeMarker you will get a crash.
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you set a
+// node with a NodeMarker, and then get or set that node with an older
+// NodeMarker you will get a crash.
//
// GraphReducer uses a NodeMarker, so individual Reducers cannot use a
// NodeMarker.
@@ -64,7 +63,7 @@ class NodeMarker : public NodeMarkerBase {
V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
: NodeMarkerBase(graph, num_states) {}
- V8_INLINE State Get(Node* node) {
+ V8_INLINE State Get(const Node* node) {
return static_cast<State>(NodeMarkerBase::Get(node));
}
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 646dbc209e..cc3a07d7e3 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/node-properties.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/verifier.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -338,6 +339,17 @@ MaybeHandle<Context> NodeProperties::GetSpecializationContext(
// static
+Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
+ Node* context = NodeProperties::GetContextInput(node);
+ while (*depth > 0 &&
+ IrOpcode::IsContextChainExtendingOpcode(context->opcode())) {
+ context = NodeProperties::GetContextInput(context);
+ (*depth)--;
+ }
+ return context;
+}
+
+// static
Type* NodeProperties::GetTypeOrAny(Node* node) {
return IsTyped(node) ? node->type() : Type::Any();
}
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 23253239a1..d428160651 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -132,6 +132,11 @@ class V8_EXPORT_PRIVATE NodeProperties final {
static MaybeHandle<Context> GetSpecializationContext(
Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
+ // Walk up the context chain from the given {node} until we reduce the {depth}
+ // to 0 or hit a node that does not extend the context chain ({depth} will be
+ // updated accordingly).
+ static Node* GetOuterContext(Node* node, size_t* depth);
+
// ---------------------------------------------------------------------------
// Type.
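A usage sketch for the new helper, following the comment above; the variable names are made up for illustration:

    // Try to skip two levels of context extension starting from a node's
    // context input. On return {depth} holds the levels that could not be
    // walked, so 0 means both extensions were skipped.
    size_t depth = 2;
    Node* outer = NodeProperties::GetOuterContext(some_node, &depth);
    if (depth == 0) {
      // {outer} is the context two extensions above {some_node}'s context.
    }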
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index f4e7b17ed2..1410ab436c 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -404,9 +404,6 @@ Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
}
-bool Node::InputEdges::empty() const { return begin() == end(); }
-
-
Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
const_iterator result(*this);
++(*this);
@@ -414,9 +411,6 @@ Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
}
-bool Node::Inputs::empty() const { return begin() == end(); }
-
-
Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
iterator result(*this);
++(*this);
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index dc6c5dc01c..7c9f3ad26f 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -46,7 +46,7 @@ class V8_EXPORT_PRIVATE Node final {
Node* const* inputs, bool has_extensible_inputs);
static Node* Clone(Zone* zone, NodeId id, const Node* node);
- bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
+ inline bool IsDead() const;
void Kill();
const Operator* op() const { return op_; }
@@ -109,41 +109,11 @@ class V8_EXPORT_PRIVATE Node final {
int UseCount() const;
void ReplaceUses(Node* replace_to);
- class InputEdges final {
- public:
- typedef Edge value_type;
-
- class iterator;
- inline iterator begin() const;
- inline iterator end() const;
-
- bool empty() const;
-
- explicit InputEdges(Node* node) : node_(node) {}
-
- private:
- Node* node_;
- };
-
- InputEdges input_edges() { return InputEdges(this); }
-
- class V8_EXPORT_PRIVATE Inputs final {
- public:
- typedef Node* value_type;
+ class InputEdges;
+ inline InputEdges input_edges();
- class const_iterator;
- inline const_iterator begin() const;
- inline const_iterator end() const;
-
- bool empty() const;
-
- explicit Inputs(Node* node) : node_(node) {}
-
- private:
- Node* node_;
- };
-
- Inputs inputs() { return Inputs(this); }
+ class Inputs;
+ inline Inputs inputs() const;
class UseEdges final {
public:
@@ -294,7 +264,7 @@ class V8_EXPORT_PRIVATE Node final {
void set_type(Type* type) { type_ = type; }
// Only NodeMarkers should manipulate the marks on nodes.
- Mark mark() { return mark_; }
+ Mark mark() const { return mark_; }
void set_mark(Mark mark) { mark_ = mark; }
inline bool has_inline_inputs() const {
@@ -345,6 +315,48 @@ static inline const T& OpParameter(const Node* node) {
return OpParameter<T>(node->op());
}
+class Node::InputEdges final {
+ public:
+ typedef Edge value_type;
+
+ class iterator;
+ inline iterator begin() const;
+ inline iterator end() const;
+
+ bool empty() const { return count_ == 0; }
+ int count() const { return count_; }
+
+ inline value_type operator[](int index) const;
+
+ InputEdges(Node** input_root, Use* use_root, int count)
+ : input_root_(input_root), use_root_(use_root), count_(count) {}
+
+ private:
+ Node** input_root_;
+ Use* use_root_;
+ int count_;
+};
+
+class V8_EXPORT_PRIVATE Node::Inputs final {
+ public:
+ typedef Node* value_type;
+
+ class const_iterator;
+ inline const_iterator begin() const;
+ inline const_iterator end() const;
+
+ bool empty() const { return count_ == 0; }
+ int count() const { return count_; }
+
+ inline value_type operator[](int index) const;
+
+ explicit Inputs(Node* const* input_root, int count)
+ : input_root_(input_root), count_(count) {}
+
+ private:
+ Node* const* input_root_;
+ int count_;
+};
// An encapsulation for information associated with a single use of a node as
// an input from another node, allowing access to both the defining node and
@@ -373,6 +385,7 @@ class Edge final {
private:
friend class Node::UseEdges::iterator;
+ friend class Node::InputEdges;
friend class Node::InputEdges::iterator;
Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
@@ -385,12 +398,37 @@ class Edge final {
Node** input_ptr_;
};
+bool Node::IsDead() const {
+ Node::Inputs inputs = this->inputs();
+ return inputs.count() > 0 && inputs[0] == nullptr;
+}
+
+Node::InputEdges Node::input_edges() {
+ int inline_count = InlineCountField::decode(bit_field_);
+ if (inline_count != kOutlineMarker) {
+ return InputEdges(inputs_.inline_, reinterpret_cast<Use*>(this) - 1,
+ inline_count);
+ } else {
+ return InputEdges(inputs_.outline_->inputs_,
+ reinterpret_cast<Use*>(inputs_.outline_) - 1,
+ inputs_.outline_->count_);
+ }
+}
+
+Node::Inputs Node::inputs() const {
+ int inline_count = InlineCountField::decode(bit_field_);
+ if (inline_count != kOutlineMarker) {
+ return Inputs(inputs_.inline_, inline_count);
+ } else {
+ return Inputs(inputs_.outline_->inputs_, inputs_.outline_->count_);
+ }
+}
// A forward iterator to visit the edges for the input dependencies of a node.
class Node::InputEdges::iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef Edge value_type;
typedef Edge* pointer;
typedef Edge& reference;
@@ -410,12 +448,23 @@ class Node::InputEdges::iterator final {
return *this;
}
iterator operator++(int);
+ iterator& operator+=(difference_type offset) {
+ input_ptr_ += offset;
+ use_ -= offset;
+ return *this;
+ }
+ iterator operator+(difference_type offset) const {
+ return iterator(use_ - offset, input_ptr_ + offset);
+ }
+ difference_type operator-(const iterator& other) const {
+ return input_ptr_ - other.input_ptr_;
+ }
private:
friend class Node;
- explicit iterator(Node* from, int index = 0)
- : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
+ explicit iterator(Use* use, Node** input_ptr)
+ : use_(use), input_ptr_(input_ptr) {}
Use* use_;
Node** input_ptr_;
@@ -423,57 +472,71 @@ class Node::InputEdges::iterator final {
Node::InputEdges::iterator Node::InputEdges::begin() const {
- return Node::InputEdges::iterator(this->node_, 0);
+ return Node::InputEdges::iterator(use_root_, input_root_);
}
Node::InputEdges::iterator Node::InputEdges::end() const {
- return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+ return Node::InputEdges::iterator(use_root_ - count_, input_root_ + count_);
}
+Edge Node::InputEdges::operator[](int index) const {
+ return Edge(use_root_ + index, input_root_ + index);
+}
// A forward iterator to visit the inputs of a node.
class Node::Inputs::const_iterator final {
public:
typedef std::forward_iterator_tag iterator_category;
- typedef int difference_type;
+ typedef std::ptrdiff_t difference_type;
typedef Node* value_type;
- typedef Node** pointer;
- typedef Node*& reference;
+ typedef const value_type* pointer;
+ typedef value_type& reference;
- const_iterator(const const_iterator& other) : iter_(other.iter_) {}
+ const_iterator(const const_iterator& other) : input_ptr_(other.input_ptr_) {}
- Node* operator*() const { return (*iter_).to(); }
+ Node* operator*() const { return *input_ptr_; }
bool operator==(const const_iterator& other) const {
- return iter_ == other.iter_;
+ return input_ptr_ == other.input_ptr_;
}
bool operator!=(const const_iterator& other) const {
return !(*this == other);
}
const_iterator& operator++() {
- ++iter_;
+ ++input_ptr_;
return *this;
}
const_iterator operator++(int);
+ const_iterator& operator+=(difference_type offset) {
+ input_ptr_ += offset;
+ return *this;
+ }
+ const_iterator operator+(difference_type offset) const {
+ return const_iterator(input_ptr_ + offset);
+ }
+ difference_type operator-(const const_iterator& other) const {
+ return input_ptr_ - other.input_ptr_;
+ }
private:
friend class Node::Inputs;
- const_iterator(Node* node, int index) : iter_(node, index) {}
+ explicit const_iterator(Node* const* input_ptr) : input_ptr_(input_ptr) {}
- Node::InputEdges::iterator iter_;
+ Node* const* input_ptr_;
};
Node::Inputs::const_iterator Node::Inputs::begin() const {
- return const_iterator(this->node_, 0);
+ return const_iterator(input_root_);
}
Node::Inputs::const_iterator Node::Inputs::end() const {
- return const_iterator(this->node_, this->node_->InputCount());
+ return const_iterator(input_root_ + count_);
}
+Node* Node::Inputs::operator[](int index) const { return input_root_[index]; }
// A forward iterator to visit the uses edges of a node.
class Node::UseEdges::iterator final {
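With InputEdges and Inputs now carrying a root pointer plus a count instead of a back-pointer to the node, iteration and indexing reduce to plain pointer arithmetic. Typical use after this change (a sketch; {node} is any Node*):

    Node::Inputs inputs = node->inputs();
    for (int i = 0; i < inputs.count(); ++i) {
      Node* input = inputs[i];  // O(1) through the new operator[]
      // ...
    }
    // Range-based iteration still works; const_iterator now walks a plain
    // Node* const* array rather than re-deriving positions from the node.
    for (Node* input : node->inputs()) { /* ... */ }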
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index fdbe001de3..1d90095769 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -25,6 +25,8 @@
V(Deoptimize) \
V(DeoptimizeIf) \
V(DeoptimizeUnless) \
+ V(TrapIf) \
+ V(TrapUnless) \
V(Return) \
V(TailCall) \
V(Terminate) \
@@ -104,7 +106,9 @@
#define JS_SIMPLE_BINOP_LIST(V) \
JS_COMPARE_BINOP_LIST(V) \
JS_BITWISE_BINOP_LIST(V) \
- JS_ARITH_BINOP_LIST(V)
+ JS_ARITH_BINOP_LIST(V) \
+ V(JSInstanceOf) \
+ V(JSOrdinaryHasInstance)
#define JS_CONVERSION_UNOP_LIST(V) \
V(JSToBoolean) \
@@ -122,26 +126,26 @@
JS_CONVERSION_UNOP_LIST(V) \
JS_OTHER_UNOP_LIST(V)
-#define JS_OBJECT_OP_LIST(V) \
- V(JSCreate) \
- V(JSCreateArguments) \
- V(JSCreateArray) \
- V(JSCreateClosure) \
- V(JSCreateIterResultObject) \
- V(JSCreateKeyValueArray) \
- V(JSCreateLiteralArray) \
- V(JSCreateLiteralObject) \
- V(JSCreateLiteralRegExp) \
- V(JSLoadProperty) \
- V(JSLoadNamed) \
- V(JSLoadGlobal) \
- V(JSStoreProperty) \
- V(JSStoreNamed) \
- V(JSStoreGlobal) \
- V(JSDeleteProperty) \
- V(JSHasProperty) \
- V(JSInstanceOf) \
- V(JSOrdinaryHasInstance)
+#define JS_OBJECT_OP_LIST(V) \
+ V(JSCreate) \
+ V(JSCreateArguments) \
+ V(JSCreateArray) \
+ V(JSCreateClosure) \
+ V(JSCreateIterResultObject) \
+ V(JSCreateKeyValueArray) \
+ V(JSCreateLiteralArray) \
+ V(JSCreateLiteralObject) \
+ V(JSCreateLiteralRegExp) \
+ V(JSLoadProperty) \
+ V(JSLoadNamed) \
+ V(JSLoadGlobal) \
+ V(JSStoreProperty) \
+ V(JSStoreNamed) \
+ V(JSStoreGlobal) \
+ V(JSStoreDataPropertyInLiteral) \
+ V(JSDeleteProperty) \
+ V(JSHasProperty) \
+ V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
V(JSLoadContext) \
@@ -154,6 +158,7 @@
#define JS_OTHER_OP_LIST(V) \
V(JSCallConstruct) \
+ V(JSCallConstructWithSpread) \
V(JSCallFunction) \
V(JSCallRuntime) \
V(JSConvertReceiver) \
@@ -294,6 +299,7 @@
V(PlainPrimitiveToWord32) \
V(PlainPrimitiveToFloat64) \
V(BooleanNot) \
+ V(StringCharAt) \
V(StringCharCodeAt) \
V(StringFromCharCode) \
V(StringFromCodePoint) \
@@ -301,6 +307,7 @@
V(CheckIf) \
V(CheckMaps) \
V(CheckNumber) \
+ V(CheckInternalizedString) \
V(CheckString) \
V(CheckSmi) \
V(CheckHeapObject) \
@@ -322,6 +329,8 @@
V(ObjectIsSmi) \
V(ObjectIsString) \
V(ObjectIsUndetectable) \
+ V(NewRestParameterElements) \
+ V(NewUnmappedArgumentsElements) \
V(ArrayBufferWasNeutered) \
V(EnsureWritableFastElements) \
V(MaybeGrowFastElements) \
@@ -527,6 +536,7 @@
V(Word32PairShr) \
V(Word32PairSar) \
V(ProtectedLoad) \
+ V(ProtectedStore) \
V(AtomicLoad) \
V(AtomicStore) \
V(UnsafePointerAdd)
@@ -553,9 +563,6 @@
V(Float32x4LessThanOrEqual) \
V(Float32x4GreaterThan) \
V(Float32x4GreaterThanOrEqual) \
- V(Float32x4Select) \
- V(Float32x4Swizzle) \
- V(Float32x4Shuffle) \
V(Float32x4FromInt32x4) \
V(Float32x4FromUint32x4) \
V(CreateInt32x4) \
@@ -574,9 +581,6 @@
V(Int32x4LessThanOrEqual) \
V(Int32x4GreaterThan) \
V(Int32x4GreaterThanOrEqual) \
- V(Int32x4Select) \
- V(Int32x4Swizzle) \
- V(Int32x4Shuffle) \
V(Int32x4FromFloat32x4) \
V(Uint32x4Min) \
V(Uint32x4Max) \
@@ -709,7 +713,10 @@
V(Simd128And) \
V(Simd128Or) \
V(Simd128Xor) \
- V(Simd128Not)
+ V(Simd128Not) \
+ V(Simd32x4Select) \
+ V(Simd32x4Swizzle) \
+ V(Simd32x4Shuffle)
#define MACHINE_SIMD_OP_LIST(V) \
MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
@@ -793,6 +800,10 @@ class V8_EXPORT_PRIVATE IrOpcode {
(kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
(kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
}
+
+ static bool IsContextChainExtendingOpcode(Value value) {
+ return kJSCreateFunctionContext <= value && value <= kJSCreateScriptContext;
+ }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
diff --git a/deps/v8/src/compiler/operation-typer.cc b/deps/v8/src/compiler/operation-typer.cc
index 9198f4b9a9..c422f0986b 100644
--- a/deps/v8/src/compiler/operation-typer.cc
+++ b/deps/v8/src/compiler/operation-typer.cc
@@ -366,8 +366,9 @@ Type* OperationTyper::NumberExpm1(Type* type) {
Type* OperationTyper::NumberFloor(Type* type) {
DCHECK(type->Is(Type::Number()));
if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
- // TODO(bmeurer): We could infer a more precise type here.
- return cache_.kIntegerOrMinusZeroOrNaN;
+ type = Type::Intersect(type, Type::MinusZeroOrNaN(), zone());
+ type = Type::Union(type, cache_.kInteger, zone());
+ return type;
}
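The sharper NumberFloor type keeps exactly the -0/NaN possibilities already present in the input and replaces everything else with Integer, since floor can only produce an integer, -0 (from a -0 input), or NaN. A quick check of the boundary behavior in plain C++:

    #include <cmath>
    #include <cstdio>

    int main() {
      // floor() yields integers, except that -0 and NaN pass through:
      std::printf("%g\n", std::floor(2.7));   // 2
      std::printf("%g\n", std::floor(-0.0));  // -0
      std::printf("%g\n", std::floor(NAN));   // nan
      return 0;
    }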
Type* OperationTyper::NumberFround(Type* type) {
@@ -624,12 +625,19 @@ Type* OperationTyper::NumberDivide(Type* lhs, Type* rhs) {
}
if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
- // Division is tricky, so all we do is try ruling out nan.
+ // Division is tricky, so all we do is try ruling out -0 and NaN.
+ bool maybe_minuszero = !lhs->Is(cache_.kPositiveIntegerOrNaN) ||
+ !rhs->Is(cache_.kPositiveIntegerOrNaN);
bool maybe_nan =
lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
(rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
- return maybe_nan ? Type::Number() : Type::OrderedNumber();
+
+ // Take into account the -0 and NaN information computed earlier.
+ Type* type = Type::PlainNumber();
+ if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+ if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+ return type;
}
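The -0 case that maybe_minuszero now tracks is sign-underflow: a negative finite numerator over a sufficiently large positive denominator divides to negative zero, so -0 can only be ruled out when both operands are known non-negative. A standalone check of the arithmetic fact:

    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main() {
      double q = -1.0 / std::numeric_limits<double>::infinity();
      // Prints "q = -0, signbit = 1": the quotient is negative zero.
      std::printf("q = %g, signbit = %d\n", q, std::signbit(q) ? 1 : 0);
      return 0;
    }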
Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
@@ -796,8 +804,35 @@ Type* OperationTyper::NumberShiftLeft(Type* lhs, Type* rhs) {
DCHECK(lhs->Is(Type::Number()));
DCHECK(rhs->Is(Type::Number()));
- // TODO(turbofan): Infer a better type here.
- return Type::Signed32();
+ if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+ lhs = NumberToInt32(lhs);
+ rhs = NumberToUint32(rhs);
+
+ int32_t min_lhs = lhs->Min();
+ int32_t max_lhs = lhs->Max();
+ uint32_t min_rhs = rhs->Min();
+ uint32_t max_rhs = rhs->Max();
+ if (max_rhs > 31) {
+ // rhs can be larger than the bitmask
+ max_rhs = 31;
+ min_rhs = 0;
+ }
+
+ if (max_lhs > (kMaxInt >> max_rhs) || min_lhs < (kMinInt >> max_rhs)) {
+ // overflow possible
+ return Type::Signed32();
+ }
+
+ double min =
+ std::min(static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << min_rhs),
+ static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << max_rhs));
+ double max =
+ std::max(static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << min_rhs),
+ static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << max_rhs));
+
+ if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+ return Type::Range(min, max, zone());
}
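A worked instance of the new shift-left range computation, for a case where the overflow guard does not fire: lhs in [1, 3] and rhs in [1, 2] gives corner shifts 1<<1, 1<<2, 3<<1, 3<<2 and hence Range(2, 12):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t min_lhs = 1, max_lhs = 3;
      uint32_t min_rhs = 1, max_rhs = 2;
      int32_t min = std::min(min_lhs << min_rhs, min_lhs << max_rhs);  // 2
      int32_t max = std::max(max_lhs << min_rhs, max_lhs << max_rhs);  // 12
      std::printf("Range(%d, %d)\n", min, max);
      return 0;
    }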
Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
@@ -809,33 +844,18 @@ Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
lhs = NumberToInt32(lhs);
rhs = NumberToUint32(rhs);
- double min = kMinInt;
- double max = kMaxInt;
- if (lhs->Min() >= 0) {
- // Right-shifting a non-negative value cannot make it negative, nor larger.
- min = std::max(min, 0.0);
- max = std::min(max, lhs->Max());
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
- }
+ int32_t min_lhs = lhs->Min();
+ int32_t max_lhs = lhs->Max();
+ uint32_t min_rhs = rhs->Min();
+ uint32_t max_rhs = rhs->Max();
+ if (max_rhs > 31) {
+ // rhs can be larger than the bitmask
+ max_rhs = 31;
+ min_rhs = 0;
}
- if (lhs->Max() < 0) {
- // Right-shifting a negative value cannot make it non-negative, nor smaller.
- min = std::max(min, lhs->Min());
- max = std::min(max, -1.0);
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
- }
- }
- if (rhs->Min() > 0 && rhs->Max() <= 31) {
- // Right-shifting by a positive value yields a small integer value.
- double shift_min = kMinInt >> static_cast<int>(rhs->Min());
- double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
- min = std::max(min, shift_min);
- max = std::min(max, shift_max);
- }
- // TODO(jarin) Ideally, the following micro-optimization should be performed
- // by the type constructor.
+ double min = std::min(min_lhs >> min_rhs, min_lhs >> max_rhs);
+ double max = std::max(max_lhs >> min_rhs, max_lhs >> max_rhs);
+
if (max == kMaxInt && min == kMinInt) return Type::Signed32();
return Type::Range(min, max, zone());
}
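The same corner evaluation for arithmetic shift-right: with lhs in [-8, 5] and rhs in [1, 2], min = min(-8>>1, -8>>2) = -4 and max = max(5>>1, 5>>2) = 2, so the inferred type is Range(-4, 2). Taking both rhs corners matters because shifting a negative lhs further right moves it toward zero:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t min_lhs = -8, max_lhs = 5;
      uint32_t min_rhs = 1, max_rhs = 2;
      int32_t min = std::min(min_lhs >> min_rhs, min_lhs >> max_rhs);  // -4
      int32_t max = std::max(max_lhs >> min_rhs, max_lhs >> max_rhs);  // 2
      std::printf("Range(%d, %d)\n", min, max);
      return 0;
    }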
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 0a9e6448e2..02b2f64a30 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -78,6 +78,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStoreProperty:
case IrOpcode::kJSLoadGlobal:
case IrOpcode::kJSStoreGlobal:
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
case IrOpcode::kJSDeleteProperty:
// Context operations
@@ -93,6 +94,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Call operations
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallConstructWithSpread:
case IrOpcode::kJSCallFunction:
// Misc operations
@@ -100,6 +102,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSForInNext:
case IrOpcode::kJSForInPrepare:
case IrOpcode::kJSStackCheck:
+ case IrOpcode::kJSGetSuperConstructor:
return true;
default:
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index a2dc4305a3..687424b66f 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -268,28 +268,7 @@ void SetTypeForOsrValue(Node* osr_value, Node* loop,
}
}
- OsrGuardType guard_type = OsrGuardType::kAny;
- // Find the phi that uses the OsrGuard node and get the type from
- // there. Skip the search if the OsrGuard does not have value use
- // (i.e., if there is other use beyond the effect use).
- if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
- osr_guard->UseCount() > 1) {
- Type* type = nullptr;
- for (Node* use : osr_guard->uses()) {
- if (use->opcode() == IrOpcode::kPhi) {
- if (NodeProperties::GetControlInput(use) != loop) continue;
- CHECK_NULL(type);
- type = NodeProperties::GetType(use);
- }
- }
- CHECK_NOT_NULL(type);
-
- if (type->Is(Type::SignedSmall())) {
- guard_type = OsrGuardType::kSignedSmall;
- }
- }
-
- NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+ NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
}
} // namespace
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 2614155722..d0f4f18ea3 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -65,7 +65,6 @@
#include "src/compiler/simplified-operator.h"
#include "src/compiler/store-store-elimination.h"
#include "src/compiler/tail-call-optimization.h"
-#include "src/compiler/type-hint-analyzer.h"
#include "src/compiler/typed-optimization.h"
#include "src/compiler/typer.h"
#include "src/compiler/value-numbering-reducer.h"
@@ -75,6 +74,7 @@
#include "src/ostreams.h"
#include "src/parsing/parse-info.h"
#include "src/register-configuration.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/type-info.h"
#include "src/utils.h"
@@ -111,39 +111,51 @@ class PipelineData {
javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
jsgraph_ = new (graph_zone_)
JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+ is_asm_ = info->shared_info()->asm_function();
}
// For WASM compile entry point.
- PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
- SourcePositionTable* source_positions)
+ PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
graph_zone_scope_(zone_stats_, ZONE_NAME),
- graph_(graph),
+ graph_(jsgraph->graph()),
source_positions_(source_positions),
+ machine_(jsgraph->machine()),
+ common_(jsgraph->common()),
+ javascript_(jsgraph->javascript()),
+ jsgraph_(jsgraph),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()),
+ protected_instructions_(protected_instructions) {
+ is_asm_ =
+ info->has_shared_info() ? info->shared_info()->asm_function() : false;
+ }
// For machine graph testing entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
- Schedule* schedule)
+ Schedule* schedule, SourcePositionTable* source_positions)
: isolate_(info->isolate()),
info_(info),
debug_name_(info_->GetDebugName()),
zone_stats_(zone_stats),
graph_zone_scope_(zone_stats_, ZONE_NAME),
graph_(graph),
- source_positions_(new (info->zone()) SourcePositionTable(graph_)),
+ source_positions_(source_positions),
schedule_(schedule),
instruction_zone_scope_(zone_stats_, ZONE_NAME),
instruction_zone_(instruction_zone_scope_.zone()),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+ is_asm_ = false;
+ }
// For register allocation testing entry point.
PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
InstructionSequence* sequence)
@@ -156,7 +168,10 @@ class PipelineData {
instruction_zone_(sequence->zone()),
sequence_(sequence),
register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
- register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+ register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+ is_asm_ =
+ info->has_shared_info() ? info->shared_info()->asm_function() : false;
+ }
~PipelineData() {
DeleteRegisterAllocationZone();
@@ -170,6 +185,11 @@ class PipelineData {
PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
bool compilation_failed() const { return compilation_failed_; }
void set_compilation_failed() { compilation_failed_ = true; }
+
+ bool is_asm() const { return is_asm_; }
+ bool verify_graph() const { return verify_graph_; }
+ void set_verify_graph(bool value) { verify_graph_ = value; }
+
Handle<Code> code() { return code_; }
void set_code(Handle<Code> code) {
DCHECK(code_.is_null());
@@ -199,12 +219,6 @@ class PipelineData {
loop_assignment_ = loop_assignment;
}
- TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
- void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
- DCHECK_NULL(type_hint_analysis_);
- type_hint_analysis_ = type_hint_analysis;
- }
-
Schedule* schedule() const { return schedule_; }
void set_schedule(Schedule* schedule) {
DCHECK(!schedule_);
@@ -233,6 +247,11 @@ class PipelineData {
source_position_output_ = source_position_output;
}
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
+ const {
+ return protected_instructions_;
+ }
+
void DeleteGraphZone() {
if (graph_zone_ == nullptr) return;
graph_zone_scope_.Destroy();
@@ -240,7 +259,6 @@ class PipelineData {
graph_ = nullptr;
source_positions_ = nullptr;
loop_assignment_ = nullptr;
- type_hint_analysis_ = nullptr;
simplified_ = nullptr;
machine_ = nullptr;
common_ = nullptr;
@@ -293,7 +311,7 @@ class PipelineData {
DCHECK(register_allocation_data_ == nullptr);
register_allocation_data_ = new (register_allocation_zone())
RegisterAllocationData(config, register_allocation_zone(), frame(),
- sequence(), debug_name_.get());
+ sequence(), debug_name());
}
void BeginPhaseKind(const char* phase_kind_name) {
@@ -308,6 +326,8 @@ class PipelineData {
}
}
+ const char* debug_name() const { return debug_name_.get(); }
+
private:
Isolate* const isolate_;
CompilationInfo* const info_;
@@ -316,6 +336,8 @@ class PipelineData {
ZoneStats* const zone_stats_;
PipelineStatistics* pipeline_statistics_ = nullptr;
bool compilation_failed_ = false;
+ bool verify_graph_ = false;
+ bool is_asm_ = false;
Handle<Code> code_ = Handle<Code>::null();
// All objects in the following group of fields are allocated in graph_zone_.
@@ -325,7 +347,6 @@ class PipelineData {
Graph* graph_ = nullptr;
SourcePositionTable* source_positions_ = nullptr;
LoopAssignmentAnalysis* loop_assignment_ = nullptr;
- TypeHintAnalysis* type_hint_analysis_ = nullptr;
SimplifiedOperatorBuilder* simplified_ = nullptr;
MachineOperatorBuilder* machine_ = nullptr;
CommonOperatorBuilder* common_ = nullptr;
@@ -355,6 +376,9 @@ class PipelineData {
// Source position output for --trace-turbo.
std::string source_position_output_;
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
+ nullptr;
+
DISALLOW_COPY_AND_ASSIGN(PipelineData);
};
@@ -555,27 +579,29 @@ class PipelineCompilationJob final : public CompilationJob {
PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
if (info()->shared_info()->asm_function()) {
- if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+ if (info()->osr_frame() && !info()->is_optimizing_from_bytecode()) {
+ info()->MarkAsFrameSpecializing();
+ }
info()->MarkAsFunctionContextSpecializing();
} else {
if (!FLAG_always_opt) {
info()->MarkAsBailoutOnUninitialized();
}
- if (FLAG_turbo_inlining) {
- info()->MarkAsInliningEnabled();
+ if (FLAG_turbo_loop_peeling) {
+ info()->MarkAsLoopPeelingEnabled();
}
}
- if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+ if (info()->is_optimizing_from_bytecode() ||
+ !info()->shared_info()->asm_function()) {
info()->MarkAsDeoptimizationEnabled();
if (FLAG_inline_accessors) {
info()->MarkAsAccessorInliningEnabled();
}
}
if (!info()->is_optimizing_from_bytecode()) {
- if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
- info()->MarkAsTypeFeedbackEnabled();
- }
if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+ } else if (FLAG_turbo_inlining) {
+ info()->MarkAsInliningEnabled();
}
linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
@@ -612,15 +638,18 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
class PipelineWasmCompilationJob final : public CompilationJob {
public:
- explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
- CallDescriptor* descriptor,
- SourcePositionTable* source_positions)
+ explicit PipelineWasmCompilationJob(
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
+ bool allow_signalling_nan)
: CompilationJob(info->isolate(), info, "TurboFan",
State::kReadyToExecute),
zone_stats_(info->isolate()->allocator()),
- data_(&zone_stats_, info, graph, source_positions),
+ data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
pipeline_(&data_),
- linkage_(descriptor) {}
+ linkage_(descriptor),
+ allow_signalling_nan_(allow_signalling_nan) {}
protected:
Status PrepareJobImpl() final;
@@ -632,6 +661,7 @@ class PipelineWasmCompilationJob final : public CompilationJob {
PipelineData data_;
PipelineImpl pipeline_;
Linkage linkage_;
+ bool allow_signalling_nan_;
};
PipelineWasmCompilationJob::Status
@@ -649,6 +679,24 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
}
pipeline_.RunPrintAndVerify("Machine", true);
+ if (FLAG_wasm_opt) {
+ PipelineData* data = &data_;
+ PipelineRunScope scope(data, "WASM optimization");
+ JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
+ DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+ data->common());
+ ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
+ MachineOperatorReducer machine_reducer(data->jsgraph(),
+ allow_signalling_nan_);
+ CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+ data->common(), data->machine());
+ AddReducer(data, &graph_reducer, &dead_code_elimination);
+ AddReducer(data, &graph_reducer, &value_numbering);
+ AddReducer(data, &graph_reducer, &machine_reducer);
+ AddReducer(data, &graph_reducer, &common_reducer);
+ graph_reducer.ReduceGraph();
+ pipeline_.RunPrintAndVerify("Optimized Machine", true);
+ }
if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
return SUCCEEDED;
@@ -694,20 +742,6 @@ struct LoopAssignmentAnalysisPhase {
};
-struct TypeHintAnalysisPhase {
- static const char* phase_name() { return "type hint analysis"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- if (data->info()->is_type_feedback_enabled()) {
- TypeHintAnalyzer analyzer(data->graph_zone());
- Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
- TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
- data->set_type_hint_analysis(type_hint_analysis);
- }
- }
-};
-
-
struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
@@ -715,15 +749,18 @@ struct GraphBuilderPhase {
bool succeeded = false;
if (data->info()->is_optimizing_from_bytecode()) {
- BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
- data->jsgraph(), 1.0f,
- data->source_positions());
+      // Bytecode graph builder assumes deoptimization is enabled.
+ DCHECK(data->info()->is_deoptimization_enabled());
+ BytecodeGraphBuilder graph_builder(
+ temp_zone, data->info()->shared_info(),
+ handle(data->info()->closure()->feedback_vector()),
+ data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+ data->source_positions());
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), 1.0f,
- data->loop_assignment(), data->type_hint_analysis(),
- data->source_positions());
+ data->loop_assignment(), data->source_positions());
succeeded = graph_builder.CreateGraph();
}
@@ -744,9 +781,6 @@ struct InliningPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
- if (data->info()->is_bailout_on_uninitialized()) {
- call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
- }
if (data->info()->is_deoptimization_enabled()) {
call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
}
@@ -817,21 +851,6 @@ struct TyperPhase {
}
};
-struct OsrTyperPhase {
- static const char* phase_name() { return "osr typer"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- NodeVector roots(temp_zone);
- data->jsgraph()->GetCachedNodes(&roots);
- // Dummy induction variable optimizer: at the moment, we do not try
- // to compute loop variable bounds on OSR.
- LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
- data->common(), temp_zone);
- Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
- typer.Run(roots, &induction_vars);
- }
-};
-
struct UntyperPhase {
static const char* phase_name() { return "untyper"; }
@@ -944,8 +963,8 @@ struct EscapeAnalysisPhase {
}
};
-struct RepresentationSelectionPhase {
- static const char* phase_name() { return "representation selection"; }
+struct SimplifiedLoweringPhase {
+ static const char* phase_name() { return "simplified lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
@@ -978,6 +997,23 @@ struct LoopExitEliminationPhase {
}
};
+struct ConcurrentOptimizationPrepPhase {
+ static const char* phase_name() {
+ return "concurrent optimization preparation";
+ }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ // Make sure we cache these code stubs.
+ data->jsgraph()->CEntryStubConstant(1);
+ data->jsgraph()->CEntryStubConstant(2);
+ data->jsgraph()->CEntryStubConstant(3);
+
+ // This is needed for escape analysis.
+ NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
+ NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
+ }
+};
+
struct GenericLoweringPhase {
static const char* phase_name() { return "generic lowering"; }
@@ -1178,21 +1214,6 @@ struct LateGraphTrimmingPhase {
};
-struct StressLoopPeelingPhase {
- static const char* phase_name() { return "stress loop peeling"; }
-
- void Run(PipelineData* data, Zone* temp_zone) {
- // Peel the first outer loop for testing.
- // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
- LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
- if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
- LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
- loop_tree->outer_loops()[0], temp_zone);
- }
- }
-};
-
-
struct ComputeSchedulePhase {
static const char* phase_name() { return "scheduling"; }
@@ -1404,7 +1425,7 @@ struct GenerateCodePhase {
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
CodeGenerator generator(data->frame(), linkage, data->sequence(),
- data->info());
+ data->info(), data->protected_instructions());
data->set_code(generator.GenerateCode());
}
};
@@ -1475,8 +1496,6 @@ bool PipelineImpl::CreateGraph() {
Run<LoopAssignmentAnalysisPhase>();
}
- Run<TypeHintAnalysisPhase>();
-
Run<GraphBuilderPhase>();
if (data->compilation_failed()) {
data->EndPhaseKind();
@@ -1486,8 +1505,6 @@ bool PipelineImpl::CreateGraph() {
// Perform OSR deconstruction.
if (info()->is_osr()) {
- Run<OsrTyperPhase>();
-
Run<OsrDeconstructionPhase>();
Run<UntyperPhase>();
@@ -1512,7 +1529,7 @@ bool PipelineImpl::CreateGraph() {
// Determine the Typer operation flags.
Typer::Flags flags = Typer::kNoFlags;
if (is_sloppy(info()->shared_info()->language_mode()) &&
- !info()->shared_info()->IsBuiltin()) {
+ info()->shared_info()->IsUserJavaScript()) {
// Sloppy mode functions always have an Object for this.
flags |= Typer::kThisIsReceiver;
}
@@ -1533,43 +1550,50 @@ bool PipelineImpl::CreateGraph() {
// Lower JSOperators where we can determine types.
Run<TypedLoweringPhase>();
RunPrintAndVerify("Lowered typed");
+ }
- if (FLAG_turbo_loop_peeling) {
- Run<LoopPeelingPhase>();
- RunPrintAndVerify("Loops peeled", true);
- } else {
- Run<LoopExitEliminationPhase>();
- RunPrintAndVerify("Loop exits eliminated", true);
- }
+ // Do some hacky things to prepare for the optimization phase.
+ // (caching handles, etc.).
+ Run<ConcurrentOptimizationPrepPhase>();
- if (FLAG_turbo_stress_loop_peeling) {
- Run<StressLoopPeelingPhase>();
- RunPrintAndVerify("Loop peeled");
- }
+ data->EndPhaseKind();
- if (!info()->shared_info()->asm_function()) {
- if (FLAG_turbo_load_elimination) {
- Run<LoadEliminationPhase>();
- RunPrintAndVerify("Load eliminated");
- }
+ return true;
+}
- if (FLAG_turbo_escape) {
- Run<EscapeAnalysisPhase>();
- if (data->compilation_failed()) {
- info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
- data->EndPhaseKind();
- return false;
- }
- RunPrintAndVerify("Escape Analysed");
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+ PipelineData* data = this->data_;
+
+ if (data->info()->is_loop_peeling_enabled()) {
+ Run<LoopPeelingPhase>();
+ RunPrintAndVerify("Loops peeled", true);
+ } else {
+ Run<LoopExitEliminationPhase>();
+ RunPrintAndVerify("Loop exits eliminated", true);
+ }
+
+ if (!data->is_asm()) {
+ if (FLAG_turbo_load_elimination) {
+ Run<LoadEliminationPhase>();
+ RunPrintAndVerify("Load eliminated");
+ }
+
+ if (FLAG_turbo_escape) {
+ Run<EscapeAnalysisPhase>();
+ if (data->compilation_failed()) {
+ info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+ data->EndPhaseKind();
+ return false;
}
+ RunPrintAndVerify("Escape Analysed");
}
}
- // Select representations. This has to run w/o the Typer decorator, because
- // we cannot compute meaningful types anyways, and the computed types might
- // even conflict with the representation/truncation logic.
- Run<RepresentationSelectionPhase>();
- RunPrintAndVerify("Representations selected", true);
+ // Perform simplified lowering. This has to run w/o the Typer decorator,
+ // because we cannot compute meaningful types anyways, and the computed types
+ // might even conflict with the representation/truncation logic.
+ Run<SimplifiedLoweringPhase>();
+ RunPrintAndVerify("Simplified lowering", true);
#ifdef DEBUG
// From now on it is invalid to look at types on the nodes, because:
@@ -1592,14 +1616,6 @@ bool PipelineImpl::CreateGraph() {
Run<GenericLoweringPhase>();
RunPrintAndVerify("Generic lowering", true);
- data->EndPhaseKind();
-
- return true;
-}
-
-bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
- PipelineData* data = this->data_;
-
data->BeginPhaseKind("block building");
// Run early optimization pass.
@@ -1648,7 +1664,9 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
- PipelineData data(&zone_stats, &info, graph, schedule);
+ SourcePositionTable source_positions(graph);
+ PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
+ data.set_verify_graph(FLAG_csa_verify);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
@@ -1660,6 +1678,12 @@ Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
if (FLAG_trace_turbo) {
{
+ CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "---------------------------------------------------\n"
+ << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
+ }
+ {
TurboJsonFile json_of(&info, std::ios_base::trunc);
json_of << "{\"function\":\"" << info.GetDebugName().get()
<< "\", \"source\":\"\",\n\"phases\":[";
@@ -1696,13 +1720,16 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
}
// static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
- CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule) {
+Handle<Code> Pipeline::GenerateCodeForTesting(
+ CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, SourcePositionTable* source_positions) {
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(info->isolate()->allocator());
- PipelineData data(&zone_stats, info, graph, schedule);
+ // TODO(wasm): Refactor code generation to check for non-existing source
+ // table, then remove this conditional allocation.
+ if (!source_positions)
+ source_positions = new (info->zone()) SourcePositionTable(graph);
+ PipelineData data(&zone_stats, info, graph, schedule, source_positions);
std::unique_ptr<PipelineStatistics> pipeline_statistics;
if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
@@ -1729,10 +1756,13 @@ CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
// static
CompilationJob* Pipeline::NewWasmCompilationJob(
- CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions) {
- return new PipelineWasmCompilationJob(info, graph, descriptor,
- source_positions);
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
+ bool allow_signalling_nan) {
+ return new PipelineWasmCompilationJob(
+ info, jsgraph, descriptor, source_positions, protected_instructions,
+ allow_signalling_nan);
}
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1767,12 +1797,27 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
info(), data->graph(), data->schedule()));
}
- if (FLAG_turbo_verify_machine_graph != nullptr &&
- (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
- !strcmp(FLAG_turbo_verify_machine_graph,
- data->info()->GetDebugName().get()))) {
+ bool verify_stub_graph = data->verify_graph();
+ if (verify_stub_graph ||
+ (FLAG_turbo_verify_machine_graph != nullptr &&
+ (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+ !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
+ if (FLAG_trace_csa_verify) {
+ AllowHandleDereference allow_deref;
+ CompilationInfo* info = data->info();
+ CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "--------------------------------------------------\n"
+ << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n"
+ << *data->schedule()
+ << "--------------------------------------------------\n"
+ << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+ << "--------------------------------------------------\n";
+ }
Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+ data->info()->IsStub(), data->debug_name(),
&temp_zone);
}
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index 0c0a57b286..0c3e4ea7cb 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -9,6 +9,7 @@
// Do not include anything from src/compiler here!
#include "src/globals.h"
#include "src/objects.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
@@ -17,9 +18,14 @@ class CompilationInfo;
class CompilationJob;
class RegisterConfiguration;
+namespace trap_handler {
+struct ProtectedInstructionData;
+} // namespace trap_handler
+
namespace compiler {
class CallDescriptor;
+class JSGraph;
class Graph;
class InstructionSequence;
class Schedule;
@@ -32,8 +38,11 @@ class Pipeline : public AllStatic {
// Returns a new compilation job for the WebAssembly compilation info.
static CompilationJob* NewWasmCompilationJob(
- CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
- SourcePositionTable* source_positions);
+ CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+ SourcePositionTable* source_positions,
+ ZoneVector<trap_handler::ProtectedInstructionData>*
+ protected_instructions,
+ bool wasm_origin);
// Run the pipeline on a machine graph and generate code. The {schedule} must
// be valid, hence the given {graph} does not need to be schedulable.
@@ -60,10 +69,10 @@ class Pipeline : public AllStatic {
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
- CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule = nullptr);
+ static Handle<Code> GenerateCodeForTesting(
+ CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule = nullptr,
+ SourcePositionTable* source_positions = nullptr);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index a838ede47c..56755d2446 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -34,6 +34,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
case kFlags_branch:
case kFlags_deoptimize:
case kFlags_set:
+ case kFlags_trap:
return SetRC;
case kFlags_none:
return LeaveRC;
@@ -263,7 +264,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
// Overflow checked for add/sub only.
switch (op) {
#if V8_TARGET_ARCH_PPC64
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_Sub:
#endif
case kPPC_AddWithOverflow32:
@@ -276,7 +278,8 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
case kNotOverflow:
switch (op) {
#if V8_TARGET_ARCH_PPC64
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_Sub:
#endif
case kPPC_AddWithOverflow32:
@@ -761,36 +764,33 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
- do { \
- Label done; \
- Register result = i.OutputRegister(); \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode); \
- __ sync(); \
- if (mode == kMode_MRI) { \
- __ asm_instr(result, operand); \
- } else { \
- __ asm_instrx(result, operand); \
- } \
- __ bind(&done); \
- __ cmp(result, result); \
- __ bne(&done); \
- __ isync(); \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ Label done; \
+ Register result = i.OutputRegister(); \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(result, operand); \
+ } else { \
+ __ asm_instrx(result, operand); \
+ } \
+ __ lwsync(); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
- do { \
- size_t index = 0; \
- AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, &index); \
- Register value = i.InputRegister(index); \
- __ sync(); \
- if (mode == kMode_MRI) { \
- __ asm_instr(value, operand); \
- } else { \
- __ asm_instrx(value, operand); \
- } \
- DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+ do { \
+ size_t index = 0; \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Register value = i.InputRegister(index); \
+ __ lwsync(); \
+ if (mode == kMode_MRI) { \
+ __ asm_instr(value, operand); \
+ } else { \
+ __ asm_instrx(value, operand); \
+ } \
+ __ sync(); \
+ DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
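The rewritten macros drop the leading full sync and the cmp/bne/isync wait on loads, leaving ld; lwsync for loads and lwsync; st; sync for stores. In std::atomic vocabulary this reads roughly as acquire loads and release stores with a trailing full fence — a reading of the diff, not a statement of V8's PPC memory model:

    #include <atomic>

    std::atomic<int> cell{0};

    int LoadLikeNewMacro() {
      return cell.load(std::memory_order_acquire);  // ld; lwsync
    }

    void StoreLikeNewMacro(int v) {
      cell.store(v, std::memory_order_release);             // lwsync; st
      std::atomic_thread_fence(std::memory_order_seq_cst);  // sync
    }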
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1322,7 +1322,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
63 - i.InputInt32(2), i.OutputRCBit());
break;
#endif
- case kPPC_Add:
+ case kPPC_Add32:
#if V8_TARGET_ARCH_PPC64
if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
ASSEMBLE_ADD_WITH_OVERFLOW();
@@ -1335,10 +1335,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
}
+ __ extsw(i.OutputRegister(), i.OutputRegister());
#if V8_TARGET_ARCH_PPC64
}
#endif
break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_Add64:
+ if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ ASSEMBLE_ADD_WITH_OVERFLOW();
+ } else {
+ if (HasRegisterInput(instr, 1)) {
+ __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ LeaveOE, i.OutputRCBit());
+ } else {
+ __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ }
+ }
+ break;
+#endif
case kPPC_AddWithOverflow32:
ASSEMBLE_ADD_WITH_OVERFLOW32();
break;
@@ -1431,19 +1447,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
break;
case kPPC_Mod32:
- ASSEMBLE_MODULO(divw, mullw);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divw, mullw);
+ }
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_Mod64:
- ASSEMBLE_MODULO(divd, mulld);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divd, mulld);
+ }
break;
#endif
case kPPC_ModU32:
- ASSEMBLE_MODULO(divwu, mullw);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divwu, mullw);
+ }
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_ModU64:
- ASSEMBLE_MODULO(divdu, mulld);
+ if (CpuFeatures::IsSupported(MODULO)) {
+ __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ ASSEMBLE_MODULO(divdu, mulld);
+ }
break;
#endif
case kPPC_ModDouble:
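On Power9 the new modsw/modsd/moduw/modud instructions compute the remainder directly; older cores keep the ASSEMBLE_MODULO expansion, which, per its divide/multiply arguments, synthesizes it. The fallback in scalar C++ terms, assuming the usual truncating-division semantics:

    #include <cstdint>

    // Equivalent of ASSEMBLE_MODULO(divd, mulld): truncating divide,
    // multiply back, subtract. Caller must ensure b != 0 and avoid
    // INT64_MIN / -1.
    int64_t ModViaDivMul(int64_t a, int64_t b) {
      int64_t q = a / b;  // divd
      return a - q * b;   // mulld + sub
    }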
@@ -1984,6 +2016,82 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ PPCOperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED, true);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+
+ ArchOpcode op = instr->arch_opcode();
+ CRegister cr = cr0;
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kPPC_CmpDouble) {
+    // Check for unordered if necessary.
+ if (cond == le) {
+ __ bunordered(&end, cr);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel, cr);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel, cr);
+ __ bind(&end);
+}
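AssembleArchTrap keeps the hot path to a single conditional branch by deferring the trap call (and the frame setup when the frame was elided) to an out-of-line stub emitted after the function body. A minimal standalone sketch of that out-of-line pattern (hypothetical types, not the V8 classes):

    #include <functional>
    #include <iostream>
    #include <vector>

    // Hypothetical skeleton of the out-of-line code pattern: the fast
    // path records the slow path instead of emitting it inline, and all
    // recorded stubs are flushed after the main body.
    struct OutOfLine {
      std::function<void()> generate;
    };

    int main() {
      std::vector<OutOfLine> ool_list;
      ool_list.push_back(
          {[] { std::cout << "EnterFrame; CallRuntime(trap_id)\n"; }});
      std::cout << "b(cond, ool_entry)\n";           // hot path: one branch only
      for (auto& ool : ool_list) ool.generate();     // emit slow paths last
      return 0;
    }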
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2257,11 +2365,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_PPC64
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -2270,11 +2376,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
#endif
__ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
@@ -2313,8 +2418,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DoubleRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
- double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
- : src.ToFloat64();
+ double value;
+// bit_cast of an sNaN is converted to a qNaN on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ intptr_t valueInt = (src.type() == Constant::kFloat32)
+ ? src.ToFloat32AsInt()
+ : src.ToFloat64AsInt();
+ if (valueInt == ((src.type() == Constant::kFloat32)
+ ? 0x7fa00000
+ : 0x7fa0000000000000)) {
+ value = bit_cast<double, int64_t>(0x7ff4000000000000L);
+ } else {
+#endif
+ value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+ : src.ToFloat64();
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ }
+#endif
__ LoadDoubleLiteral(dst, value, kScratchReg);
if (destination->IsFPStackSlot()) {
__ StoreDouble(dst, g.ToMemOperand(destination), r0);
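The raw-bit comparison above exists because, per the comment, converting the constant through a host double on ia32/x64 can silently turn a signaling-NaN payload into a quiet NaN before it reaches the target. A standalone sketch of the idea, reusing the two constants from the code above, with hypothetical variable names:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Hypothetical sketch: compare the constant's raw bits against the
    // sentinel payload first, and only fall back to the host double
    // conversion (which may quiet an sNaN on ia32/x64) when it differs.
    int main() {
      const uint64_t kSentinelBits = 0x7fa0000000000000ULL;      // from the patch
      const uint64_t kCanonicalQuietNaN = 0x7ff4000000000000ULL;
      uint64_t raw = kSentinelBits;  // stands in for src.ToFloat64AsInt()
      double value;
      if (raw == kSentinelBits) {
        std::memcpy(&value, &kCanonicalQuietNaN, sizeof(value));
      } else {
        std::memcpy(&value, &raw, sizeof(value));
      }
      uint64_t out;
      std::memcpy(&out, &value, sizeof(out));
      std::cout << std::hex << out << "\n";  // 7ff4000000000000
      return 0;
    }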
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 9198bcb00c..f68ab3ae68 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -33,7 +33,8 @@ namespace compiler {
V(PPC_RotLeftAndClear64) \
V(PPC_RotLeftAndClearLeft64) \
V(PPC_RotLeftAndClearRight64) \
- V(PPC_Add) \
+ V(PPC_Add32) \
+ V(PPC_Add64) \
V(PPC_AddWithOverflow32) \
V(PPC_AddPair) \
V(PPC_AddDouble) \
@@ -42,7 +43,7 @@ namespace compiler {
V(PPC_SubPair) \
V(PPC_SubDouble) \
V(PPC_Mul32) \
- V(PPC_Mul32WithHigh32) \
+ V(PPC_Mul32WithHigh32) \
V(PPC_Mul64) \
V(PPC_MulHigh32) \
V(PPC_MulHighU32) \
diff --git a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
index dee84943fa..640a7e439a 100644
--- a/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -35,7 +35,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_RotLeftAndClear64:
case kPPC_RotLeftAndClearLeft64:
case kPPC_RotLeftAndClearRight64:
- case kPPC_Add:
+ case kPPC_Add32:
+ case kPPC_Add64:
case kPPC_AddWithOverflow32:
case kPPC_AddPair:
case kPPC_AddDouble:
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 768b188aaa..c7e1fa34c1 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -155,6 +155,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -339,6 +342,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// The architecture supports unaligned access, so VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -836,7 +844,7 @@ void VisitPairBinop(InstructionSelector* selector, InstructionCode opcode,
}
void InstructionSelector::VisitInt32PairAdd(Node* node) {
- VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
+ VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
}
void InstructionSelector::VisitInt32PairSub(Node* node) {
@@ -1013,13 +1021,13 @@ void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitInt32Add(Node* node) {
- VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+ VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
#if V8_TARGET_ARCH_PPC64
void InstructionSelector::VisitInt64Add(Node* node) {
- VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
}
#endif
@@ -1481,11 +1489,11 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
- return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+ return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
&cont);
}
FlagsContinuation cont;
- VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+ VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
}
@@ -1530,9 +1538,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1693,7 +1704,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
#if V8_TARGET_ARCH_PPC64
case IrOpcode::kInt64AddWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
- return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+ return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
kInt16Imm, cont);
case IrOpcode::kInt64SubWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1782,6 +1793,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
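Both visitors reuse the compare-against-zero machinery and differ only in polarity: TrapIf traps when the tested word is non-zero (kNotEqual), TrapUnless when it is zero (kEqual). A tiny standalone sketch of that inversion (hypothetical names):

    #include <cassert>

    // Hypothetical sketch of the trap polarity: TrapIf fires on a
    // non-zero word (kNotEqual against zero), TrapUnless on zero
    // (kEqual against zero).
    enum class Cond { kEqual, kNotEqual };

    bool ShouldTrap(Cond cond, int word) {
      return cond == Cond::kNotEqual ? word != 0 : word == 0;
    }

    int main() {
      assert(ShouldTrap(Cond::kNotEqual, 1));   // TrapIf(1)     -> trap
      assert(!ShouldTrap(Cond::kNotEqual, 0));  // TrapIf(0)     -> fall through
      assert(ShouldTrap(Cond::kEqual, 0));      // TrapUnless(0) -> trap
      return 0;
    }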
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
PPCOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 14695c11b8..a318dd02ae 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -4,10 +4,10 @@
#include "src/compiler/raw-machine-assembler.h"
-#include "src/code-factory.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/scheduler.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -51,12 +51,12 @@ Schedule* RawMachineAssembler::Export() {
os << *schedule_;
}
schedule_->EnsureCFGWellFormedness();
+ Scheduler::ComputeSpecialRPO(zone(), schedule_);
schedule_->PropagateDeferredMark();
if (FLAG_trace_turbo_scheduler) {
PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
os << *schedule_;
}
- Scheduler::ComputeSpecialRPO(zone(), schedule_);
// Invalidate RawMachineAssembler.
Schedule* schedule = schedule_;
schedule_ = nullptr;
@@ -170,295 +170,28 @@ void RawMachineAssembler::Comment(const char* msg) {
AddNode(machine()->Comment(msg));
}
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
- Node** args) {
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 1;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- return AddNode(common()->Call(desc), input_count, buffer);
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
+ Node* const* inputs) {
+ DCHECK(!desc->NeedsFrameState());
+ // +1 is for target.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+ return AddNode(common()->Call(desc), input_count, inputs);
}
-
Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
- Node* function, Node** args,
- Node* frame_state) {
+ int input_count,
+ Node* const* inputs) {
DCHECK(desc->NeedsFrameState());
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 2;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- buffer[index++] = frame_state;
- return AddNode(common()->Call(desc), input_count, buffer);
-}
-
-Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
- Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(0);
-
- return AddNode(common()->Call(descriptor), centry, ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg1, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(1);
-
- return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
-}
-
-
-Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(2);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
- context);
-}
-
-Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(3);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
- arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(4);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
- ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* context) {
- CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
- int return_count = static_cast<int>(descriptor->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(5);
-
- return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
- arg5, ref, arity, context);
-}
-
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
- Node** args) {
- int param_count = static_cast<int>(desc->ParameterCount());
- int input_count = param_count + 1;
- Node** buffer = zone()->NewArray<Node*>(input_count);
- int index = 0;
- buffer[index++] = function;
- for (int i = 0; i < param_count; i++) {
- buffer[index++] = args[i];
- }
- Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
+ // +2 is for target and frame state.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 2);
+ return AddNode(common()->Call(desc), input_count, inputs);
}
-Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
- Node* context) {
- const int kArity = 0;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
- Node* arg1, Node* context) {
- const int kArity = 1;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-
-Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
- Node* arg1, Node* arg2,
- Node* context) {
- const int kArity = 2;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* context) {
- const int kArity = 3;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* context) {
- const int kArity = 4;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5,
- Node* context) {
- const int kArity = 5;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- current_block_ = nullptr;
- return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
- Node* arg1, Node* arg2, Node* arg3,
- Node* arg4, Node* arg5, Node* arg6,
- Node* context) {
- const int kArity = 6;
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
- zone(), function, kArity, Operator::kNoProperties,
- CallDescriptor::kSupportsTailCalls);
- int return_count = static_cast<int>(desc->ReturnCount());
-
- Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
- Node* ref = AddNode(
- common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(kArity);
-
- Node* nodes[] = {centry, arg1, arg2, arg3, arg4,
- arg5, arg6, ref, arity, context};
- Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
+ Node* const* inputs) {
+ // +1 is for target.
+ DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+ Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
schedule()->AddTailCall(CurrentBlock(), tail_call);
current_block_ = nullptr;
return tail_call;
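The refactored CallN/CallNWithFrameState/TailCallN no longer copy arguments into a freshly allocated buffer; the caller now builds one flat array with the target first, the parameters after it, and (for CallNWithFrameState) the frame state last. A hedged caller-side sketch of that convention (stand-in types, not the V8 API):

    #include <cstddef>

    // Hypothetical caller-side sketch of the new convention: one flat
    // array, call target first, then the parameters, so input_count is
    // desc->ParameterCount() + 1 (+2 when a frame state is appended).
    struct Node {};

    int main() {
      Node target, arg0, arg1;
      Node* inputs[] = {&target, &arg0, &arg1};
      constexpr size_t kInputCount = sizeof(inputs) / sizeof(inputs[0]);
      static_assert(kInputCount == 2 + 1, "two parameters plus the target");
      // m.CallN(desc, kInputCount, inputs);  // hypothetical call site
      (void)inputs;
      return 0;
    }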
@@ -502,6 +235,21 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0, arg1);
}
+Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
+ MachineType arg0_type,
+ MachineType arg1_type,
+ MachineType arg2_type, Node* function,
+ Node* arg0, Node* arg1, Node* arg2) {
+ MachineSignature::Builder builder(zone(), 1, 3);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+}
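CallCFunction3 follows the same recipe as the existing 0/1/2/8-argument helpers: declare one return type and N parameter types, build a simplified C call descriptor, then add the call node. A minimal standalone sketch of the signature-builder half of that recipe (hypothetical mini-types, not MachineSignature itself):

    #include <cassert>
    #include <vector>

    // Hypothetical mini-version of the signature-builder recipe behind
    // CallCFunction3: one return type, then each parameter in order.
    enum class MachineType { kInt32, kFloat64, kPointer };

    struct SignatureBuilder {
      std::vector<MachineType> returns, params;
      void AddReturn(MachineType t) { returns.push_back(t); }
      void AddParam(MachineType t) { params.push_back(t); }
    };

    int main() {
      SignatureBuilder builder;                 // (zone(), 1, 3) in the real API
      builder.AddReturn(MachineType::kInt32);
      builder.AddParam(MachineType::kPointer);  // arg0_type
      builder.AddParam(MachineType::kInt32);    // arg1_type
      builder.AddParam(MachineType::kFloat64);  // arg2_type
      assert(builder.returns.size() == 1 && builder.params.size() == 3);
      return 0;
    }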
Node* RawMachineAssembler::CallCFunction8(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 6d2accb861..af36b8c08a 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -653,6 +653,12 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Float64RoundTiesEven(Node* a) {
return AddNode(machine()->Float64RoundTiesEven().op(), a);
}
+ Node* Word32ReverseBytes(Node* a) {
+ return AddNode(machine()->Word32ReverseBytes().op(), a);
+ }
+ Node* Word64ReverseBytes(Node* a) {
+ return AddNode(machine()->Word64ReverseBytes().op(), a);
+ }
// Float64 bit operations.
Node* Float64ExtractLowWord32(Node* a) {
@@ -701,26 +707,18 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Call a given call descriptor and the given arguments.
- Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+ // The call target is passed as part of the {inputs} array.
+ Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
// Call a given call descriptor and the given arguments and frame-state.
- Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
- Node* frame_state);
- // Call to a runtime function with zero arguments.
- Node* CallRuntime0(Runtime::FunctionId function, Node* context);
- // Call to a runtime function with one arguments.
- Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
- // Call to a runtime function with two arguments.
- Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* context);
- // Call to a runtime function with three arguments.
- Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* context);
- // Call to a runtime function with four arguments.
- Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* context);
- // Call to a runtime function with five arguments.
- Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* context);
+ // The call target and frame state are passed as part of the {inputs} array.
+ Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+ Node* const* inputs);
+
+ // Tail call a given call descriptor and the given arguments.
+ // The call target is passed as part of the {inputs} array.
+ Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -730,6 +728,10 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
Node* arg1);
+ // Call to a C function with three arguments.
+ Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2);
// Call to a C function with eight arguments.
Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
@@ -739,30 +741,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
- // Tail call the given call descriptor and the given arguments.
- Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
- // Tail call to a runtime function with zero arguments.
- Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
- // Tail call to a runtime function with one argument.
- Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
- Node* context);
- // Tail call to a runtime function with two arguments.
- Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* context);
- // Tail call to a runtime function with three arguments.
- Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* context);
- // Tail call to a runtime function with four arguments.
- Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* context);
- // Tail call to a runtime function with five arguments.
- Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* context);
- // Tail call to a runtime function with six arguments.
- Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5, Node* arg6,
- Node* context);
-
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
// the current basic block or create new basic blocks for labels.
diff --git a/deps/v8/src/compiler/redundancy-elimination.cc b/deps/v8/src/compiler/redundancy-elimination.cc
index 6dcf2bf4cf..707752f364 100644
--- a/deps/v8/src/compiler/redundancy-elimination.cc
+++ b/deps/v8/src/compiler/redundancy-elimination.cc
@@ -16,11 +16,13 @@ RedundancyElimination::RedundancyElimination(Editor* editor, Zone* zone)
RedundancyElimination::~RedundancyElimination() {}
Reduction RedundancyElimination::Reduce(Node* node) {
+ if (node_checks_.Get(node)) return NoChange();
switch (node->opcode()) {
case IrOpcode::kCheckBounds:
case IrOpcode::kCheckFloat64Hole:
case IrOpcode::kCheckHeapObject:
case IrOpcode::kCheckIf:
+ case IrOpcode::kCheckInternalizedString:
case IrOpcode::kCheckNumber:
case IrOpcode::kCheckSmi:
case IrOpcode::kCheckString:
@@ -36,6 +38,11 @@ Reduction RedundancyElimination::Reduce(Node* node) {
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedUint32ToInt32:
return ReduceCheckNode(node);
+ case IrOpcode::kSpeculativeNumberAdd:
+ case IrOpcode::kSpeculativeNumberSubtract:
+ // For increments and decrements by a constant, try to learn from the last
+ // bounds check.
+ return TryReuseBoundsCheckForFirstInput(node);
case IrOpcode::kEffectPhi:
return ReduceEffectPhi(node);
case IrOpcode::kDead:
@@ -114,7 +121,14 @@ RedundancyElimination::EffectPathChecks::AddCheck(Zone* zone,
namespace {
bool IsCompatibleCheck(Node const* a, Node const* b) {
- if (a->op() != b->op()) return false;
+ if (a->op() != b->op()) {
+ if (a->opcode() == IrOpcode::kCheckInternalizedString &&
+ b->opcode() == IrOpcode::kCheckString) {
+ // CheckInternalizedString(node) implies CheckString(node)
+ } else {
+ return false;
+ }
+ }
for (int i = a->op()->ValueInputCount(); --i >= 0;) {
if (a->InputAt(i) != b->InputAt(i)) return false;
}
@@ -133,6 +147,17 @@ Node* RedundancyElimination::EffectPathChecks::LookupCheck(Node* node) const {
return nullptr;
}
+Node* RedundancyElimination::EffectPathChecks::LookupBoundsCheckFor(
+ Node* node) const {
+ for (Check const* check = head_; check != nullptr; check = check->next) {
+ if (check->node->opcode() == IrOpcode::kCheckBounds &&
+ check->node->InputAt(0) == node) {
+ return check->node;
+ }
+ }
+ return nullptr;
+}
+
RedundancyElimination::EffectPathChecks const*
RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
size_t const id = node->id();
@@ -158,10 +183,41 @@ Reduction RedundancyElimination::ReduceCheckNode(Node* node) {
ReplaceWithValue(node, check);
return Replace(check);
}
+
// Learn from this check.
return UpdateChecks(node, checks->AddCheck(zone(), node));
}
+Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+ node->opcode() == IrOpcode::kSpeculativeNumberSubtract);
+
+ DCHECK_EQ(1, node->op()->EffectInputCount());
+ DCHECK_EQ(1, node->op()->EffectOutputCount());
+
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ EffectPathChecks const* checks = node_checks_.Get(effect);
+
+ // If we do not know anything about the predecessor, do not propagate just yet
+ // because we will have to recompute anyway once we compute the predecessor.
+ if (checks == nullptr) return NoChange();
+
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ // Only use bounds checks for increments/decrements by a constant.
+ if (right->opcode() == IrOpcode::kNumberConstant) {
+ if (Node* bounds_check = checks->LookupBoundsCheckFor(left)) {
+ // Only use the bounds checked type if it is better.
+ if (NodeProperties::GetType(bounds_check)
+ ->Is(NodeProperties::GetType(left))) {
+ node->ReplaceInput(0, bounds_check);
+ }
+ }
+ }
+
+ return UpdateChecks(node, checks);
+}
+
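The replacement above is deliberately guarded: the bounds-checked alias is substituted only when its type Is() (is a subtype of) the raw input's type, so the rewrite can only narrow, never widen, what later typing sees. A standalone sketch of that subtype guard (hypothetical interval type):

    #include <cassert>

    // Hypothetical sketch of the narrowing guard: reuse the
    // bounds-checked alias only if its (interval) type is contained in
    // the raw input's type.
    struct Range {
      int min, max;
      bool Is(const Range& other) const {  // subtype test
        return min >= other.min && max <= other.max;
      }
    };

    int main() {
      Range checked{0, 9};        // type after CheckBounds
      Range original{-100, 100};  // type of the raw input
      assert(checked.Is(original));   // narrower: safe to substitute
      assert(!original.Is(checked));  // wider: keep the raw input
      return 0;
    }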
Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
Node* const control = NodeProperties::GetControlInput(node);
if (control->opcode() == IrOpcode::kLoop) {
diff --git a/deps/v8/src/compiler/redundancy-elimination.h b/deps/v8/src/compiler/redundancy-elimination.h
index 88f9032a84..786c9608df 100644
--- a/deps/v8/src/compiler/redundancy-elimination.h
+++ b/deps/v8/src/compiler/redundancy-elimination.h
@@ -34,6 +34,7 @@ class RedundancyElimination final : public AdvancedReducer {
EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
Node* LookupCheck(Node* node) const;
+ Node* LookupBoundsCheckFor(Node* node) const;
private:
EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
@@ -62,6 +63,8 @@ class RedundancyElimination final : public AdvancedReducer {
Reduction TakeChecksFromFirstEffect(Node* node);
Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
+ Reduction TryReuseBoundsCheckForFirstInput(Node* node);
+
Zone* zone() const { return zone_; }
PathChecksForEffectNodes node_checks_;
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index cefd04af1f..5a2ed93827 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -300,6 +300,27 @@ void BlockAssessments::DropRegisters() {
}
}
+void BlockAssessments::Print() const {
+ OFStream os(stdout);
+ for (const auto pair : map()) {
+ const InstructionOperand op = pair.first;
+ const Assessment* assessment = pair.second;
+ // Use operator<< so we can write the assessment on the same
+ // line. Since we need a register configuration, just pick
+ // Turbofan for now.
+ PrintableInstructionOperand wrapper = {RegisterConfiguration::Turbofan(),
+ op};
+ os << wrapper << " : ";
+ if (assessment->kind() == AssessmentKind::Final) {
+ os << "v" << FinalAssessment::cast(assessment)->virtual_register();
+ } else {
+ os << "P";
+ }
+ os << std::endl;
+ }
+ os << std::endl;
+}
+
BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
const InstructionBlock* block) {
RpoNumber current_block_id = block->rpo_number();
@@ -352,8 +373,9 @@ void RegisterAllocatorVerifier::ValidatePendingAssessment(
// for the original operand (the one where the assessment was created for
// first) are also pending. To avoid recursion, we use a work list. To
// deal with cycles, we keep a set of seen nodes.
- ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
- ZoneSet<RpoNumber> seen(zone());
+ Zone local_zone(zone()->allocator(), ZONE_NAME);
+ ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(&local_zone);
+ ZoneSet<RpoNumber> seen(&local_zone);
worklist.push(std::make_pair(assessment, virtual_register));
seen.insert(block_id);
@@ -448,7 +470,11 @@ void RegisterAllocatorVerifier::ValidateFinalAssessment(
// is virtual_register.
const PendingAssessment* old = assessment->original_pending_assessment();
CHECK_NOT_NULL(old);
- ValidatePendingAssessment(block_id, op, current_assessments, old,
+ RpoNumber old_block = old->origin()->rpo_number();
+ DCHECK_LE(old_block, block_id);
+ BlockAssessments* old_block_assessments =
+ old_block == block_id ? current_assessments : assessments_[old_block];
+ ValidatePendingAssessment(old_block, op, old_block_assessments, old,
virtual_register);
}
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 0ed479fa99..5515843612 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -2985,7 +2985,7 @@ void LinearScanAllocator::FindFreeRegistersForRange(
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
DCHECK_GE(positions.length(), num_regs);
- for (int i = 0; i < num_regs; i++) {
+ for (int i = 0; i < num_regs; ++i) {
positions[i] = LifetimePosition::MaxPosition();
}
@@ -3009,9 +3009,17 @@ void LinearScanAllocator::FindFreeRegistersForRange(
for (LiveRange* cur_inactive : inactive_live_ranges()) {
DCHECK(cur_inactive->End() > range->Start());
+ int cur_reg = cur_inactive->assigned_register();
+ // No need to carry out intersections, when this register won't be
+    // No need to carry out intersections when this register won't be
+    // interesting to this range anyway.
+ if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+ positions[cur_reg] < range->Start()) {
+ continue;
+ }
+
LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
if (!next_intersection.IsValid()) continue;
- int cur_reg = cur_inactive->assigned_register();
if (kSimpleFPAliasing || !check_fp_aliasing()) {
positions[cur_reg] = Min(positions[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
@@ -3111,8 +3119,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(
const int* codes = allocatable_register_codes();
MachineRepresentation rep = current->representation();
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
- rep == MachineRepresentation::kSimd128))
+ rep == MachineRepresentation::kSimd128)) {
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+ }
DCHECK_GE(free_until_pos.length(), num_codes);
@@ -3166,6 +3175,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
rep == MachineRepresentation::kSimd128))
GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  // use_pos keeps track of the positions at which a register/alias is used.
+  // block_pos keeps track of the positions from which a register/alias is
+  // blocked.
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_regs; i++) {
@@ -3181,6 +3193,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
+ DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
+ block_pos[cur_reg]);
use_pos[cur_reg] =
range->NextLifetimePositionRegisterIsBeneficial(current->Start());
}
@@ -3196,7 +3210,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition::GapFromInstructionIndex(0);
} else {
use_pos[aliased_reg] =
- range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+ Min(block_pos[aliased_reg],
+ range->NextLifetimePositionRegisterIsBeneficial(
+ current->Start()));
}
}
}
@@ -3204,10 +3220,23 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
for (LiveRange* range : inactive_live_ranges()) {
DCHECK(range->End() > current->Start());
- LifetimePosition next_intersection = range->FirstIntersection(current);
- if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
bool is_fixed = range->TopLevel()->IsFixed();
+
+    // Don't perform costly intersections if they are guaranteed not to update
+ // block_pos or use_pos.
+ // TODO(mtrofin): extend to aliased ranges, too.
+ if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+ if (is_fixed) {
+ if (block_pos[cur_reg] < range->Start()) continue;
+ } else {
+ if (use_pos[cur_reg] < range->Start()) continue;
+ }
+ }
+
+ LifetimePosition next_intersection = range->FirstIntersection(current);
+ if (!next_intersection.IsValid()) continue;
+
if (kSimpleFPAliasing || !check_fp_aliasing()) {
if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
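Both allocator loops apply the same early-out before calling FirstIntersection: if the position already recorded for the range's register precedes the candidate's start, a fresh intersection could only land later and would be discarded by the Min anyway. A standalone sketch of the guard (hypothetical names):

    #include <cassert>

    // Hypothetical sketch of the early-out: if the tracked position for
    // this register already precedes the candidate range's start, a new
    // intersection can only be later and Min() would discard it.
    bool CanSkipIntersection(int tracked_pos, int range_start) {
      return tracked_pos < range_start;
    }

    int main() {
      assert(CanSkipIntersection(5, 10));   // skip the interval walk
      assert(!CanSkipIntersection(10, 5));  // must still intersect
      return 0;
    }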
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 4d002cc3c6..7a5a43e61a 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
@@ -587,33 +588,33 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (output_rep == MachineRepresentation::kBit) {
return node; // Sloppy comparison -> word32
} else if (output_rep == MachineRepresentation::kFloat64) {
- if (output_type->Is(Type::Unsigned32())) {
- op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- op = machine()->TruncateFloat64ToWord32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kFloat32) {
node = InsertChangeFloat32ToFloat64(node); // float32 -> float64 -> int32
- if (output_type->Is(Type::Unsigned32())) {
- op = machine()->ChangeFloat64ToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = machine()->ChangeFloat64ToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- op = machine()->TruncateFloat64ToWord32();
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = machine()->ChangeFloat64ToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ op = machine()->TruncateFloat64ToWord32();
}
} else if (output_rep == MachineRepresentation::kTaggedSigned) {
if (output_type->Is(Type::Signed32())) {
@@ -627,16 +628,8 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
}
} else if (output_rep == MachineRepresentation::kTagged ||
output_rep == MachineRepresentation::kTaggedPointer) {
- if (output_type->Is(Type::Unsigned32())) {
- op = simplified()->ChangeTaggedToUint32();
- } else if (output_type->Is(Type::Signed32())) {
+ if (output_type->Is(Type::Signed32())) {
op = simplified()->ChangeTaggedToInt32();
- } else if (use_info.truncation().IsUsedAsWord32()) {
- if (use_info.type_check() != TypeCheckKind::kNone) {
- op = simplified()->CheckedTruncateTaggedToWord32();
- } else {
- op = simplified()->TruncateTaggedToWord32();
- }
} else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
op = simplified()->CheckedTaggedSignedToInt32();
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
@@ -644,6 +637,14 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else if (output_type->Is(Type::Unsigned32())) {
+ op = simplified()->ChangeTaggedToUint32();
+ } else if (use_info.truncation().IsUsedAsWord32()) {
+ if (output_type->Is(Type::NumberOrOddball())) {
+ op = simplified()->TruncateTaggedToWord32();
+ } else if (use_info.type_check() != TypeCheckKind::kNone) {
+ op = simplified()->CheckedTruncateTaggedToWord32();
+ }
}
} else if (output_rep == MachineRepresentation::kWord32) {
// Only the checked case should get here, the non-checked case is
@@ -694,8 +695,12 @@ Node* RepresentationChanger::GetBitRepresentationFor(
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
- return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
+ HeapObjectMatcher m(node);
+ if (m.Is(factory()->false_value())) {
+ return jsgraph()->Int32Constant(0);
+ } else if (m.Is(factory()->true_value())) {
+ return jsgraph()->Int32Constant(1);
+ }
}
default:
break;
@@ -812,6 +817,24 @@ const Operator* RepresentationChanger::Int32OverflowOperatorFor(
}
}
+const Operator* RepresentationChanger::TaggedSignedOperatorFor(
+ IrOpcode::Value opcode) {
+ switch (opcode) {
+ case IrOpcode::kSpeculativeNumberLessThan:
+ return machine()->Is32() ? machine()->Int32LessThan()
+ : machine()->Int64LessThan();
+ case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+ return machine()->Is32() ? machine()->Int32LessThanOrEqual()
+ : machine()->Int64LessThanOrEqual();
+ case IrOpcode::kSpeculativeNumberEqual:
+ return machine()->Is32() ? machine()->Word32Equal()
+ : machine()->Word64Equal();
+ default:
+ UNREACHABLE();
+ return nullptr;
+ }
+}
+
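Lowering these speculative Smi comparisons straight to machine word comparisons is sound because Smi tagging is a left shift, which is monotonic over the Smi range, so the tagged words compare exactly like the untagged integers. A standalone sketch of that invariant (32-bit tagging assumed, hypothetical helper):

    #include <cassert>
    #include <cstdint>

    // Hypothetical sketch: 32-bit Smi tagging (value * 2, i.e. a left
    // shift by one) is monotonic over the Smi range, so a plain machine
    // comparison of tagged words matches the untagged comparison.
    int32_t TagSmi(int32_t value) { return value * 2; }

    int main() {
      assert((TagSmi(-3) < TagSmi(4)) == (-3 < 4));
      assert((TagSmi(7) <= TagSmi(7)) == (7 <= 7));
      assert((TagSmi(5) == TagSmi(5)) == (5 == 5));
      return 0;
    }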
const Operator* RepresentationChanger::Uint32OperatorFor(
IrOpcode::Value opcode) {
switch (opcode) {
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index d7895da825..4fa7d917b7 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -238,6 +238,7 @@ class RepresentationChanger final {
UseInfo use_info);
const Operator* Int32OperatorFor(IrOpcode::Value opcode);
const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
+ const Operator* TaggedSignedOperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
const Operator* Float64OperatorFor(IrOpcode::Value opcode);
diff --git a/deps/v8/src/compiler/s390/code-generator-s390.cc b/deps/v8/src/compiler/s390/code-generator-s390.cc
index 5dcc82f7a0..f99ab37838 100644
--- a/deps/v8/src/compiler/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/s390/code-generator-s390.cc
@@ -119,6 +119,16 @@ class S390OperandConverter final : public InstructionOperandConverter {
InstructionOperand* op = instr_->InputAt(index);
return SlotToMemOperand(AllocatedOperand::cast(op)->index());
}
+
+ MemOperand InputStackSlot32(size_t index) {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+    // We want to read the 32 bits directly from memory.
+ MemOperand mem = InputStackSlot(index);
+ return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+ return InputStackSlot(index);
+#endif
+ }
};
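InputStackSlot32 encodes the same endianness fix that the three hand-written workarounds below used to repeat: on big-endian S390X, the low 32 bits of a 64-bit slot live at byte offset +4. A standalone sketch of the layout (hypothetical helper, host-independent decoding):

    #include <cassert>
    #include <cstdint>

    // Hypothetical illustration of the +4 bias: in a big-endian 64-bit
    // stack slot, the low-order 32 bits occupy bytes [4, 8).
    uint32_t ReadBigEndian32(const uint8_t* p) {
      return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
             (uint32_t(p[2]) << 8) | uint32_t(p[3]);
    }

    int main() {
      const uint8_t slot[8] = {0, 0, 0, 0, 0, 0, 0, 42};  // big-endian 42
      assert(ReadBigEndian32(slot + 4) == 42);  // mem.offset() + 4: low half
      assert(ReadBigEndian32(slot) == 0);       // offset 0: high half only
      return 0;
    }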
static inline bool HasRegisterInput(Instruction* instr, int index) {
@@ -335,9 +345,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
} \
} while (0)
-#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
- do { \
- __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
+#define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
+ do { \
+ __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
} while (0)
// Divide instruction dr will implicitly use register pair
@@ -1223,25 +1233,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kS390_Not64:
__ Not64(i.OutputRegister(), i.InputRegister(0));
break;
- case kS390_RotLeftAndMask32:
- if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
- int shiftAmount = i.InputInt32(1);
- int endBit = 63 - i.InputInt32(3);
- int startBit = 63 - i.InputInt32(2);
- __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
- __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
- Operand(endBit), Operand::Zero(), true);
- } else {
- int shiftAmount = i.InputInt32(1);
- int clearBitLeft = 63 - i.InputInt32(2);
- int clearBitRight = i.InputInt32(3);
- __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
- __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
- __ srlg(i.OutputRegister(), i.OutputRegister(),
- Operand((clearBitLeft + clearBitRight)));
- __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
- }
- break;
#if V8_TARGET_ARCH_S390X
case kS390_RotLeftAndClear64:
if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
@@ -1357,16 +1348,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
} else if (HasImmediateInput(instr, 1)) {
__ Mul32(i.InputRegister(0), i.InputImmediate(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // msy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ Mul32(i.InputRegister(0), kScratchReg);
-#else
- __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
-#endif
+ __ Mul32(i.InputRegister(0), i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1387,16 +1369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ mr_z(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // mfy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ mr_z(r0, kScratchReg);
-#else
- __ mfy(r0, i.InputStackSlot(1));
-#endif
+ __ mfy(r0, i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1413,16 +1386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 1)) {
__ mlr(r0, i.InputRegister(1));
} else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
- // Avoid endian-issue here:
- // stg r1, 0(fp)
- // ...
- // mfy r2, 0(fp) <-- This will read the upper 32 bits
- __ lg(kScratchReg, i.InputStackSlot(1));
- __ mlr(r0, kScratchReg);
-#else
- __ ml(r0, i.InputStackSlot(1));
-#endif
+ __ ml(r0, i.InputStackSlot32(1));
} else {
UNIMPLEMENTED();
}
@@ -1692,21 +1656,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_Tst32:
if (HasRegisterInput(instr, 1)) {
- __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+ __ lr(r0, i.InputRegister(0));
+ __ nr(r0, i.InputRegister(1));
} else {
- __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ Operand opnd = i.InputImmediate(1);
+ if (is_uint16(opnd.immediate())) {
+ __ tmll(i.InputRegister(0), opnd);
+ } else {
+ __ lr(r0, i.InputRegister(0));
+ __ nilf(r0, opnd);
+ }
}
- __ LoadAndTestP_ExtendSrc(r0, r0);
break;
-#if V8_TARGET_ARCH_S390X
case kS390_Tst64:
if (HasRegisterInput(instr, 1)) {
__ AndP(r0, i.InputRegister(0), i.InputRegister(1));
} else {
- __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+ Operand opnd = i.InputImmediate(1);
+ if (is_uint16(opnd.immediate())) {
+ __ tmll(i.InputRegister(0), opnd);
+ } else {
+ __ AndP(r0, i.InputRegister(0), opnd);
+ }
}
break;
-#endif
case kS390_Float64SilenceNaN: {
DoubleRegister value = i.InputDoubleRegister(0);
DoubleRegister result = i.OutputDoubleRegister();
@@ -2152,6 +2125,82 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ S390OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(cp, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+
+ ArchOpcode op = instr->arch_opcode();
+ Condition cond = FlagsConditionToCondition(condition, op);
+ if (op == kS390_CmpDouble) {
+    // Check for unordered if necessary.
+ if (cond == le) {
+ __ bunordered(&end);
+ // Unnecessary for eq/lt since only FU bit will be set.
+ } else if (cond == gt) {
+ __ bunordered(tlabel);
+ // Unnecessary for ne/ge since only FU bit will be set.
+ }
+ }
+ __ b(cond, tlabel);
+ __ bind(&end);
+}
+
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) {
@@ -2377,11 +2426,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
switch (src.type()) {
case Constant::kInt32:
#if V8_TARGET_ARCH_S390X
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
__ mov(dst, Operand(src.ToInt32(), src.rmode()));
} else {
@@ -2390,11 +2437,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ mov(dst, Operand(src.ToInt64(), src.rmode()));
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ mov(dst, Operand(src.ToInt64()));
}
#else
diff --git a/deps/v8/src/compiler/s390/instruction-codes-s390.h b/deps/v8/src/compiler/s390/instruction-codes-s390.h
index 80e1532adb..ad5d7cbc74 100644
--- a/deps/v8/src/compiler/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/s390/instruction-codes-s390.h
@@ -31,7 +31,6 @@ namespace compiler {
V(S390_RotRight64) \
V(S390_Not32) \
V(S390_Not64) \
- V(S390_RotLeftAndMask32) \
V(S390_RotLeftAndClear64) \
V(S390_RotLeftAndClearLeft64) \
V(S390_RotLeftAndClearRight64) \
diff --git a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
index 5ebe489e39..8fc1cfb8be 100644
--- a/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-scheduler-s390.cc
@@ -32,7 +32,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_RotRight64:
case kS390_Not32:
case kS390_Not64:
- case kS390_RotLeftAndMask32:
case kS390_RotLeftAndClear64:
case kS390_RotLeftAndClearLeft64:
case kS390_RotLeftAndClearRight64:
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index eed08a9c44..d906c17fbe 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -261,6 +261,9 @@ void VisitBinop(InstructionSelector* selector, Node* node,
if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
cont->reason(), cont->frame_state());
+ } else if (cont->IsTrap()) {
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, output_count, outputs, input_count, inputs);
} else {
selector->Emit(opcode, output_count, outputs, input_count, inputs);
}
@@ -440,6 +443,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// The architecture supports unaligned access, so VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -539,6 +547,7 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
g.UseOperand(length, kUint32Imm), g.UseRegister(value));
}
+#if 0
static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
int mask_width = base::bits::CountPopulation32(value);
int mask_msb = base::bits::CountLeadingZeros32(value);
@@ -549,6 +558,7 @@ static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
*me = mask_lsb;
return true;
}
+#endif
#if V8_TARGET_ARCH_S390X
static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
@@ -564,36 +574,6 @@ static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
#endif
void InstructionSelector::VisitWord32And(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- int mb = 0;
- int me = 0;
- if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
- int sh = 0;
- Node* left = m.left().node();
- if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
- CanCover(node, left)) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.right().IsInRange(0, 31)) {
- left = mleft.left().node();
- sh = mleft.right().Value();
- if (m.left().IsWord32Shr()) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
- } else {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (me < sh) me = sh;
- }
- }
- }
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
- g.TempImmediate(me));
- return;
- }
- }
VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
}
@@ -685,25 +665,6 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
#endif
void InstructionSelector::VisitWord32Shl(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
- int mb;
- int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (me < sh) me = sh;
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
- g.TempImmediate(mb), g.TempImmediate(me));
- return;
- }
- }
- }
VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
}
@@ -752,26 +713,6 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
#endif
void InstructionSelector::VisitWord32Shr(Node* node) {
- S390OperandGenerator g(this);
- Int32BinopMatcher m(node);
- if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
- Int32BinopMatcher mleft(m.left().node());
- int sh = m.right().Value();
- int mb;
- int me;
- if (mleft.right().HasValue() &&
- IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
- // Adjust the mask such that it doesn't include any rotated bits.
- if (mb > 31 - sh) mb = 31 - sh;
- sh = (32 - sh) & 0x1f;
- if (mb >= me) {
- Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
- g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
- g.TempImmediate(mb), g.TempImmediate(me));
- return;
- }
- }
- }
VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
}
@@ -1541,9 +1482,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1620,9 +1564,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
if (selector->CanCover(user, value)) {
switch (value->opcode()) {
- case IrOpcode::kWord32Equal:
+ case IrOpcode::kWord32Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt32Sub:
+ return VisitWord32Compare(selector, value, cont);
+ case IrOpcode::kWord32And:
+ return VisitWordCompare(selector, value, kS390_Tst64, cont,
+ true, kUint32Imm);
+ default:
+ break;
+ }
+ }
+ }
return VisitWord32Compare(selector, value, cont);
+ }
case IrOpcode::kInt32LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord32Compare(selector, value, cont);
@@ -1636,9 +1598,27 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_S390X
- case IrOpcode::kWord64Equal:
+ case IrOpcode::kWord64Equal: {
cont->OverwriteAndNegateIfEqual(kEqual);
+ Int64BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ // Try to combine the branch with a comparison.
+ Node* const user = m.node();
+ Node* const value = m.left().node();
+ if (selector->CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kInt64Sub:
+ return VisitWord64Compare(selector, value, cont);
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(selector, value, kS390_Tst64, cont,
+ true, kUint32Imm);
+ default:
+ break;
+ }
+ }
+ }
return VisitWord64Compare(selector, value, cont);
+ }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
@@ -1781,6 +1761,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
S390OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
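
The VisitTrapIf/VisitTrapUnless additions above reuse the compare-zero path and carry the trap id into the instruction as one extra immediate input. A minimal standalone sketch of that flow — the types, opcodes and ids here are stand-ins, not the real V8 API:

#include <cstdio>
#include <vector>

enum class ContKind { kBranch, kDeoptimize, kSet, kTrap };

struct FlagsContinuation {
  ContKind kind;
  int trap_id;  // meaningful only when kind == ContKind::kTrap
  bool IsTrap() const { return kind == ContKind::kTrap; }
};

void Emit(int opcode, const std::vector<int>& inputs) {
  std::printf("opcode %d emitted with %zu inputs\n", opcode, inputs.size());
}

// Mirrors the new IsTrap() branch in VisitBinop: the trap id rides along
// as an appended immediate, everything else stays on the normal path.
void VisitBinop(int opcode, std::vector<int> inputs,
                const FlagsContinuation& cont) {
  if (cont.IsTrap()) inputs.push_back(cont.trap_id);
  Emit(opcode, inputs);
}

int main() {
  VisitBinop(7, {1, 2}, {ContKind::kTrap, 42});  // 3 inputs
  VisitBinop(7, {1, 2}, {ContKind::kSet, 0});    // 2 inputs
}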
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index eb3dda8c26..dcc84b31ed 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -407,7 +407,7 @@ void Schedule::PropagateDeferredMark() {
if (!block->deferred()) {
bool deferred = block->PredecessorCount() > 0;
for (auto pred : block->predecessors()) {
- if (!pred->deferred()) {
+ if (!pred->deferred() && (pred->rpo_number() < block->rpo_number())) {
deferred = false;
}
}
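
The added rpo_number() guard makes the deferred-mark propagation ignore loop back edges: only predecessors that come earlier in reverse post-order can veto a block's deferred status. A small standalone model of the fixed rule (the block shapes are invented for illustration):

#include <cstdio>
#include <vector>

struct Block {
  int rpo;
  bool deferred;
  std::vector<const Block*> preds;
};

// Only forward predecessors (pred->rpo < block->rpo) can clear the mark;
// a back edge's source has a higher RPO number and is skipped.
bool ComputeDeferred(const Block& b) {
  bool deferred = !b.preds.empty();
  for (const Block* p : b.preds) {
    if (!p->deferred && p->rpo < b.rpo) deferred = false;
  }
  return deferred;
}

int main() {
  Block header{1, true, {}};
  Block latch{3, false, {}};  // back edge source, not yet processed
  Block body{2, false, {&header, &latch}};
  // Without the guard, the unprocessed back edge would clear the mark.
  std::printf("%d\n", ComputeDeferred(body));  // prints 1
}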
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index c5a94b4297..a11d8bc4cc 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -58,6 +58,9 @@ void SimdScalarLowering::LowerGraph() {
// that they are processed after all other nodes.
PreparePhiReplacement(input);
stack_.push_front({input, 0});
+ } else if (input->opcode() == IrOpcode::kEffectPhi ||
+ input->opcode() == IrOpcode::kLoop) {
+ stack_.push_front({input, 0});
} else {
stack_.push_back({input, 0});
}
@@ -70,12 +73,14 @@ void SimdScalarLowering::LowerGraph() {
#define FOREACH_INT32X4_OPCODE(V) \
V(Int32x4Add) \
V(Int32x4ExtractLane) \
- V(CreateInt32x4)
+ V(CreateInt32x4) \
+ V(Int32x4ReplaceLane)
#define FOREACH_FLOAT32X4_OPCODE(V) \
V(Float32x4Add) \
V(Float32x4ExtractLane) \
- V(CreateFloat32x4)
+ V(CreateFloat32x4) \
+ V(Float32x4ReplaceLane)
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
@@ -102,7 +107,7 @@ static int GetParameterIndexAfterLowering(
// In function calls, the simd128 types are passed as 4 Int32 types. The
// parameters are typecast to the types as needed for various operations.
int result = old_index;
- for (int i = 0; i < old_index; i++) {
+ for (int i = 0; i < old_index; ++i) {
if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
result += 3;
}
@@ -123,7 +128,7 @@ int SimdScalarLowering::GetParameterCountAfterLowering() {
static int GetReturnCountAfterLowering(
Signature<MachineRepresentation>* signature) {
int result = static_cast<int>(signature->return_count());
- for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+ for (int i = 0; i < static_cast<int>(signature->return_count()); ++i) {
if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
result += 3;
}
@@ -131,6 +136,100 @@ static int GetReturnCountAfterLowering(
return result;
}
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
+ new_indices[0] = index;
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
+ graph()->NewNode(common()->Int32Constant(
+ static_cast<int>(i) * kLaneWidth)));
+ }
+}
+
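+// With kMaxLanes = 4 and kLaneWidth = 16 / kMaxLanes = 4 (see the header
+// change below), GetIndexNodes derives the scalar lane addresses by
+// stepping the base index in 4-byte increments. A trivially runnable
+// standalone check of that arithmetic (the base index is made up):
+//
+//   #include <cstdio>
+//
+//   constexpr int kMaxLanes = 4;
+//   constexpr int kLaneWidth = 16 / kMaxLanes;  // 4 bytes per lane
+//
+//   int main() {
+//     int index = 32;  // hypothetical byte index of a Simd128 access
+//     for (int i = 0; i < kMaxLanes; ++i) {
+//       std::printf("lane %d -> index %d\n", i, index + i * kLaneWidth);
+//     }
+//     // lane 0 -> 32, lane 1 -> 36, lane 2 -> 40, lane 3 -> 44
+//   }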
+void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
+ const Operator* load_op) {
+ if (rep == MachineRepresentation::kSimd128) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* indices[kMaxLanes];
+ GetIndexNodes(index, indices);
+ Node* rep_nodes[kMaxLanes];
+ rep_nodes[0] = node;
+ NodeProperties::ChangeOp(rep_nodes[0], load_op);
+ if (node->InputCount() > 2) {
+ DCHECK(node->InputCount() > 3);
+ Node* effect_input = node->InputAt(2);
+ Node* control_input = node->InputAt(3);
+ rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
+ control_input);
+ rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
+ control_input);
+ rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
+ control_input);
+ rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
+ } else {
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
+ }
+ }
+ ReplaceNode(node, rep_nodes);
+ } else {
+ DefaultLowering(node);
+ }
+}
+
+void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
+ const Operator* store_op,
+ SimdType rep_type) {
+ if (rep == MachineRepresentation::kSimd128) {
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* indices[kMaxLanes];
+ GetIndexNodes(index, indices);
+ DCHECK(node->InputCount() > 2);
+ Node* value = node->InputAt(2);
+ DCHECK(HasReplacement(1, value));
+ Node* rep_nodes[kMaxLanes];
+ rep_nodes[0] = node;
+ Node** rep_inputs = GetReplacementsWithType(value, rep_type);
+ rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+ NodeProperties::ChangeOp(node, store_op);
+ if (node->InputCount() > 3) {
+ DCHECK(node->InputCount() > 4);
+ Node* effect_input = node->InputAt(3);
+ Node* control_input = node->InputAt(4);
+ rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
+ effect_input, control_input);
+ rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
+ rep_nodes[3], control_input);
+ rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
+ rep_nodes[2], control_input);
+ rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
+
+ } else {
+ for (size_t i = 1; i < kMaxLanes; ++i) {
+ rep_nodes[i] =
+ graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
+ }
+ }
+
+ ReplaceNode(node, rep_nodes);
+ } else {
+ DefaultLowering(node);
+ }
+}
+
+void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
+ const Operator* op) {
+ DCHECK(node->InputCount() == 2);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+ Node* rep_node[kMaxLanes];
+ for (int i = 0; i < kMaxLanes; ++i) {
+ rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+ }
+ ReplaceNode(node, rep_node);
+}
+
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
switch (node->opcode()) {
@@ -159,13 +258,13 @@ void SimdScalarLowering::LowerNode(Node* node) {
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
Node* new_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
new_node[i] = nullptr;
}
new_node[0] = node;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kSimd128) {
- for (int i = 1; i < kMaxLanes; i++) {
+ for (int i = 1; i < kMaxLanes; ++i) {
new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
graph()->start());
}
@@ -175,6 +274,57 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
+ case IrOpcode::kLoad: {
+ MachineRepresentation rep =
+ LoadRepresentationOf(node->op()).representation();
+ const Operator* load_op;
+ if (rep_type == SimdType::kInt32) {
+ load_op = machine()->Load(MachineType::Int32());
+ } else if (rep_type == SimdType::kFloat32) {
+ load_op = machine()->Load(MachineType::Float32());
+ }
+ LowerLoadOp(rep, node, load_op);
+ break;
+ }
+ case IrOpcode::kUnalignedLoad: {
+ MachineRepresentation rep =
+ UnalignedLoadRepresentationOf(node->op()).representation();
+ const Operator* load_op;
+ if (rep_type == SimdType::kInt32) {
+ load_op = machine()->UnalignedLoad(MachineType::Int32());
+ } else if (rep_type == SimdType::kFloat32) {
+ load_op = machine()->UnalignedLoad(MachineType::Float32());
+ }
+ LowerLoadOp(rep, node, load_op);
+ break;
+ }
+ case IrOpcode::kStore: {
+ MachineRepresentation rep =
+ StoreRepresentationOf(node->op()).representation();
+ WriteBarrierKind write_barrier_kind =
+ StoreRepresentationOf(node->op()).write_barrier_kind();
+ const Operator* store_op;
+ if (rep_type == SimdType::kInt32) {
+ store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kWord32, write_barrier_kind));
+ } else {
+ store_op = machine()->Store(StoreRepresentation(
+ MachineRepresentation::kFloat32, write_barrier_kind));
+ }
+ LowerStoreOp(rep, node, store_op, rep_type);
+ break;
+ }
+ case IrOpcode::kUnalignedStore: {
+ MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+ const Operator* store_op;
+ if (rep_type == SimdType::kInt32) {
+ store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+ } else {
+ store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+ }
+ LowerStoreOp(rep, node, store_op, rep_type);
+ break;
+ }
case IrOpcode::kReturn: {
DefaultLowering(node);
int new_return_count = GetReturnCountAfterLowering(signature());
@@ -200,7 +350,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
rep_node[i] =
graph()->NewNode(common()->Projection(i), node, graph()->start());
}
@@ -214,7 +364,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
// The replacement nodes have already been created, we only have to
// replace placeholder nodes.
Node** rep_node = GetReplacements(node);
- for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+ for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node** rep_input =
GetReplacementsWithType(node->InputAt(i), rep_type);
for (int j = 0; j < kMaxLanes; j++) {
@@ -226,75 +376,51 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
break;
}
-
case IrOpcode::kInt32x4Add: {
- DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- rep_node[i] =
- graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
- }
- ReplaceNode(node, rep_node);
- break;
- }
-
- case IrOpcode::kCreateInt32x4: {
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- DCHECK(!HasReplacement(1, node->InputAt(i)));
- rep_node[i] = node->InputAt(i);
- }
- ReplaceNode(node, rep_node);
- break;
- }
-
- case IrOpcode::kInt32x4ExtractLane: {
- Node* laneNode = node->InputAt(1);
- DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
- int32_t lane = OpParameter<int32_t>(laneNode);
- Node* rep_node[kMaxLanes] = {
- GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
- nullptr, nullptr};
- ReplaceNode(node, rep_node);
+ LowerBinaryOp(node, rep_type, machine()->Int32Add());
break;
}
-
case IrOpcode::kFloat32x4Add: {
- DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
- rep_right[i]);
- }
- ReplaceNode(node, rep_node);
+ LowerBinaryOp(node, rep_type, machine()->Float32Add());
break;
}
-
+ case IrOpcode::kCreateInt32x4:
case IrOpcode::kCreateFloat32x4: {
Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
- DCHECK(!HasReplacement(1, node->InputAt(i)));
- rep_node[i] = node->InputAt(i);
+ for (int i = 0; i < kMaxLanes; ++i) {
+ if (HasReplacement(0, node->InputAt(i))) {
+ rep_node[i] = GetReplacements(node->InputAt(i))[0];
+ } else {
+ rep_node[i] = node->InputAt(i);
+ }
}
ReplaceNode(node, rep_node);
break;
}
-
+ case IrOpcode::kInt32x4ExtractLane:
case IrOpcode::kFloat32x4ExtractLane: {
- Node* laneNode = node->InputAt(1);
- DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
- int32_t lane = OpParameter<int32_t>(laneNode);
+ int32_t lane = OpParameter<int32_t>(node);
Node* rep_node[kMaxLanes] = {
GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
nullptr, nullptr};
ReplaceNode(node, rep_node);
break;
}
-
+ case IrOpcode::kInt32x4ReplaceLane:
+ case IrOpcode::kFloat32x4ReplaceLane: {
+ DCHECK_EQ(2, node->InputCount());
+ Node* repNode = node->InputAt(1);
+ int32_t lane = OpParameter<int32_t>(node);
+ DCHECK(lane >= 0 && lane <= 3);
+ Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+ if (HasReplacement(0, repNode)) {
+ rep_node[lane] = GetReplacements(repNode)[0];
+ } else {
+ rep_node[lane] = repNode;
+ }
+ ReplaceNode(node, rep_node);
+ break;
+ }
default: { DefaultLowering(node); }
}
}
@@ -322,7 +448,7 @@ void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
DCHECK(new_node[0] != nullptr ||
(new_node[1] == nullptr && new_node[2] == nullptr &&
new_node[3] == nullptr));
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
replacements_[old->id()].node[i] = new_node[i];
}
}
@@ -348,7 +474,7 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
}
Node** result = zone()->NewArray<Node*>(kMaxLanes);
if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
replacements[i]);
@@ -357,7 +483,7 @@ Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
}
}
} else {
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (replacements[i] != nullptr) {
result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
replacements[i]);
@@ -379,17 +505,17 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
int value_count = phi->op()->ValueInputCount();
SimdType type = ReplacementType(phi);
Node** inputs_rep[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
}
- for (int i = 0; i < value_count; i++) {
+ for (int i = 0; i < value_count; ++i) {
for (int j = 0; j < kMaxLanes; j++) {
inputs_rep[j][i] = placeholder_;
}
}
Node* rep_nodes[kMaxLanes];
- for (int i = 0; i < kMaxLanes; i++) {
+ for (int i = 0; i < kMaxLanes; ++i) {
if (type == SimdType::kInt32) {
rep_nodes[i] = graph()->NewNode(
common()->Phi(MachineRepresentation::kWord32, value_count),
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 39449f4b9f..c795c6b88b 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -31,6 +31,7 @@ class SimdScalarLowering {
enum class SimdType : uint8_t { kInt32, kFloat32 };
static const int kMaxLanes = 4;
+ static const int kLaneWidth = 16 / kMaxLanes;
struct Replacement {
Node* node[kMaxLanes];
@@ -53,6 +54,12 @@ class SimdScalarLowering {
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
+ void GetIndexNodes(Node* index, Node** new_indices);
+ void LowerLoadOp(MachineRepresentation rep, Node* node,
+ const Operator* load_op);
+ void LowerStoreOp(MachineRepresentation rep, Node* node,
+ const Operator* store_op, SimdType rep_type);
+ void LowerBinaryOp(Node* node, SimdType rep_type, const Operator* op);
struct NodeState {
Node* node;
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index c90d7437bf..c9fda35b36 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -209,8 +209,30 @@ class InputUseInfos {
#endif // DEBUG
-} // namespace
+bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
+ Zone* type_zone) {
+ // We assume the inputs are checked Signed32 (or known statically
+ // to be Signed32). Technically, the inputs could also be minus zero, but
+ // that cannot cause overflow.
+ left = Type::Intersect(left, Type::Signed32(), type_zone);
+ right = Type::Intersect(right, Type::Signed32(), type_zone);
+ if (!left->IsInhabited() || !right->IsInhabited()) return false;
+ switch (op->opcode()) {
+ case IrOpcode::kSpeculativeNumberAdd:
+ return (left->Max() + right->Max() > kMaxInt) ||
+ (left->Min() + right->Min() < kMinInt);
+
+ case IrOpcode::kSpeculativeNumberSubtract:
+ return (left->Max() - right->Min() > kMaxInt) ||
+ (left->Min() - right->Max() < kMinInt);
+
+ default:
+ UNREACHABLE();
+ }
+ return true;
+}
+} // namespace
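
CanOverflowSigned32 is what lets the lowering further down replace a checked add/sub with the pure Int32 op when the feedback types prove the result stays in Signed32 range. A standalone numeric illustration of the addition case (the ranges are invented):

#include <cstdint>
#include <cstdio>

struct Range { int64_t min, max; };  // stand-in for a Signed32 type's bounds

bool CanAddOverflowSigned32(Range l, Range r) {
  constexpr int64_t kMaxInt = 2147483647;   // INT32_MAX
  constexpr int64_t kMinInt = -2147483648;  // INT32_MIN
  return (l.max + r.max > kMaxInt) || (l.min + r.min < kMinInt);
}

int main() {
  // Bounds comfortably inside int32: the pure Int32Add suffices.
  std::printf("%d\n", CanAddOverflowSigned32({0, 10}, {0, 100}));  // 0
  // Upper bounds can sum past INT32_MAX: keep the overflow-checked op.
  std::printf("%d\n", CanAddOverflowSigned32({0, 2147483647}, {0, 1}));  // 1
}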
class RepresentationSelector {
public:
@@ -675,6 +697,11 @@ class RepresentationSelector {
GetUpperBound(node->InputAt(1))->Is(type);
}
+ bool IsNodeRepresentationTagged(Node* node) {
+ MachineRepresentation representation = GetInfo(node)->representation();
+ return IsAnyTagged(representation);
+ }
+
bool OneInputCannotBe(Node* node, Type* type) {
DCHECK_EQ(2, node->op()->ValueInputCount());
return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
@@ -988,8 +1015,9 @@ class RepresentationSelector {
machine_type.semantic() == MachineSemantic::kUint32);
(*types)[i] = machine_type;
}
- NodeProperties::ChangeOp(node,
- jsgraph_->common()->TypedStateValues(types));
+ SparseInputMask mask = SparseInputMaskOf(node->op());
+ NodeProperties::ChangeOp(
+ node, jsgraph_->common()->TypedStateValues(types, mask));
}
SetOutput(node, MachineRepresentation::kTagged);
}
@@ -1002,9 +1030,14 @@ class RepresentationSelector {
// TODO(turbofan): Special treatment for ExternalPointer here,
// to avoid incompatible truncations. We really need a story
// for the JSFunction::entry field.
- UseInfo use_info = input_type->Is(Type::ExternalPointer())
- ? UseInfo::PointerInt()
- : UseInfo::Any();
+ UseInfo use_info = UseInfo::None();
+ if (input_type->IsInhabited()) {
+ if (input_type->Is(Type::ExternalPointer())) {
+ use_info = UseInfo::PointerInt();
+ } else {
+ use_info = UseInfo::Any();
+ }
+ }
EnqueueInput(node, i, use_info);
}
} else if (lower()) {
@@ -1019,7 +1052,9 @@ class RepresentationSelector {
// TODO(turbofan): Special treatment for ExternalPointer here,
// to avoid incompatible truncations. We really need a story
// for the JSFunction::entry field.
- if (input_type->Is(Type::ExternalPointer())) {
+ if (!input_type->IsInhabited()) {
+ (*types)[i] = MachineType::None();
+ } else if (input_type->Is(Type::ExternalPointer())) {
(*types)[i] = MachineType::Pointer();
} else {
MachineRepresentation rep = input_type->IsInhabited()
@@ -1080,17 +1115,14 @@ class RepresentationSelector {
return kNoWriteBarrier;
}
if (value_type->IsHeapConstant()) {
- Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
- RootIndexMap root_index_map(jsgraph_->isolate());
- int root_index = root_index_map.Lookup(*value_object);
- if (root_index != RootIndexMap::kInvalidRootIndex &&
- jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
- // Write barriers are unnecessary for immortal immovable roots.
- return kNoWriteBarrier;
- }
- if (value_object->IsMap()) {
- // Write barriers for storing maps are cheaper.
- return kMapWriteBarrier;
+ Heap::RootListIndex root_index;
+ Heap* heap = jsgraph_->isolate()->heap();
+ if (heap->IsRootHandle(value_type->AsHeapConstant()->Value(),
+ &root_index)) {
+ if (heap->RootIsImmortalImmovable(root_index)) {
+ // Write barriers are unnecessary for immortal immovable roots.
+ return kNoWriteBarrier;
+ }
}
}
if (field_representation == MachineRepresentation::kTaggedPointer ||
@@ -1164,6 +1196,7 @@ class RepresentationSelector {
if (BothInputsAre(node, Type::PlainPrimitive())) {
if (truncation.IsUnused()) return VisitUnused(node);
}
+
if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
(GetUpperBound(node)->Is(Type::Signed32()) ||
GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1177,33 +1210,38 @@ class RepresentationSelector {
// Try to use type feedback.
NumberOperationHint hint = NumberOperationHintOf(node->op());
- // Handle the case when no int32 checks on inputs are necessary
- // (but an overflow check is needed on the output).
- if (BothInputsAre(node, Type::Signed32()) ||
- (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
- NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
- // If both the inputs the feedback are int32, use the overflow op.
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
+ if (hint == NumberOperationHint::kSignedSmall ||
+ hint == NumberOperationHint::kSigned32) {
+ Type* left_feedback_type = TypeOf(node->InputAt(0));
+ Type* right_feedback_type = TypeOf(node->InputAt(1));
+ // Handle the case when no int32 checks on inputs are necessary (but
+ // an overflow check is needed on the output).
+ // TODO(jarin) We should not look at the upper bound because the typer
+ // could have already baked in some feedback into the upper bound.
+ if (BothInputsAre(node, Type::Signed32()) ||
+ (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
+ GetUpperBound(node)->Is(type_cache_.kSafeInteger))) {
VisitBinop(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32, Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
- return;
+ } else {
+ UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+ // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+ // a minus zero check for the right hand side, since we already
+ // know that the left hand side is a proper Signed32 value,
+ // potentially guarded by a check.
+ UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+ hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+ VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+ Type::Signed32());
+ }
+ if (lower()) {
+ if (CanOverflowSigned32(node->op(), left_feedback_type,
+ right_feedback_type, graph_zone())) {
+ ChangeToInt32OverflowOp(node);
+ } else {
+ ChangeToPureOp(node, Int32Op(node));
+ }
}
- }
-
- if (hint == NumberOperationHint::kSignedSmall ||
- hint == NumberOperationHint::kSigned32) {
- UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
- // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
- // a minus zero check for the right hand side, since we already
- // know that the left hand side is a proper Signed32 value,
- // potentially guarded by a check.
- UseInfo right_use = CheckedUseInfoAsWord32FromHint(
- hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
- VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
- Type::Signed32());
- if (lower()) ChangeToInt32OverflowOp(node);
return;
}
@@ -1550,13 +1588,38 @@ class RepresentationSelector {
NumberOperationHint hint = NumberOperationHintOf(node->op());
switch (hint) {
case NumberOperationHint::kSignedSmall:
- case NumberOperationHint::kSigned32:
- VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
- MachineRepresentation::kBit);
- if (lower()) ChangeToPureOp(node, Int32Op(node));
+ case NumberOperationHint::kSigned32: {
+ if (propagate()) {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kBit);
+ } else if (retype()) {
+ SetOutput(node, MachineRepresentation::kBit, Type::Any());
+ } else {
+ DCHECK(lower());
+ Node* lhs = node->InputAt(0);
+ Node* rhs = node->InputAt(1);
+ if (IsNodeRepresentationTagged(lhs) &&
+ IsNodeRepresentationTagged(rhs)) {
+ VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+ MachineRepresentation::kBit);
+ ChangeToPureOp(
+ node, changer_->TaggedSignedOperatorFor(node->opcode()));
+
+ } else {
+ VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+ MachineRepresentation::kBit);
+ ChangeToPureOp(node, Int32Op(node));
+ }
+ }
return;
- case NumberOperationHint::kNumber:
+ }
case NumberOperationHint::kNumberOrOddball:
+ // Abstract and strict equality don't perform ToNumber conversions
+ // on Oddballs, so make sure we don't accidentally sneak in a
+ // hint with Oddball feedback here.
+ DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
+ // Fallthrough
+ case NumberOperationHint::kNumber:
VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
MachineRepresentation::kBit);
if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -2156,9 +2219,15 @@ class RepresentationSelector {
return VisitBinop(node, UseInfo::AnyTagged(),
MachineRepresentation::kTaggedPointer);
}
+ case IrOpcode::kStringCharAt: {
+ VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+ MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kStringCharCodeAt: {
+ // TODO(turbofan): Allow builtins to return untagged values.
VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
+ MachineRepresentation::kTaggedSigned);
return;
}
case IrOpcode::kStringFromCharCode: {
@@ -2207,6 +2276,17 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kNone);
return;
}
+ case IrOpcode::kCheckInternalizedString: {
+ if (InputIs(node, Type::InternalizedString())) {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else {
+ VisitUnop(node, UseInfo::AnyTagged(),
+ MachineRepresentation::kTaggedPointer);
+ }
+ return;
+ }
case IrOpcode::kCheckNumber: {
if (InputIs(node, Type::Number())) {
if (truncation.IsUsedAsWord32()) {
@@ -2449,6 +2529,12 @@ class RepresentationSelector {
VisitObjectIs(node, Type::Undetectable(), lowering);
return;
}
+ case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kNewUnmappedArgumentsElements: {
+ ProcessRemainingInputs(node, 0);
+ SetOutput(node, MachineRepresentation::kTaggedPointer);
+ return;
+ }
case IrOpcode::kArrayBufferWasNeutered: {
VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
return;
@@ -2466,8 +2552,7 @@ class RepresentationSelector {
return;
}
case IrOpcode::kCheckTaggedHole: {
- VisitUnop(node, UseInfo::AnyTagged(),
- MachineRepresentation::kTaggedPointer);
+ VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
return;
}
case IrOpcode::kConvertTaggedHoleToUndefined: {
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index b8a486df38..dcfb485156 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -129,6 +129,15 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
break;
}
+ case IrOpcode::kCheckedFloat64ToInt32: {
+ Float64Matcher m(node->InputAt(0));
+ if (m.HasValue() && IsInt32Double(m.Value())) {
+ Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+ break;
+ }
case IrOpcode::kCheckedTaggedToInt32:
case IrOpcode::kCheckedTaggedSignedToInt32: {
NodeMatcher m(node->InputAt(0));
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 345a2c5f88..31dac61d7e 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -92,6 +92,7 @@ bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
// really only relevant for eliminating loads and they don't care about the
// write barrier mode.
return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+ lhs.map.address() == rhs.map.address() &&
lhs.machine_type == rhs.machine_type;
}
@@ -118,6 +119,10 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
name->Print(os);
os << ", ";
}
+ Handle<Map> map;
+ if (access.map.ToHandle(&map)) {
+ os << Brief(*map) << ", ";
+ }
#endif
access.type->PrintTo(os);
os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
@@ -229,6 +234,44 @@ std::ostream& operator<<(std::ostream& os, CheckForMinusZeroMode mode) {
return os;
}
+std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
+ bool empty = true;
+ if (flags & CheckMapsFlag::kTryMigrateInstance) {
+ os << "TryMigrateInstance";
+ empty = false;
+ }
+ if (empty) os << "None";
+ return os;
+}
+
+bool operator==(CheckMapsParameters const& lhs,
+ CheckMapsParameters const& rhs) {
+ return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+}
+
+bool operator!=(CheckMapsParameters const& lhs,
+ CheckMapsParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(CheckMapsParameters const& p) {
+ return base::hash_combine(p.flags(), p.maps());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
+ ZoneHandleSet<Map> const& maps = p.maps();
+ os << p.flags();
+ for (size_t i = 0; i < maps.size(); ++i) {
+ os << ", " << Brief(*maps[i]);
+ }
+ return os;
+}
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
+ DCHECK_EQ(IrOpcode::kCheckMaps, op->opcode());
+ return OpParameter<CheckMapsParameters>(op);
+}
+
size_t hash_value(CheckTaggedInputMode mode) {
return static_cast<size_t>(mode);
}
@@ -274,22 +317,36 @@ GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator* op) {
return OpParameter<GrowFastElementsFlags>(op);
}
+bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+ return lhs.mode() == rhs.mode() &&
+ lhs.source().address() == rhs.source().address() &&
+ lhs.target().address() == rhs.target().address();
+}
+
+bool operator!=(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+ return !(lhs == rhs);
+}
+
size_t hash_value(ElementsTransition transition) {
- return static_cast<uint8_t>(transition);
+ return base::hash_combine(static_cast<uint8_t>(transition.mode()),
+ transition.source().address(),
+ transition.target().address());
}
std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
- switch (transition) {
+ switch (transition.mode()) {
case ElementsTransition::kFastTransition:
- return os << "fast-transition";
+ return os << "fast-transition from " << Brief(*transition.source())
+ << " to " << Brief(*transition.target());
case ElementsTransition::kSlowTransition:
- return os << "slow-transition";
+ return os << "slow-transition from " << Brief(*transition.source())
+ << " to " << Brief(*transition.target());
}
UNREACHABLE();
return os;
}
-ElementsTransition ElementsTransitionOf(const Operator* op) {
+ElementsTransition const& ElementsTransitionOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
return OpParameter<ElementsTransition>(op);
}
@@ -331,6 +388,12 @@ NumberOperationHint NumberOperationHintOf(const Operator* op) {
return OpParameter<NumberOperationHint>(op);
}
+int ParameterCountOf(const Operator* op) {
+ DCHECK(op->opcode() == IrOpcode::kNewUnmappedArgumentsElements ||
+ op->opcode() == IrOpcode::kNewRestParameterElements);
+ return OpParameter<int>(op);
+}
+
PretenureFlag PretenureFlagOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
return OpParameter<PretenureFlag>(op);
@@ -395,6 +458,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(NumberToUint32, Operator::kNoProperties, 1, 0) \
V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(StringCharAt, Operator::kNoProperties, 2, 1) \
V(StringCharCodeAt, Operator::kNoProperties, 2, 1) \
V(StringFromCharCode, Operator::kNoProperties, 1, 0) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
@@ -436,6 +500,7 @@ UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
V(CheckBounds, 2, 1) \
V(CheckHeapObject, 1, 1) \
V(CheckIf, 1, 0) \
+ V(CheckInternalizedString, 1, 1) \
V(CheckNumber, 1, 1) \
V(CheckSmi, 1, 1) \
V(CheckString, 1, 1) \
@@ -689,16 +754,15 @@ const Operator* SimplifiedOperatorBuilder::CheckedTaggedToFloat64(
return nullptr;
}
-const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
- // TODO(bmeurer): Cache the most important versions of this operator.
- DCHECK_LT(0, map_input_count);
- int const value_input_count = 1 + map_input_count;
- return new (zone()) Operator1<int>( // --
- IrOpcode::kCheckMaps, // opcode
- Operator::kNoThrow | Operator::kNoWrite, // flags
- "CheckMaps", // name
- value_input_count, 1, 1, 0, 1, 0, // counts
- map_input_count); // parameter
+const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
+ ZoneHandleSet<Map> maps) {
+ CheckMapsParameters const parameters(flags, maps);
+ return new (zone()) Operator1<CheckMapsParameters>( // --
+ IrOpcode::kCheckMaps, // opcode
+ Operator::kNoThrow | Operator::kNoWrite, // flags
+ "CheckMaps", // name
+ 1, 1, 1, 0, 1, 0, // counts
+ parameters); // parameter
}
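
Compared to the old Operator1<int> with 1 + map_input_count value inputs, the maps now live in the operator's parameter, so every CheckMaps node has exactly one value input. A rough standalone sketch of that data shape — the container below is a plain std::set standing in for ZoneHandleSet<Map>, purely for illustration:

#include <cstdio>
#include <set>

enum CheckMapsFlag : unsigned { kNone = 0, kTryMigrateInstance = 1u << 0 };

struct CheckMapsParameters {
  unsigned flags;
  std::set<const void*> maps;  // stand-in for ZoneHandleSet<Map>
};

// The checked object is the only value input; the maps are operator data.
int ValueInputCount(const CheckMapsParameters&) { return 1; }

int main() {
  int map_a = 0, map_b = 0;
  CheckMapsParameters p{kTryMigrateInstance, {&map_a, &map_b}};
  std::printf("inputs: %d, maps: %zu\n", ValueInputCount(p), p.maps.size());
}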
const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
@@ -733,10 +797,30 @@ const Operator* SimplifiedOperatorBuilder::TransitionElementsKind(
IrOpcode::kTransitionElementsKind, // opcode
Operator::kNoDeopt | Operator::kNoThrow, // flags
"TransitionElementsKind", // name
- 3, 1, 1, 0, 1, 0, // counts
+ 1, 1, 1, 0, 1, 0, // counts
transition); // parameter
}
+const Operator* SimplifiedOperatorBuilder::NewUnmappedArgumentsElements(
+ int parameter_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kNewUnmappedArgumentsElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewUnmappedArgumentsElements", // name
+ 0, 1, 0, 1, 1, 0, // counts
+ parameter_count); // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewRestParameterElements(
+ int parameter_count) {
+ return new (zone()) Operator1<int>( // --
+ IrOpcode::kNewRestParameterElements, // opcode
+ Operator::kEliminatable, // flags
+ "NewRestParameterElements", // name
+ 0, 1, 0, 1, 1, 0, // counts
+ parameter_count); // parameter
+}
+
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
switch (pretenure) {
case NOT_TENURED:
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 833a0554f5..4ad44354f8 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
#include "src/handles.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/zone/zone-handle-set.h"
namespace v8 {
namespace internal {
@@ -64,6 +65,7 @@ struct FieldAccess {
BaseTaggedness base_is_tagged; // specifies if the base pointer is tagged.
int offset; // offset of the field, without tag.
MaybeHandle<Name> name; // debugging only.
+ MaybeHandle<Map> map; // map of the field value (if known).
Type* type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
@@ -143,6 +145,41 @@ std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
+// Flags for map checks.
+enum class CheckMapsFlag : uint8_t {
+ kNone = 0u,
+ kTryMigrateInstance = 1u << 0, // Try instance migration.
+};
+typedef base::Flags<CheckMapsFlag> CheckMapsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
+
+std::ostream& operator<<(std::ostream&, CheckMapsFlags);
+
+// A descriptor for map checks.
+class CheckMapsParameters final {
+ public:
+ CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
+ : flags_(flags), maps_(maps) {}
+
+ CheckMapsFlags flags() const { return flags_; }
+ ZoneHandleSet<Map> const& maps() const { return maps_; }
+
+ private:
+ CheckMapsFlags const flags_;
+ ZoneHandleSet<Map> const maps_;
+};
+
+bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
+bool operator!=(CheckMapsParameters const&, CheckMapsParameters const&);
+
+size_t hash_value(CheckMapsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
+ WARN_UNUSED_RESULT;
+
// A descriptor for growing elements backing stores.
enum class GrowFastElementsFlag : uint8_t {
kNone = 0u,
@@ -160,16 +197,35 @@ GrowFastElementsFlags GrowFastElementsFlagsOf(const Operator*)
WARN_UNUSED_RESULT;
// A descriptor for elements kind transitions.
-enum class ElementsTransition : uint8_t {
- kFastTransition, // simple transition, just updating the map.
- kSlowTransition // full transition, round-trip to the runtime.
+class ElementsTransition final {
+ public:
+ enum Mode : uint8_t {
+ kFastTransition, // simple transition, just updating the map.
+ kSlowTransition // full transition, round-trip to the runtime.
+ };
+
+ ElementsTransition(Mode mode, Handle<Map> source, Handle<Map> target)
+ : mode_(mode), source_(source), target_(target) {}
+
+ Mode mode() const { return mode_; }
+ Handle<Map> source() const { return source_; }
+ Handle<Map> target() const { return target_; }
+
+ private:
+ Mode const mode_;
+ Handle<Map> const source_;
+ Handle<Map> const target_;
};
+bool operator==(ElementsTransition const&, ElementsTransition const&);
+bool operator!=(ElementsTransition const&, ElementsTransition const&);
+
size_t hash_value(ElementsTransition);
std::ostream& operator<<(std::ostream&, ElementsTransition);
-ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementsTransition const& ElementsTransitionOf(const Operator* op)
+ WARN_UNUSED_RESULT;
// A hint for speculative number operations.
enum class NumberOperationHint : uint8_t {
@@ -186,6 +242,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, NumberOperationHint);
NumberOperationHint NumberOperationHintOf(const Operator* op)
WARN_UNUSED_RESULT;
+int ParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+
PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
@@ -294,6 +352,7 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* StringEqual();
const Operator* StringLessThan();
const Operator* StringLessThanOrEqual();
+ const Operator* StringCharAt();
const Operator* StringCharCodeAt();
const Operator* StringFromCharCode();
const Operator* StringFromCodePoint(UnicodeEncoding encoding);
@@ -319,9 +378,10 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* CheckIf();
const Operator* CheckBounds();
- const Operator* CheckMaps(int map_input_count);
+ const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
const Operator* CheckHeapObject();
+ const Operator* CheckInternalizedString();
const Operator* CheckNumber();
const Operator* CheckSmi();
const Operator* CheckString();
@@ -355,6 +415,12 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* ObjectIsString();
const Operator* ObjectIsUndetectable();
+ // new-rest-parameter-elements
+ const Operator* NewRestParameterElements(int parameter_count);
+
+ // new-unmapped-arguments-elements
+ const Operator* NewUnmappedArgumentsElements(int parameter_count);
+
// array-buffer-was-neutered buffer
const Operator* ArrayBufferWasNeutered();
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
index e8310d7d56..61c71caf87 100644
--- a/deps/v8/src/compiler/state-values-utils.cc
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -4,6 +4,8 @@
#include "src/compiler/state-values-utils.h"
+#include "src/bit-vector.h"
+
namespace v8 {
namespace internal {
namespace compiler {
@@ -47,6 +49,16 @@ bool StateValuesCache::IsKeysEqualToNode(StateValuesKey* key, Node* node) {
if (key->count != static_cast<size_t>(node->InputCount())) {
return false;
}
+
+ DCHECK(node->opcode() == IrOpcode::kStateValues);
+ SparseInputMask node_mask = SparseInputMaskOf(node->op());
+
+ if (node_mask != key->mask) {
+ return false;
+ }
+
+ // Comparing real inputs rather than sparse inputs, since we already know the
+ // sparse input masks are the same.
for (size_t i = 0; i < key->count; i++) {
if (key->values[i] != node->InputAt(static_cast<int>(i))) {
return false;
@@ -62,6 +74,9 @@ bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
if (key1->count != key2->count) {
return false;
}
+ if (key1->mask != key2->mask) {
+ return false;
+ }
for (size_t i = 0; i < key1->count; i++) {
if (key1->values[i] != key2->values[i]) {
return false;
@@ -73,19 +88,18 @@ bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
Node* StateValuesCache::GetEmptyStateValues() {
if (empty_state_values_ == nullptr) {
- empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+ empty_state_values_ =
+ graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
}
return empty_state_values_;
}
-
-NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
- while (working_space_.size() <= level) {
- void* space = zone()->New(sizeof(NodeVector));
- working_space_.push_back(new (space)
- NodeVector(kMaxInputCount, nullptr, zone()));
+StateValuesCache::WorkingBuffer* StateValuesCache::GetWorkingSpace(
+ size_t level) {
+ if (working_space_.size() <= level) {
+ working_space_.resize(level + 1);
}
- return working_space_[level];
+ return &working_space_[level];
}
namespace {
@@ -93,24 +107,24 @@ namespace {
int StateValuesHashKey(Node** nodes, size_t count) {
size_t hash = count;
for (size_t i = 0; i < count; i++) {
- hash = hash * 23 + nodes[i]->id();
+ hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
}
return static_cast<int>(hash & 0x7fffffff);
}
} // namespace
-
-Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
- StateValuesKey key(count, nodes);
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
+ SparseInputMask mask) {
+ StateValuesKey key(count, mask, nodes);
int hash = StateValuesHashKey(nodes, count);
ZoneHashMap::Entry* lookup =
hash_map_.LookupOrInsert(&key, hash, ZoneAllocationPolicy(zone()));
DCHECK_NOT_NULL(lookup);
Node* node;
if (lookup->value == nullptr) {
- int input_count = static_cast<int>(count);
- node = graph()->NewNode(common()->StateValues(input_count), input_count,
+ int node_count = static_cast<int>(count);
+ node = graph()->NewNode(common()->StateValues(node_count, mask), node_count,
nodes);
NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
lookup->key = new_key;
@@ -121,106 +135,190 @@ Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
return node;
}
+SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
+ WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
+ Node** values, size_t count, const BitVector* liveness) {
+ SparseInputMask::BitMaskType input_mask = 0;
-class StateValuesCache::ValueArrayIterator {
- public:
- ValueArrayIterator(Node** values, size_t count)
- : values_(values), count_(count), current_(0) {}
+ // Virtual nodes are the live nodes plus the implicit optimized out nodes,
+ // which are implied by the liveness mask.
+ size_t virtual_node_count = *node_count;
- void Advance() {
- if (!done()) {
- current_++;
- }
- }
+ while (*values_idx < count && *node_count < kMaxInputCount &&
+ virtual_node_count < SparseInputMask::kMaxSparseInputs) {
+ DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
- bool done() { return current_ >= count_; }
+ if (liveness == nullptr ||
+ liveness->Contains(static_cast<int>(*values_idx))) {
+ input_mask |= 1 << (virtual_node_count);
+ (*node_buffer)[(*node_count)++] = values[*values_idx];
+ }
+ virtual_node_count++;
- Node* node() {
- DCHECK(!done());
- return values_[current_];
+ (*values_idx)++;
}
- private:
- Node** values_;
- size_t count_;
- size_t current_;
-};
+ DCHECK(*node_count <= StateValuesCache::kMaxInputCount);
+ DCHECK(virtual_node_count <= SparseInputMask::kMaxSparseInputs);
+ // Add the end marker at the end of the mask.
+ input_mask |= SparseInputMask::kEndMarker << virtual_node_count;
-Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
- if (max_height == 0) {
- Node* node = it->node();
- it->Advance();
- return node;
- }
- DCHECK(!it->done());
+ return input_mask;
+}
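
The returned bitmask encodes, per "virtual" input, whether that position was live (bit set) or optimized out (bit clear), with an end-marker bit just past the last position. A standalone model of the encoding — it assumes kEndMarker == 1 as in SparseInputMask, and the liveness pattern is invented:

#include <cstdint>
#include <cstdio>

using BitMaskType = uint32_t;
constexpr BitMaskType kEndMarker = 1;

BitMaskType Encode(const bool* live, int count) {
  BitMaskType mask = 0;
  for (int i = 0; i < count; ++i) {
    if (live[i]) mask |= BitMaskType{1} << i;  // live inputs get a 1 bit
  }
  return mask | (kEndMarker << count);  // terminator past the last input
}

int main() {
  bool live[] = {true, false, true, true};  // 4 virtual inputs, 3 live
  std::printf("0x%x\n", Encode(live, 4));   // 0x1d == 0b1'1101
}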
- NodeVector* buffer = GetWorkingSpace(max_height);
- size_t count = 0;
- for (; count < kMaxInputCount; count++) {
- if (it->done()) break;
- (*buffer)[count] = BuildTree(it, max_height - 1);
+Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
+ size_t count, const BitVector* liveness,
+ size_t level) {
+ WorkingBuffer* node_buffer = GetWorkingSpace(level);
+ size_t node_count = 0;
+ SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
+
+ if (level == 0) {
+ input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+ values, count, liveness);
+ // Make sure we returned a sparse input mask.
+ DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+ } else {
+ while (*values_idx < count && node_count < kMaxInputCount) {
+ if (count - *values_idx < kMaxInputCount - node_count) {
+ // If we have fewer values remaining than inputs remaining, dump the
+ // remaining values into this node.
+ // TODO(leszeks): We could optimise this further by only counting
+ // remaining live nodes.
+
+ size_t previous_input_count = node_count;
+ input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+ values, count, liveness);
+ // Make sure we have exhausted our values.
+ DCHECK_EQ(*values_idx, count);
+ // Make sure we returned a sparse input mask.
+ DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+
+ // Make sure we haven't touched inputs below previous_input_count in the
+ // mask.
+ DCHECK_EQ(input_mask & ((1 << previous_input_count) - 1), 0u);
+ // Mark all previous inputs as live.
+ input_mask |= ((1 << previous_input_count) - 1);
+
+ break;
+
+ } else {
+ // Otherwise, add the values to a subtree and add that as an input.
+ Node* subtree =
+ BuildTree(values_idx, values, count, liveness, level - 1);
+ (*node_buffer)[node_count++] = subtree;
+ // Don't touch the bitmask, so that it stays dense.
+ }
+ }
}
- if (count == 1) {
- return (*buffer)[0];
+
+ if (node_count == 1 && input_mask == SparseInputMask::kDenseBitMask) {
+ // Elide the StateValue node if there is only one, dense input. This will
+ // only happen if we built a single subtree (as nodes with values are always
+ // sparse), and so we can replace ourselves with it.
+ DCHECK_EQ((*node_buffer)[0]->opcode(), IrOpcode::kStateValues);
+ return (*node_buffer)[0];
} else {
- return GetValuesNodeFromCache(&(buffer->front()), count);
+ return GetValuesNodeFromCache(node_buffer->data(), node_count,
+ SparseInputMask(input_mask));
+ }
+}
+
+#if DEBUG
+namespace {
+
+void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
+ const BitVector* liveness) {
+ CHECK_EQ(count, StateValuesAccess(tree).size());
+
+ int i;
+ auto access = StateValuesAccess(tree);
+ auto it = access.begin();
+ auto itend = access.end();
+ for (i = 0; it != itend; ++it, ++i) {
+ if (liveness == nullptr || liveness->Contains(i)) {
+ CHECK((*it).node == values[i]);
+ } else {
+ CHECK((*it).node == nullptr);
+ }
}
+ CHECK_EQ(static_cast<size_t>(i), count);
}
+} // namespace
+#endif
-Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
+ const BitVector* liveness) {
#if DEBUG
+ // Check that the values represent actual values, and not a tree of values.
for (size_t i = 0; i < count; i++) {
- DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
- DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ if (values[i] != nullptr) {
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ }
+ }
+ if (liveness != nullptr) {
+ // Liveness can have extra bits for the stack or accumulator, which we
+ // ignore here.
+ DCHECK_LE(count, static_cast<size_t>(liveness->length()));
+
+ for (size_t i = 0; i < count; i++) {
+ if (liveness->Contains(static_cast<int>(i))) {
+ DCHECK_NOT_NULL(values[i]);
+ }
+ }
}
#endif
+
if (count == 0) {
return GetEmptyStateValues();
}
+
+ // This is a worst-case tree height estimate, assuming that all values are
+ // live. We could get a better estimate by counting zeroes in the liveness
+ // vector, but there's no point -- any excess height in the tree will be
+ // collapsed by the single-input elision at the end of BuildTree.
size_t height = 0;
- size_t max_nodes = 1;
- while (count > max_nodes) {
+ size_t max_inputs = kMaxInputCount;
+ while (count > max_inputs) {
height++;
- max_nodes *= kMaxInputCount;
+ max_inputs *= kMaxInputCount;
}
- ValueArrayIterator it(values, count);
+ size_t values_idx = 0;
+ Node* tree = BuildTree(&values_idx, values, count, liveness, height);
+ // The values should be exhausted by the end of BuildTree.
+ DCHECK_EQ(values_idx, count);
- Node* tree = BuildTree(&it, height);
+ // The 'tree' must be rooted with a state value node.
+ DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
- // If the 'tree' is a single node, equip it with a StateValues wrapper.
- if (tree->opcode() != IrOpcode::kStateValues &&
- tree->opcode() != IrOpcode::kTypedStateValues) {
- tree = GetValuesNodeFromCache(&tree, 1);
- }
+#if DEBUG
+ CheckTreeContainsValues(tree, values, count, liveness);
+#endif
return tree;
}
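
The height loop above sizes the tree for the worst case: a level-h tree fans out to kMaxInputCount^(h+1) leaves, so with kMaxInputCount = 8 (see the header below), 100 values need height 2 (8 < 100, 64 < 100, 512 >= 100). A runnable standalone check of that arithmetic:

#include <cstdio>

constexpr size_t kMaxInputCount = 8;

size_t TreeHeight(size_t count) {
  size_t height = 0;
  size_t max_inputs = kMaxInputCount;
  while (count > max_inputs) {
    height++;
    max_inputs *= kMaxInputCount;
  }
  return height;
}

int main() {
  std::printf("%zu %zu %zu\n", TreeHeight(8), TreeHeight(100),
              TreeHeight(513));  // 0 2 3
}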
-
StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
- // A hacky way initialize - just set the index before the node we want
- // to process and then advance to it.
- stack_[current_depth_].node = node;
- stack_[current_depth_].index = -1;
- Advance();
+ stack_[current_depth_] =
+ SparseInputMaskOf(node->op()).IterateOverInputs(node);
+ EnsureValid();
}
-
-StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+SparseInputMask::InputIterator* StateValuesAccess::iterator::Top() {
DCHECK(current_depth_ >= 0);
DCHECK(current_depth_ < kMaxInlineDepth);
return &(stack_[current_depth_]);
}
-
void StateValuesAccess::iterator::Push(Node* node) {
current_depth_++;
CHECK(current_depth_ < kMaxInlineDepth);
- stack_[current_depth_].node = node;
- stack_[current_depth_].index = 0;
+ stack_[current_depth_] =
+ SparseInputMaskOf(node->op()).IterateOverInputs(node);
}
@@ -234,48 +332,61 @@ bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
void StateValuesAccess::iterator::Advance() {
- // Advance the current index.
- Top()->index++;
+ Top()->Advance();
+ EnsureValid();
+}
- // Fix up the position to point to a valid node.
+void StateValuesAccess::iterator::EnsureValid() {
while (true) {
- // TODO(jarin): Factor to a separate method.
- Node* node = Top()->node;
- int index = Top()->index;
+ SparseInputMask::InputIterator* top = Top();
+
+ if (top->IsEmpty()) {
+ // We are on a valid (albeit optimized out) node.
+ return;
+ }
- if (index >= node->InputCount()) {
- // Pop stack and move to the next sibling.
+ if (top->IsEnd()) {
+ // We have hit the end of this iterator. Pop the stack and move to the
+ // next sibling iterator.
Pop();
if (done()) {
// Stack is exhausted, we have reached the end.
return;
}
- Top()->index++;
- } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
- node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
- // Nested state, we need to push to the stack.
- Push(node->InputAt(index));
- } else {
- // We are on a valid node, we can stop the iteration.
- return;
+ Top()->Advance();
+ continue;
}
- }
-}
+ // At this point the value is known to be live and within our input nodes.
+ Node* value_node = top->GetReal();
+
+ if (value_node->opcode() == IrOpcode::kStateValues ||
+ value_node->opcode() == IrOpcode::kTypedStateValues) {
+ // Nested state, we need to push to the stack.
+ Push(value_node);
+ continue;
+ }
-Node* StateValuesAccess::iterator::node() {
- return Top()->node->InputAt(Top()->index);
+ // We are on a valid node, we can stop the iteration.
+ return;
+ }
}
+Node* StateValuesAccess::iterator::node() { return Top()->Get(nullptr); }
MachineType StateValuesAccess::iterator::type() {
- Node* state = Top()->node;
- if (state->opcode() == IrOpcode::kStateValues) {
+ Node* parent = Top()->parent();
+ if (parent->opcode() == IrOpcode::kStateValues) {
return MachineType::AnyTagged();
} else {
- DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
- ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
- return (*types)[Top()->index];
+ DCHECK_EQ(IrOpcode::kTypedStateValues, parent->opcode());
+
+ if (Top()->IsEmpty()) {
+ return MachineType::None();
+ } else {
+ ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
+ return (*types)[Top()->real_index()];
+ }
}
}
@@ -300,14 +411,24 @@ StateValuesAccess::TypedNode StateValuesAccess::iterator::operator*() {
size_t StateValuesAccess::size() {
size_t count = 0;
- for (int i = 0; i < node_->InputCount(); i++) {
- if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
- node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
- count += StateValuesAccess(node_->InputAt(i)).size();
- } else {
+ SparseInputMask mask = SparseInputMaskOf(node_->op());
+
+ SparseInputMask::InputIterator iterator = mask.IterateOverInputs(node_);
+
+ for (; !iterator.IsEnd(); iterator.Advance()) {
+ if (iterator.IsEmpty()) {
count++;
+ } else {
+ Node* value = iterator.GetReal();
+ if (value->opcode() == IrOpcode::kStateValues ||
+ value->opcode() == IrOpcode::kTypedStateValues) {
+ count += StateValuesAccess(value).size();
+ } else {
+ count++;
+ }
}
}
+
return count;
}
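
// A minimal standalone sketch (illustrative names, not V8 API) of the
// counting logic in StateValuesAccess::size() above: every slot counts once,
// whether it is optimized out (empty), a plain value, or part of a nested
// state-values node; here nullptr is assumed to mark an optimized-out slot.
//
//   #include <cstddef>
//   #include <vector>
//
//   struct SketchNode {
//     bool is_state_values = false;      // a nested StateValues-like node?
//     std::vector<SketchNode*> inputs;   // nullptr == optimized-out slot
//   };
//
//   size_t CountSlots(const SketchNode* node) {
//     size_t count = 0;
//     for (const SketchNode* input : node->inputs) {
//       if (input != nullptr && input->is_state_values) {
//         count += CountSlots(input);  // flatten nested state values
//       } else {
//         count++;                     // live value or empty slot, one each
//       }
//     }
//     return count;
//   }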
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
index 14b1b9e599..d5e84d208c 100644
--- a/deps/v8/src/compiler/state-values-utils.h
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -5,12 +5,16 @@
#ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
#define V8_COMPILER_STATE_VALUES_UTILS_H_
+#include <array>
+#include "src/compiler/common-operator.h"
#include "src/compiler/js-graph.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
+class BitVector;
+
namespace compiler {
class Graph;
@@ -19,10 +23,12 @@ class V8_EXPORT_PRIVATE StateValuesCache {
public:
explicit StateValuesCache(JSGraph* js_graph);
- Node* GetNodeForValues(Node** values, size_t count);
+ Node* GetNodeForValues(Node** values, size_t count,
+ const BitVector* liveness = nullptr);
private:
static const size_t kMaxInputCount = 8;
+ typedef std::array<Node*, kMaxInputCount> WorkingBuffer;
struct NodeKey {
Node* node;
@@ -33,22 +39,34 @@ class V8_EXPORT_PRIVATE StateValuesCache {
struct StateValuesKey : public NodeKey {
// ValueArray - array of nodes ({node} has to be nullptr).
size_t count;
+ SparseInputMask mask;
Node** values;
- StateValuesKey(size_t count, Node** values)
- : NodeKey(nullptr), count(count), values(values) {}
+ StateValuesKey(size_t count, SparseInputMask mask, Node** values)
+ : NodeKey(nullptr), count(count), mask(mask), values(values) {}
};
- class ValueArrayIterator;
-
static bool AreKeysEqual(void* key1, void* key2);
static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
- Node* BuildTree(ValueArrayIterator* it, size_t max_height);
- NodeVector* GetWorkingSpace(size_t level);
+ // Fills {node_buffer}, starting from {node_count}, with {values}, starting
+ // at {values_idx}, sparsely encoding according to {liveness}. {node_count} is
+ // updated with the new number of inputs in {node_buffer}, and a bitmask of
+ // the sparse encoding is returned.
+ SparseInputMask::BitMaskType FillBufferWithValues(WorkingBuffer* node_buffer,
+ size_t* node_count,
+ size_t* values_idx,
+ Node** values, size_t count,
+ const BitVector* liveness);
+
+ Node* BuildTree(size_t* values_idx, Node** values, size_t count,
+ const BitVector* liveness, size_t level);
+
+ WorkingBuffer* GetWorkingSpace(size_t level);
Node* GetEmptyStateValues();
- Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+ Node* GetValuesNodeFromCache(Node** nodes, size_t count,
+ SparseInputMask mask);
Graph* graph() { return js_graph_->graph(); }
CommonOperatorBuilder* common() { return js_graph_->common(); }
@@ -57,7 +75,7 @@ class V8_EXPORT_PRIVATE StateValuesCache {
JSGraph* js_graph_;
CustomMatcherZoneHashMap hash_map_;
- ZoneVector<NodeVector*> working_space_; // One working space per level.
+ ZoneVector<WorkingBuffer> working_space_; // One working space per level.
Node* empty_state_values_;
};
@@ -86,21 +104,14 @@ class V8_EXPORT_PRIVATE StateValuesAccess {
MachineType type();
bool done();
void Advance();
+ void EnsureValid();
- struct StatePos {
- Node* node;
- int index;
-
- explicit StatePos(Node* node) : node(node), index(0) {}
- StatePos() {}
- };
-
- StatePos* Top();
+ SparseInputMask::InputIterator* Top();
void Push(Node* node);
void Pop();
static const int kMaxInlineDepth = 8;
- StatePos stack_[kMaxInlineDepth];
+ SparseInputMask::InputIterator stack_[kMaxInlineDepth];
int current_depth_;
};
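
// A rough sketch of the sparse-encoding contract behind
// SparseInputMask::InputIterator, under the assumption that a set bit marks a
// live slot backed by the next real input and a clear bit marks an
// optimized-out slot (the exact V8 bit layout may differ). Preconditions:
// slot_count < 32, and the number of set bits among the low slot_count bits
// equals real_inputs.size().
//
//   #include <cstdint>
//   #include <vector>
//
//   template <typename T>
//   std::vector<T*> ExpandSparse(uint32_t mask, int slot_count,
//                                const std::vector<T*>& real_inputs) {
//     std::vector<T*> slots;
//     size_t real_index = 0;  // advances only on live slots, cf. real_index()
//     for (int i = 0; i < slot_count; i++) {
//       if (mask & (1u << i)) {
//         slots.push_back(real_inputs[real_index++]);  // live slot
//       } else {
//         slots.push_back(nullptr);  // optimized-out slot ("IsEmpty")
//       }
//     }
//     return slots;
//   }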
diff --git a/deps/v8/src/compiler/type-cache.h b/deps/v8/src/compiler/type-cache.h
index 69eaf11616..3d9801bc10 100644
--- a/deps/v8/src/compiler/type-cache.h
+++ b/deps/v8/src/compiler/type-cache.h
@@ -64,6 +64,8 @@ class TypeCache final {
Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
Type* const kPositiveIntegerOrMinusZero =
Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+ Type* const kPositiveIntegerOrNaN =
+ Type::Union(kPositiveInteger, Type::NaN(), zone());
Type* const kPositiveIntegerOrMinusZeroOrNaN =
Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
@@ -97,6 +99,11 @@ class TypeCache final {
// [0, String::kMaxLength].
Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
+ // A time value always contains a tagged number in the range
+ // [-kMaxTimeInMs, kMaxTimeInMs].
+ Type* const kTimeValueType =
+ CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs);
+
// The JSDate::day property always contains a tagged number in the range
// [1, 31] or NaN.
Type* const kJSDateDayType =
@@ -123,9 +130,8 @@ class TypeCache final {
// The JSDate::value property always contains a tagged number in the range
// [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
- Type* const kJSDateValueType = Type::Union(
- CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
- Type::NaN(), zone());
+ Type* const kJSDateValueType =
+ Type::Union(kTimeValueType, Type::NaN(), zone());
// The JSDate::weekday property always contains a tagged number in the range
// [0, 6] or NaN.
@@ -137,6 +143,10 @@ class TypeCache final {
Type* const kJSDateYearType =
Type::Union(Type::SignedSmall(), Type::NaN(), zone());
+ // The valid number of arguments for JavaScript functions.
+ Type* const kArgumentsLengthType =
+ Type::Range(0.0, Code::kMaxArguments, zone());
+
private:
template <typename T>
Type* CreateRange() {
diff --git a/deps/v8/src/compiler/type-hint-analyzer.cc b/deps/v8/src/compiler/type-hint-analyzer.cc
deleted file mode 100644
index da77a0c997..0000000000
--- a/deps/v8/src/compiler/type-hint-analyzer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/type-hint-analyzer.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/ic/ic-state.h"
-#include "src/type-hints.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-BinaryOperationHint ToBinaryOperationHint(Token::Value op,
- BinaryOpICState::Kind kind) {
- switch (kind) {
- case BinaryOpICState::NONE:
- return BinaryOperationHint::kNone;
- case BinaryOpICState::SMI:
- return BinaryOperationHint::kSignedSmall;
- case BinaryOpICState::INT32:
- return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
- ? BinaryOperationHint::kNumberOrOddball
- : BinaryOperationHint::kSigned32;
- case BinaryOpICState::NUMBER:
- return BinaryOperationHint::kNumberOrOddball;
- case BinaryOpICState::STRING:
- return BinaryOperationHint::kString;
- case BinaryOpICState::GENERIC:
- return BinaryOperationHint::kAny;
- }
- UNREACHABLE();
- return BinaryOperationHint::kNone;
-}
-
-CompareOperationHint ToCompareOperationHint(Token::Value op,
- CompareICState::State state) {
- switch (state) {
- case CompareICState::UNINITIALIZED:
- return CompareOperationHint::kNone;
- case CompareICState::SMI:
- return CompareOperationHint::kSignedSmall;
- case CompareICState::NUMBER:
- return Token::IsOrderedRelationalCompareOp(op)
- ? CompareOperationHint::kNumberOrOddball
- : CompareOperationHint::kNumber;
- case CompareICState::STRING:
- case CompareICState::INTERNALIZED_STRING:
- case CompareICState::UNIQUE_NAME:
- case CompareICState::RECEIVER:
- case CompareICState::KNOWN_RECEIVER:
- case CompareICState::BOOLEAN:
- case CompareICState::GENERIC:
- return CompareOperationHint::kAny;
- }
- UNREACHABLE();
- return CompareOperationHint::kNone;
-}
-
-} // namespace
-
-bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
- BinaryOperationHint* hint) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
- BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
- *hint = ToBinaryOperationHint(state.op(), state.kind());
- return true;
-}
-
-bool TypeHintAnalysis::GetCompareOperationHint(
- TypeFeedbackId id, CompareOperationHint* hint) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::COMPARE_IC, code->kind());
- CompareICStub stub(code->stub_key(), code->GetIsolate());
- *hint = ToCompareOperationHint(stub.op(), stub.state());
- return true;
-}
-
-bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
- ToBooleanHints* hints) const {
- auto i = infos_.find(id);
- if (i == infos_.end()) return false;
- Handle<Code> code = i->second;
- DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
- ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
- *hints = stub.hints();
- return true;
-}
-
-TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
- DisallowHeapAllocation no_gc;
- TypeHintAnalysis::Infos infos(zone());
- Isolate* const isolate = code->GetIsolate();
- int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- RelocInfo* rinfo = it.rinfo();
- Address target_address = rinfo->target_address();
- Code* target = Code::GetCodeFromTargetAddress(target_address);
- switch (target->kind()) {
- case Code::BINARY_OP_IC:
- case Code::COMPARE_IC:
- case Code::TO_BOOLEAN_IC: {
- // Add this feedback to the {infos}.
- TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
- infos.insert(std::make_pair(id, handle(target, isolate)));
- break;
- }
- default:
- // Ignore the remaining code objects.
- break;
- }
- }
- return new (zone()) TypeHintAnalysis(infos, zone());
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/type-hint-analyzer.h b/deps/v8/src/compiler/type-hint-analyzer.h
deleted file mode 100644
index 354f8943bb..0000000000
--- a/deps/v8/src/compiler/type-hint-analyzer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
-#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
-
-#include "src/handles.h"
-#include "src/type-hints.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// The result of analyzing type hints.
-class TypeHintAnalysis final : public ZoneObject {
- public:
- typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
-
- explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
- : infos_(infos), zone_(zone) {}
-
- bool GetBinaryOperationHint(TypeFeedbackId id,
- BinaryOperationHint* hint) const;
- bool GetCompareOperationHint(TypeFeedbackId id,
- CompareOperationHint* hint) const;
- bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
-
- private:
- Zone* zone() const { return zone_; }
-
- Infos const infos_;
- Zone* zone_;
-};
-
-
-// The class that performs type hint analysis on the fullcodegen code object.
-class TypeHintAnalyzer final {
- public:
- explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
-
- TypeHintAnalysis* Analyze(Handle<Code> code);
-
- private:
- Zone* zone() const { return zone_; }
-
- Zone* const zone_;
-
- DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index 5ebc390c8b..8149a1bee4 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -83,10 +83,11 @@ Reduction TypedOptimization::Reduce(Node* node) {
case IrOpcode::kLoadField:
return ReduceLoadField(node);
case IrOpcode::kNumberCeil:
- case IrOpcode::kNumberFloor:
case IrOpcode::kNumberRound:
case IrOpcode::kNumberTrunc:
return ReduceNumberRoundop(node);
+ case IrOpcode::kNumberFloor:
+ return ReduceNumberFloor(node);
case IrOpcode::kNumberToUint8Clamped:
return ReduceNumberToUint8Clamped(node);
case IrOpcode::kPhi:
@@ -185,6 +186,40 @@ Reduction TypedOptimization::ReduceLoadField(Node* node) {
return NoChange();
}
+Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
+ Node* const input = NodeProperties::GetValueInput(node, 0);
+ Type* const input_type = NodeProperties::GetType(input);
+ if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+ return Replace(input);
+ }
+ if (input_type->Is(Type::PlainNumber()) &&
+ input->opcode() == IrOpcode::kNumberDivide) {
+ Node* const lhs = NodeProperties::GetValueInput(input, 0);
+ Type* const lhs_type = NodeProperties::GetType(lhs);
+ Node* const rhs = NodeProperties::GetValueInput(input, 1);
+ Type* const rhs_type = NodeProperties::GetType(rhs);
+ if (lhs_type->Is(Type::Unsigned32()) && rhs_type->Is(Type::Unsigned32())) {
+ // We can replace
+ //
+ // NumberFloor(NumberDivide(lhs: unsigned32,
+ // rhs: unsigned32)): plain-number
+ //
+ // with
+ //
+ // NumberToUint32(NumberDivide(lhs, rhs))
+ //
+ // and just smash the type of the {lhs} on the {node},
+ // as the truncated result must be in the same range as
+ // {lhs} since {rhs} cannot be less than 1 (due to the
+ // plain-number type constraint on the {node}).
+ NodeProperties::ChangeOp(node, simplified()->NumberToUint32());
+ NodeProperties::SetType(node, lhs_type);
+ return Changed(node);
+ }
+ }
+ return NoChange();
+}
+
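
// A quick sanity check of the ReduceNumberFloor rewrite above, as a
// standalone sketch: for unsigned 32-bit lhs and rhs with rhs >= 1,
// floor(lhs / rhs) equals the truncating uint32 division, which is why
// NumberFloor(NumberDivide(...)) can become NumberToUint32(...).
//
//   #include <cassert>
//   #include <cmath>
//   #include <cstdint>
//
//   uint32_t FloorDiv(uint32_t lhs, uint32_t rhs) {
//     assert(rhs >= 1);  // guaranteed here by the plain-number result type
//     double floored = std::floor(static_cast<double>(lhs) / rhs);
//     uint32_t truncated = lhs / rhs;  // unsigned division already floors
//     assert(static_cast<uint32_t>(floored) == truncated);
//     return truncated;
//   }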
Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
Node* const input = NodeProperties::GetValueInput(node, 0);
Type* const input_type = NodeProperties::GetType(input);
diff --git a/deps/v8/src/compiler/typed-optimization.h b/deps/v8/src/compiler/typed-optimization.h
index fb2db7249d..810914993f 100644
--- a/deps/v8/src/compiler/typed-optimization.h
+++ b/deps/v8/src/compiler/typed-optimization.h
@@ -46,6 +46,7 @@ class V8_EXPORT_PRIVATE TypedOptimization final
Reduction ReduceCheckMaps(Node* node);
Reduction ReduceCheckString(Node* node);
Reduction ReduceLoadField(Node* node);
+ Reduction ReduceNumberFloor(Node* node);
Reduction ReduceNumberRoundop(Node* node);
Reduction ReduceNumberToUint8Clamped(Node* node);
Reduction ReducePhi(Node* node);
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 2642a1007a..51b8352b31 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -122,6 +122,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(Deoptimize)
DECLARE_CASE(DeoptimizeIf)
DECLARE_CASE(DeoptimizeUnless)
+ DECLARE_CASE(TrapIf)
+ DECLARE_CASE(TrapUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -185,6 +187,8 @@ class Typer::Visitor : public Reducer {
DECLARE_CASE(Deoptimize)
DECLARE_CASE(DeoptimizeIf)
DECLARE_CASE(DeoptimizeUnless)
+ DECLARE_CASE(TrapIf)
+ DECLARE_CASE(TrapUnless)
DECLARE_CASE(Return)
DECLARE_CASE(TailCall)
DECLARE_CASE(Terminate)
@@ -1233,6 +1237,10 @@ Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
+ UNREACHABLE();
+ return nullptr;
+}
Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
return Type::Boolean();
@@ -1240,12 +1248,21 @@ Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
+// JS instanceof operator.
+
+Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
+ return Type::Boolean();
+}
-Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+Type* Typer::Visitor::JSOrdinaryHasInstanceTyper(Type* lhs, Type* rhs,
+ Typer* t) {
return Type::Boolean();
}
+Type* Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
+ return Type::Callable();
+}
+
// JS context operators.
@@ -1296,6 +1313,10 @@ Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
return Type::Receiver();
}
+Type* Typer::Visitor::TypeJSCallConstructWithSpread(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
Handle<JSFunction> function =
@@ -1344,6 +1365,8 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
case kMathClz32:
return t->cache_.kZeroToThirtyTwo;
// Date functions.
+ case kDateNow:
+ return t->cache_.kTimeValueType;
case kDateGetDate:
return t->cache_.kJSDateDayType;
case kDateGetDay:
@@ -1363,6 +1386,7 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kJSDateSecondType;
case kDateGetTime:
return t->cache_.kJSDateValueType;
+
// Number functions.
case kNumberIsFinite:
case kNumberIsInteger:
@@ -1375,16 +1399,41 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return t->cache_.kIntegerOrMinusZeroOrNaN;
case kNumberToString:
return Type::String();
+
// String functions.
case kStringCharCodeAt:
return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
t->zone());
case kStringCharAt:
+ return Type::String();
+ case kStringCodePointAt:
+ return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
+ Type::Undefined(), t->zone());
case kStringConcat:
case kStringFromCharCode:
+ case kStringFromCodePoint:
+ return Type::String();
+ case kStringIndexOf:
+ case kStringLastIndexOf:
+ return Type::Range(-1.0, String::kMaxLength - 1.0, t->zone());
+ case kStringEndsWith:
+ case kStringIncludes:
+ return Type::Boolean();
+ case kStringRaw:
+ case kStringRepeat:
+ case kStringSlice:
+ return Type::String();
+ case kStringStartsWith:
+ return Type::Boolean();
case kStringSubstr:
+ case kStringSubstring:
case kStringToLowerCase:
+ case kStringToString:
case kStringToUpperCase:
+ case kStringTrim:
+ case kStringTrimLeft:
+ case kStringTrimRight:
+ case kStringValueOf:
return Type::String();
case kStringIterator:
@@ -1401,16 +1450,53 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
return Type::OtherObject();
// Array functions.
+ case kArrayConcat:
+ return Type::Receiver();
+ case kArrayEvery:
+ return Type::Boolean();
+ case kArrayFill:
+ case kArrayFilter:
+ return Type::Receiver();
+ case kArrayFindIndex:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayForEach:
+ return Type::Undefined();
+ case kArrayIncludes:
+ return Type::Boolean();
case kArrayIndexOf:
+ return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayJoin:
+ return Type::String();
case kArrayLastIndexOf:
return Type::Range(-1, kMaxSafeInteger, t->zone());
+ case kArrayMap:
+ return Type::Receiver();
case kArrayPush:
return t->cache_.kPositiveSafeInteger;
+ case kArrayReverse:
+ case kArraySlice:
+ return Type::Receiver();
+ case kArraySome:
+ return Type::Boolean();
+ case kArraySplice:
+ return Type::Receiver();
+ case kArrayUnshift:
+ return t->cache_.kPositiveSafeInteger;
// Object functions.
case kObjectHasOwnProperty:
return Type::Boolean();
+ // RegExp functions.
+ case kRegExpCompile:
+ return Type::OtherObject();
+ case kRegExpExec:
+ return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+ case kRegExpTest:
+ return Type::Boolean();
+ case kRegExpToString:
+ return Type::String();
+
// Function functions.
case kFunctionHasInstance:
return Type::Boolean();
@@ -1595,6 +1681,8 @@ Type* Typer::Visitor::StringFromCodePointTyper(Type* type, Typer* t) {
return Type::String();
}
+Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+
Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
return typer_->cache_.kUint16;
}
@@ -1628,6 +1716,11 @@ Type* Typer::Visitor::TypeCheckIf(Node* node) {
return nullptr;
}
+Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
+ Type* arg = Operand(node, 0);
+ return Type::Intersect(arg, Type::InternalizedString(), zone());
+}
+
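
// The narrowing above is plain bitset intersection; a tiny sketch using the
// string bits from the types.h hunk further down (and assuming, as in V8,
// that String is the union of its internalized and other-string bits).
//
//   #include <cstdint>
//
//   constexpr uint32_t kInternalizedString = 1u << 13;
//   constexpr uint32_t kOtherString = 1u << 14;
//   constexpr uint32_t kString = kInternalizedString | kOtherString;
//
//   constexpr uint32_t Intersect(uint32_t a, uint32_t b) { return a & b; }
//
//   // After CheckInternalizedString, an arbitrary string is known
//   // internalized:
//   static_assert(
//       Intersect(kString, kInternalizedString) == kInternalizedString,
//       "intersection keeps only the internalized-string bits");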
Type* Typer::Visitor::TypeCheckMaps(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1752,6 +1845,14 @@ Type* Typer::Visitor::TypeObjectIsUndetectable(Node* node) {
return TypeUnaryOp(node, ObjectIsUndetectable);
}
+Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
+ return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewRestParameterElements(Node* node) {
+ return Type::OtherInternal();
+}
+
Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
return Type::Boolean();
}
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index 806bd8f2c5..a2af190d9d 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -7,6 +7,7 @@
#include "src/compiler/types.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -196,7 +197,17 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_GLOBAL_PROXY_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
- if (map->is_undetectable()) return kOtherUndetectable;
+ if (map->is_undetectable()) {
+ // Currently we assume that every undetectable receiver is also
+ // callable, which is what we need to support document.all. We
+ // could add another Type bit to support other use cases in the
+ // future if necessary.
+ DCHECK(map->is_callable());
+ return kOtherUndetectable;
+ }
+ if (map->is_callable()) {
+ return kOtherCallable;
+ }
return kOtherObject;
case JS_VALUE_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
@@ -204,7 +215,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_ARRAY_BUFFER_TYPE:
case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
@@ -254,16 +264,21 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
- case JS_BOUND_FUNCTION_TYPE:
+ DCHECK(!map->is_callable());
DCHECK(!map->is_undetectable());
return kOtherObject;
+ case JS_BOUND_FUNCTION_TYPE:
+ DCHECK(!map->is_undetectable());
+ return kBoundFunction;
case JS_FUNCTION_TYPE:
DCHECK(!map->is_undetectable());
return kFunction;
case JS_PROXY_TYPE:
DCHECK(!map->is_undetectable());
- return kProxy;
+ if (map->is_callable()) return kCallableProxy;
+ return kOtherProxy;
case MAP_TYPE:
case ALLOCATION_SITE_TYPE:
case ACCESSOR_INFO_TYPE:
@@ -297,8 +312,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case INTERCEPTOR_INFO_TYPE:
case CALL_HANDLER_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
- case SIGNATURE_INFO_TYPE:
- case TYPE_SWITCH_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
@@ -310,8 +323,10 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
+ case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
+ case CONSTANT_ELEMENTS_PAIR_TYPE:
UNREACHABLE();
return kNone;
}
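
// The new Lub cases above feed the bitset lattice defined in the types.h hunk
// below; a small sketch, using the bit assignments from that hunk, of how
// subtyping checks reduce to mask arithmetic.
//
//   #include <cstdint>
//
//   constexpr uint32_t kOtherCallable = 1u << 16;
//   constexpr uint32_t kOtherUndetectable = 1u << 18;
//   constexpr uint32_t kCallableProxy = 1u << 19;
//   constexpr uint32_t kFunction = 1u << 21;
//   constexpr uint32_t kBoundFunction = 1u << 22;
//   constexpr uint32_t kCallable = kFunction | kBoundFunction |
//                                  kOtherCallable | kCallableProxy |
//                                  kOtherUndetectable;
//
//   // A is a subtype of B exactly when A's bits are contained in B's bits.
//   constexpr bool Is(uint32_t a, uint32_t b) { return (a & ~b) == 0; }
//
//   static_assert(Is(kBoundFunction, kCallable), "bound fns are callable");
//   static_assert(!Is(kCallable, kFunction), "not every callable is a fn");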
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index e78357030e..b04f4e3c98 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -117,13 +117,16 @@ namespace compiler {
V(InternalizedString, 1u << 13) \
V(OtherString, 1u << 14) \
V(Simd, 1u << 15) \
+ V(OtherCallable, 1u << 16) \
V(OtherObject, 1u << 17) \
- V(OtherUndetectable, 1u << 16) \
- V(Proxy, 1u << 18) \
- V(Function, 1u << 19) \
- V(Hole, 1u << 20) \
- V(OtherInternal, 1u << 21) \
- V(ExternalPointer, 1u << 22) \
+ V(OtherUndetectable, 1u << 18) \
+ V(CallableProxy, 1u << 19) \
+ V(OtherProxy, 1u << 20) \
+ V(Function, 1u << 21) \
+ V(BoundFunction, 1u << 22) \
+ V(Hole, 1u << 23) \
+ V(OtherInternal, 1u << 24) \
+ V(ExternalPointer, 1u << 25) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
@@ -136,6 +139,7 @@ namespace compiler {
V(Unsigned32OrMinusZero, kUnsigned32 | kMinusZero) \
V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
V(Integral32, kSigned32 | kUnsigned32) \
+ V(Integral32OrMinusZeroOrNaN, kIntegral32 | kMinusZero | kNaN) \
V(PlainNumber, kIntegral32 | kOtherNumber) \
V(OrderedNumber, kPlainNumber | kMinusZero) \
V(MinusZeroOrNaN, kMinusZero | kNaN) \
@@ -155,13 +159,22 @@ namespace compiler {
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
- V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
- V(Object, kFunction | kOtherObject | kOtherUndetectable) \
+ V(Proxy, kCallableProxy | kOtherProxy) \
+ V(Callable, kFunction | kBoundFunction | kOtherCallable | \
+ kCallableProxy | kOtherUndetectable) \
+ V(DetectableObject, kFunction | kBoundFunction | kOtherCallable | \
+ kOtherObject) \
+ V(DetectableReceiver, kDetectableObject | kProxy) \
+ V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
+ V(Object, kDetectableObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
V(ReceiverOrUndefined, kReceiver | kUndefined) \
+ V(ReceiverOrNullOrUndefined, kReceiver | kNull | kUndefined) \
V(StringOrReceiver, kString | kReceiver) \
V(Unique, kBoolean | kUniqueName | kNull | kUndefined | \
kReceiver) \
+ V(NonStringUniqueOrHole, kBoolean | kHole | kNull | kReceiver | \
+ kSymbol | kUndefined) \
V(Internal, kHole | kExternalPointer | kOtherInternal) \
V(NonInternal, kPrimitive | kReceiver) \
V(NonNumber, kUnique | kString | kInternal) \
diff --git a/deps/v8/src/compiler/value-numbering-reducer.cc b/deps/v8/src/compiler/value-numbering-reducer.cc
index 30473f2798..38e1f0c84f 100644
--- a/deps/v8/src/compiler/value-numbering-reducer.cc
+++ b/deps/v8/src/compiler/value-numbering-reducer.cc
@@ -18,8 +18,8 @@ namespace {
size_t HashCode(Node* node) {
size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
- for (int j = 0; j < node->InputCount(); ++j) {
- h = base::hash_combine(h, node->InputAt(j)->id());
+ for (Node* input : node->inputs()) {
+ h = base::hash_combine(h, input->id());
}
return h;
}
@@ -32,10 +32,17 @@ bool Equals(Node* a, Node* b) {
DCHECK_NOT_NULL(b->op());
if (!a->op()->Equals(b->op())) return false;
if (a->InputCount() != b->InputCount()) return false;
- for (int j = 0; j < a->InputCount(); ++j) {
- DCHECK_NOT_NULL(a->InputAt(j));
- DCHECK_NOT_NULL(b->InputAt(j));
- if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+ Node::Inputs aInputs = a->inputs();
+ Node::Inputs bInputs = b->inputs();
+
+ auto aIt = aInputs.begin();
+ auto bIt = bInputs.begin();
+ auto aEnd = aInputs.end();
+
+ for (; aIt != aEnd; ++aIt, ++bIt) {
+ DCHECK_NOT_NULL(*aIt);
+ DCHECK_NOT_NULL(*bIt);
+ if ((*aIt)->id() != (*bIt)->id()) return false;
}
return true;
}
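
// Why the hash above must cover exactly the operator and the input ids: two
// nodes are value-numbered together iff those agree. A hedged standalone
// sketch, with an illustrative combine standing in for base::hash_combine.
//
//   #include <cstddef>
//   #include <functional>
//   #include <vector>
//
//   struct MiniNode {
//     size_t op_hash;              // stands in for node->op()->HashCode()
//     std::vector<int> input_ids;  // stands in for ids of node->inputs()
//   };
//
//   bool Equals(const MiniNode& a, const MiniNode& b) {
//     return a.op_hash == b.op_hash && a.input_ids == b.input_ids;
//   }
//
//   size_t HashCode(const MiniNode& n) {
//     size_t h = n.op_hash ^ n.input_ids.size();
//     for (int id : n.input_ids) h = h * 31 + std::hash<int>{}(id);
//     return h;  // equal nodes hash equal; the converse need not hold
//   }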
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 872305b40a..e11fc98320 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -14,11 +14,12 @@
#include "src/compiler/all-nodes.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
+#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
@@ -150,7 +151,7 @@ void Verifier::Visitor::Check(Node* node) {
"control");
}
- // Verify that no-no-throw nodes only have IfSuccess/IfException control
+ // Verify that nodes that can throw only have IfSuccess/IfException control
// uses.
if (!node->op()->HasProperty(Operator::kNoThrow)) {
int count_success = 0, count_exception = 0;
@@ -283,6 +284,11 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kTrapIf:
+ case IrOpcode::kTrapUnless:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
case IrOpcode::kDeoptimize:
case IrOpcode::kReturn:
case IrOpcode::kThrow:
@@ -590,16 +596,38 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSLoadProperty:
+ // Type can be anything.
+ CheckTypeIs(node, Type::Any());
+ CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSLoadNamed:
+ // Type can be anything.
+ CheckTypeIs(node, Type::Any());
+ CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSLoadGlobal:
// Type can be anything.
CheckTypeIs(node, Type::Any());
+ CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
break;
case IrOpcode::kJSStoreProperty:
+ // Type is empty.
+ CheckNotTyped(node);
+ CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSStoreNamed:
+ // Type is empty.
+ CheckNotTyped(node);
+ CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+ break;
case IrOpcode::kJSStoreGlobal:
// Type is empty.
CheckNotTyped(node);
+ CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
+ break;
+ case IrOpcode::kJSStoreDataPropertyInLiteral:
+ // Type is empty.
+ CheckNotTyped(node);
break;
case IrOpcode::kJSDeleteProperty:
case IrOpcode::kJSHasProperty:
@@ -612,6 +640,13 @@ void Verifier::Visitor::Check(Node* node) {
// Type is String.
CheckTypeIs(node, Type::String());
break;
+ case IrOpcode::kJSGetSuperConstructor:
+ // We don't check the input for Type::Function because
+ // this_function can be context-allocated.
+ // Any -> Callable.
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::Callable());
+ break;
case IrOpcode::kJSLoadContext:
// Type can be anything.
@@ -636,6 +671,7 @@ void Verifier::Visitor::Check(Node* node) {
}
case IrOpcode::kJSCallConstruct:
+ case IrOpcode::kJSCallConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
// Type is Receiver.
CheckTypeIs(node, Type::Receiver());
@@ -861,6 +897,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 1, Type::String());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kStringCharAt:
+ // (String, Unsigned32) -> String
+ CheckValueInputIs(node, 0, Type::String());
+ CheckValueInputIs(node, 1, Type::Unsigned32());
+ CheckTypeIs(node, Type::String());
+ break;
case IrOpcode::kStringCharCodeAt:
// (String, Unsigned32) -> UnsignedSmall
CheckValueInputIs(node, 0, Type::String());
@@ -893,6 +935,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckTypeIs(node, Type::Boolean());
break;
+ case IrOpcode::kNewRestParameterElements:
+ case IrOpcode::kNewUnmappedArgumentsElements:
+ CheckTypeIs(node, Type::OtherInternal());
+ break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
break;
@@ -910,8 +956,6 @@ void Verifier::Visitor::Check(Node* node) {
break;
case IrOpcode::kTransitionElementsKind:
CheckValueInputIs(node, 0, Type::Any());
- CheckValueInputIs(node, 1, Type::Internal());
- CheckValueInputIs(node, 2, Type::Internal());
CheckNotTyped(node);
break;
@@ -1041,6 +1085,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Boolean());
CheckNotTyped(node);
break;
+ case IrOpcode::kCheckInternalizedString:
+ CheckValueInputIs(node, 0, Type::Any());
+ CheckTypeIs(node, Type::InternalizedString());
+ break;
case IrOpcode::kCheckMaps:
// (Any, Internal, ..., Internal) -> Any
CheckValueInputIs(node, 0, Type::Any());
@@ -1140,6 +1188,7 @@ void Verifier::Visitor::Check(Node* node) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kProtectedLoad:
+ case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
case IrOpcode::kStackSlot:
case IrOpcode::kWord32And:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index 1b61c1504e..f54ddbf492 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -33,9 +33,12 @@
#include "src/factory.h"
#include "src/log-inl.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
// TODO(titzer): pull WASM_64 up to a common header.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -68,9 +71,6 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
Handle<Context> context, Node** parameters,
int parameter_count, Node** effect_ptr,
Node* control) {
- // At the moment we only allow 2 parameters. If more parameters are needed,
- // then the size of {inputs} below has to be increased accordingly.
- DCHECK(parameter_count <= 2);
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -78,7 +78,11 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- Node* inputs[8];
+  // At the moment we only allow up to kMaxParams parameters. If more
+  // parameters are needed, increase this constant accordingly.
+ static const int kMaxParams = 3;
+ DCHECK_GE(kMaxParams, parameter_count);
+ Node* inputs[kMaxParams + 6];
int count = 0;
inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
for (int i = 0; i < parameter_count; i++) {
@@ -99,6 +103,13 @@ Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
} // namespace
+// TODO(eholk): Support trap handlers on other platforms.
+#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
+const bool kTrapHandlerSupported = true;
+#else
+const bool kTrapHandlerSupported = false;
+#endif
+
// A helper that handles building graph fragments for trapping.
// To avoid generating a ton of redundant code that just calls the runtime
// to trap, we generate a per-trap-reason block of code that all trap sites
@@ -159,21 +170,70 @@ class WasmTrapHelper : public ZoneObject {
return TrapIfEq64(reason, node, 0, position);
}
+ Runtime::FunctionId GetFunctionIdForTrap(wasm::TrapReason reason) {
+ if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
+ switch (reason) {
+#define TRAPREASON_TO_MESSAGE(name) \
+ case wasm::k##name: \
+ return Runtime::kThrowWasm##name;
+ FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+ default:
+ UNREACHABLE();
+ return Runtime::kNumFunctions;
+ }
+ } else {
+ // We use Runtime::kNumFunctions as a marker to tell the code generator
+    // to generate a call to a testing C function instead of a runtime
+ // function. This code should only be called from a cctest.
+ return Runtime::kNumFunctions;
+ }
+ }
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_S390X
+#define WASM_TRAP_IF_SUPPORTED
+#endif
+
// Add a trap if {cond} is true.
void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- AddTrapIf(reason, cond, true, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+ if (FLAG_wasm_trap_if) {
+ int32_t trap_id = GetFunctionIdForTrap(reason);
+ Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
+ builder_->Effect(), builder_->Control());
+ *builder_->control_ = node;
+ builder_->SetSourcePosition(node, position);
+ return;
+ }
+#endif // WASM_TRAP_IF_SUPPORTED
+ BuildTrapIf(reason, cond, true, position);
}
// Add a trap if {cond} is false.
void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
wasm::WasmCodePosition position) {
- AddTrapIf(reason, cond, false, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+ if (FLAG_wasm_trap_if) {
+ int32_t trap_id = GetFunctionIdForTrap(reason);
+
+ Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
+ builder_->Effect(), builder_->Control());
+ *builder_->control_ = node;
+ builder_->SetSourcePosition(node, position);
+ return;
+ }
+#endif // WASM_TRAP_IF_SUPPORTED
+
+ BuildTrapIf(reason, cond, false, position);
}
// Add a trap if {cond} is true or false according to {iftrue}.
- void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
- wasm::WasmCodePosition position) {
+ void BuildTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+ wasm::WasmCodePosition position) {
Node** effect_ptr = builder_->effect_;
Node** control_ptr = builder_->control_;
Node* before = *effect_ptr;
@@ -196,18 +256,18 @@ class WasmTrapHelper : public ZoneObject {
}
}
- Node* GetTrapValue(wasm::LocalType type) {
+ Node* GetTrapValue(wasm::ValueType type) {
switch (type) {
- case wasm::kAstI32:
+ case wasm::kWasmI32:
return jsgraph()->Int32Constant(0xdeadbeef);
- case wasm::kAstI64:
+ case wasm::kWasmI64:
return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
- case wasm::kAstF32:
+ case wasm::kWasmF32:
return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
- case wasm::kAstF64:
+ case wasm::kWasmF64:
return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
break;
- case wasm::kAstS128:
+ case wasm::kWasmS128:
return builder_->CreateS128Value(0xdeadbeef);
break;
default:
@@ -246,7 +306,6 @@ class WasmTrapHelper : public ZoneObject {
}
void BuildTrapCode(Node* reason_node, Node* position_node) {
- Node* end;
Node** control_ptr = builder_->control_;
Node** effect_ptr = builder_->effect_;
wasm::ModuleEnv* module = builder_->module_;
@@ -277,36 +336,36 @@ class WasmTrapHelper : public ZoneObject {
Node* thrw =
graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
*effect_ptr, *control_ptr);
- end = thrw;
+ MergeControlToEnd(jsgraph(), thrw);
} else {
// End the control flow with returning 0xdeadbeef
Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
- end = graph()->NewNode(jsgraph()->common()->Return(),
- jsgraph()->Int32Constant(0), ret_value,
- *effect_ptr, *control_ptr);
+ builder_->Return(ret_value);
}
-
- MergeControlToEnd(jsgraph(), end);
}
};
WasmGraphBuilder::WasmGraphBuilder(
- Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+ wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
+ wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
- module_(nullptr),
- mem_buffer_(nullptr),
- mem_size_(nullptr),
+ module_(module_env),
+ signature_tables_(zone),
function_tables_(zone),
function_table_sizes_(zone),
- control_(nullptr),
- effect_(nullptr),
cur_buffer_(def_buffer_),
cur_bufsize_(kDefaultBufferSize),
trap_(new (zone) WasmTrapHelper(this)),
- function_signature_(function_signature),
+ sig_(sig),
source_position_table_(source_position_table) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ if (sig->GetParam(i) == wasm::kWasmS128) has_simd_ = true;
+ }
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ if (sig->GetReturn(i) == wasm::kWasmS128) has_simd_ = true;
+ }
DCHECK_NOT_NULL(jsgraph_);
}
@@ -318,7 +377,7 @@ Node* WasmGraphBuilder::Start(unsigned params) {
return start;
}
-Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+Node* WasmGraphBuilder::Param(unsigned index) {
return graph()->NewNode(jsgraph()->common()->Parameter(index),
graph()->start());
}
@@ -376,7 +435,7 @@ Node* WasmGraphBuilder::Merge(unsigned count, Node** controls) {
return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
}
-Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
Node* control) {
DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
Node** buf = Realloc(vals, count, count + 1);
@@ -412,6 +471,7 @@ Node* WasmGraphBuilder::Int64Constant(int64_t value) {
void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
Node** effect, Node** control) {
+ if (FLAG_wasm_no_stack_checks) return;
if (effect == nullptr) {
effect = effect_;
}
@@ -434,17 +494,14 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
stack_check.Chain(*control);
Node* effect_true = *effect;
- Node* effect_false;
// Generate a call to the runtime if there is a stack check failure.
- {
- Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
- module_->instance->context, nullptr, 0,
- effect, stack_check.if_false);
- effect_false = node;
- }
+ Node* call = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
+ module_->instance->context, nullptr, 0,
+ effect, stack_check.if_false);
+ SetSourcePosition(call, position);
Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
- effect_true, effect_false, stack_check.merge);
+ effect_true, call, stack_check.merge);
*control = stack_check.merge;
*effect = ephi;
@@ -1042,9 +1099,18 @@ Node* WasmGraphBuilder::Return(unsigned count, Node** vals) {
DCHECK_NOT_NULL(*control_);
DCHECK_NOT_NULL(*effect_);
- Node** buf = Realloc(vals, count, count + 3);
- memmove(buf + 1, buf, sizeof(void*) * count);
+ static const int kStackAllocatedNodeBufferSize = 8;
+ Node* stack_buffer[kStackAllocatedNodeBufferSize];
+ std::vector<Node*> heap_buffer;
+
+ Node** buf = stack_buffer;
+ if (count + 3 > kStackAllocatedNodeBufferSize) {
+ heap_buffer.resize(count + 3);
+ buf = heap_buffer.data();
+ }
+
buf[0] = jsgraph()->Int32Constant(0);
+ memcpy(buf + 1, vals, sizeof(void*) * count);
buf[count + 1] = *effect_;
buf[count + 2] = *control_;
Node* ret =
@@ -1107,7 +1173,7 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
}
Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
- wasm::LocalType wasmtype) {
+ wasm::ValueType wasmtype) {
Node* result;
Node* value = node;
MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1223,7 +1289,7 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
// Perform sign extension using following trick
// result = (x << machine_width - type_width) >> (machine_width -
// type_width)
- if (wasmtype == wasm::kAstI64) {
+ if (wasmtype == wasm::kWasmI64) {
shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
result = graph()->NewNode(
m->Word64Sar(),
@@ -1231,7 +1297,7 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
graph()->NewNode(m->ChangeInt32ToInt64(), result),
shiftBitCount),
shiftBitCount);
- } else if (wasmtype == wasm::kAstI32) {
+ } else if (wasmtype == wasm::kWasmI32) {
shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
result = graph()->NewNode(
m->Word32Sar(),
@@ -1714,9 +1780,8 @@ Node* WasmGraphBuilder::BuildFloatToIntConversionInstruction(
Node* WasmGraphBuilder::GrowMemory(Node* input) {
Diamond check_input_range(
graph(), jsgraph()->common(),
- graph()->NewNode(
- jsgraph()->machine()->Uint32LessThanOrEqual(), input,
- jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
+ graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+ jsgraph()->Uint32Constant(wasm::kV8MaxWasmMemoryPages)),
BranchHint::kTrue);
check_input_range.Chain(*control_);
@@ -1911,36 +1976,101 @@ Node* WasmGraphBuilder::BuildI32AsmjsDivS(Node* left, Node* right) {
}
Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+ CommonOperatorBuilder* c = jsgraph()->common();
MachineOperatorBuilder* m = jsgraph()->machine();
+ Node* const zero = jsgraph()->Int32Constant(0);
Int32Matcher mr(right);
if (mr.HasValue()) {
- if (mr.Value() == 0) {
- return jsgraph()->Int32Constant(0);
- } else if (mr.Value() == -1) {
- return jsgraph()->Int32Constant(0);
+ if (mr.Value() == 0 || mr.Value() == -1) {
+ return zero;
}
return graph()->NewNode(m->Int32Mod(), left, right, *control_);
}
- // asm.js semantics return 0 on divide or mod by zero.
- // Explicit check for x % 0.
- Diamond z(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
- BranchHint::kFalse);
+ // General case for signed integer modulus, with optimization for (unknown)
+ // power of 2 right hand side.
+ //
+ // if 0 < right then
+ // msk = right - 1
+  //     if (right & msk) != 0 then
+ // left % right
+ // else
+ // if left < 0 then
+ // -(-left & msk)
+ // else
+ // left & msk
+ // else
+ // if right < -1 then
+ // left % right
+ // else
+ // zero
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+ Node* const minus_one = jsgraph()->Int32Constant(-1);
- // Explicit check for x % -1.
- Diamond d(
- graph(), jsgraph()->common(),
- graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- d.Chain(z.if_false);
+ const Operator* const merge_op = c->Merge(2);
+ const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
+
+ Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+ Node* branch0 =
+ graph()->NewNode(c->Branch(BranchHint::kTrue), check0, graph()->start());
+
+ Node* if_true0 = graph()->NewNode(c->IfTrue(), branch0);
+ Node* true0;
+ {
+ Node* msk = graph()->NewNode(m->Int32Add(), right, minus_one);
+
+ Node* check1 = graph()->NewNode(m->Word32And(), right, msk);
+ Node* branch1 = graph()->NewNode(c->Branch(), check1, if_true0);
+
+ Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+ Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+ Node* false1;
+ {
+ Node* check2 = graph()->NewNode(m->Int32LessThan(), left, zero);
+ Node* branch2 =
+ graph()->NewNode(c->Branch(BranchHint::kFalse), check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(c->IfTrue(), branch2);
+ Node* true2 = graph()->NewNode(
+ m->Int32Sub(), zero,
+ graph()->NewNode(m->Word32And(),
+ graph()->NewNode(m->Int32Sub(), zero, left), msk));
+
+ Node* if_false2 = graph()->NewNode(c->IfFalse(), branch2);
+ Node* false2 = graph()->NewNode(m->Word32And(), left, msk);
+
+ if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+ }
+
+ if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+ }
+
+ Node* if_false0 = graph()->NewNode(c->IfFalse(), branch0);
+ Node* false0;
+ {
+ Node* check1 = graph()->NewNode(m->Int32LessThan(), right, minus_one);
+ Node* branch1 =
+ graph()->NewNode(c->Branch(BranchHint::kTrue), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+ Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+ Node* false1 = zero;
+
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+ }
- return z.Phi(
- MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
- graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+ return graph()->NewNode(phi_op, true0, false0, merge0);
}
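
// The graph built above encodes the following scalar semantics; a minimal
// sketch assuming two's-complement int32_t, with the wrapping negation of
// the machine-level Int32Sub(0, left) made explicit via uint32_t.
//
//   #include <cstdint>
//
//   int32_t AsmjsRemSSketch(int32_t left, int32_t right) {
//     if (right > 0) {
//       int32_t msk = right - 1;
//       if ((right & msk) != 0) return left % right;  // not a power of two
//       if (left < 0) {
//         uint32_t neg = 0u - static_cast<uint32_t>(left);  // wrapping -left
//         return -static_cast<int32_t>(neg & static_cast<uint32_t>(msk));
//       }
//       return left & msk;
//     }
//     if (right < -1) return left % right;
//     return 0;  // right == 0 or right == -1: asm.js yields 0, never traps
//   }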
Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
@@ -2016,6 +2146,8 @@ Node* WasmGraphBuilder::BuildI64RemS(Node* left, Node* right,
graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
jsgraph()->Int64Constant(-1)));
+ d.Chain(*control_);
+
Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
d.if_false);
@@ -2179,6 +2311,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
Node* table = function_tables_[table_index];
+ Node* signatures = signature_tables_[table_index];
// Load signature from the table and check.
// The table is a FixedArray; signatures are encoded as SMIs.
@@ -2187,7 +2320,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
const int fixed_offset = access.header_size - access.tag();
{
Node* load_sig = graph()->NewNode(
- machine->Load(MachineType::AnyTagged()), table,
+ machine->Load(MachineType::AnyTagged()), signatures,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
@@ -2202,14 +2335,12 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
}
// Load code object from the table.
- uint32_t table_size = module_->module->function_tables[table_index].min_size;
- uint32_t offset = fixed_offset + kPointerSize * table_size;
Node* load_code = graph()->NewNode(
machine->Load(MachineType::AnyTagged()), table,
graph()->NewNode(machine->Int32Add(),
graph()->NewNode(machine->Word32Shl(), key,
Int32Constant(kPointerSizeLog2)),
- Uint32Constant(offset)),
+ Uint32Constant(fixed_offset)),
*effect_, *control_);
args[0] = load_code;
@@ -2342,24 +2473,20 @@ Node* WasmGraphBuilder::BuildChangeFloat64ToTagged(Node* value) {
return value;
}
-Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
switch (type) {
- case wasm::kAstI32:
+ case wasm::kWasmI32:
return BuildChangeInt32ToTagged(node);
- case wasm::kAstS128:
- case wasm::kAstI64:
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr,
- 0, effect_, *control_);
- case wasm::kAstF32:
+ case wasm::kWasmS128:
+ case wasm::kWasmI64:
+ UNREACHABLE();
+ case wasm::kWasmF32:
node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
node);
return BuildChangeFloat64ToTagged(node);
- case wasm::kAstF64:
+ case wasm::kWasmF64:
return BuildChangeFloat64ToTagged(node);
- case wasm::kAstStmt:
+ case wasm::kWasmStmt:
return jsgraph()->UndefinedConstant();
default:
UNREACHABLE();
@@ -2367,8 +2494,7 @@ Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
}
}
-Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
- Node* effect, Node* control) {
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2376,7 +2502,9 @@ Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
Node* stub_code = jsgraph()->HeapConstant(callable.code());
Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
- node, context, effect, control);
+ node, context, *effect_, *control_);
+
+ SetSourcePosition(result, 1);
*effect_ = result;
@@ -2495,35 +2623,30 @@ Node* WasmGraphBuilder::BuildChangeTaggedToFloat64(Node* value) {
}
Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
- wasm::LocalType type) {
+ wasm::ValueType type) {
+ DCHECK_NE(wasm::kWasmStmt, type);
+
// Do a JavaScript ToNumber.
- Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+ Node* num = BuildJavaScriptToNumber(node, context);
// Change representation.
SimplifiedOperatorBuilder simplified(jsgraph()->zone());
num = BuildChangeTaggedToFloat64(num);
switch (type) {
- case wasm::kAstI32: {
+ case wasm::kWasmI32: {
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
num);
break;
}
- case wasm::kAstS128:
- case wasm::kAstI64:
- // Throw a TypeError. The native context is good enough here because we
- // only throw a TypeError.
- return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
- jsgraph()->isolate()->native_context(), nullptr,
- 0, effect_, *control_);
- case wasm::kAstF32:
+ case wasm::kWasmS128:
+ case wasm::kWasmI64:
+ UNREACHABLE();
+ case wasm::kWasmF32:
num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
num);
break;
- case wasm::kAstF64:
- break;
- case wasm::kAstStmt:
- num = jsgraph()->Int32Constant(0);
+ case wasm::kWasmF64:
break;
default:
UNREACHABLE();
@@ -2613,22 +2736,59 @@ Node* WasmGraphBuilder::BuildHeapNumberValueIndexConstant() {
return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
+bool IsJSCompatible(wasm::ValueType type) {
+ return (type != wasm::kWasmI64) && (type != wasm::kWasmS128);
+}
+
+bool HasJSCompatibleSignature(wasm::FunctionSig* sig) {
+ for (size_t i = 0; i < sig->parameter_count(); i++) {
+ if (!IsJSCompatible(sig->GetParam(i))) {
+ return false;
+ }
+ }
+ for (size_t i = 0; i < sig->return_count(); i++) {
+ if (!IsJSCompatible(sig->GetReturn(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
wasm::FunctionSig* sig) {
int wasm_count = static_cast<int>(sig->parameter_count());
- int param_count;
- if (jsgraph()->machine()->Is64()) {
- param_count = static_cast<int>(sig->parameter_count());
- } else {
- param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
- }
- int count = param_count + 3;
+ int count = wasm_count + 3;
Node** args = Buffer(count);
// Build the start and the JS parameter nodes.
- Node* start = Start(param_count + 5);
+ Node* start = Start(wasm_count + 5);
*control_ = start;
*effect_ = start;
+
+ if (!HasJSCompatibleSignature(sig_)) {
+ // Throw a TypeError. The native context is good enough here because we
+ // only throw a TypeError.
+ BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ jsgraph()->isolate()->native_context(), nullptr, 0,
+ effect_, *control_);
+
+ // Add a dummy call to the wasm function so that the generated wrapper
+ // contains a reference to the wrapped wasm function. Without this reference
+ // the wasm function could not be re-imported into another wasm module.
+ int pos = 0;
+ args[pos++] = HeapConstant(wasm_code);
+ args[pos++] = *effect_;
+ args[pos++] = *control_;
+
+ // We only need a dummy call descriptor.
+ wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
+ CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
+ jsgraph()->zone(), dummy_sig_builder.Build());
+ *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+ Return(jsgraph()->UndefinedConstant());
+ return;
+ }
+
// Create the context parameter
Node* context = graph()->NewNode(
jsgraph()->common()->Parameter(
@@ -2640,15 +2800,9 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// Convert JS parameters to WASM numbers.
for (int i = 0; i < wasm_count; ++i) {
- Node* param =
- graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
+ Node* param = Param(i + 1);
Node* wasm_param = FromJS(param, context, sig->GetParam(i));
args[pos++] = wasm_param;
- if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
- // We make up the high word with SAR to get the proper sign extension.
- args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
- wasm_param, jsgraph()->Int32Constant(31));
- }
}
args[pos++] = *effect_;
@@ -2657,23 +2811,13 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
// Call the WASM code.
CallDescriptor* desc =
wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
- if (jsgraph()->machine()->Is32()) {
- desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
- }
+
Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+ *effect_ = call;
Node* retval = call;
- if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
- sig->GetReturn(0) == wasm::kAstI64) {
- // The return values comes as two values, we pick the low word.
- retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
- graph()->start());
- }
Node* jsval = ToJS(
- retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
- jsgraph()->Int32Constant(0), jsval, call, start);
-
- MergeControlToEnd(jsgraph(), ret);
+ retval, sig->return_count() == 0 ? wasm::kWasmStmt : sig->GetReturn());
+ Return(jsval);
}
int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
@@ -2681,14 +2825,8 @@ int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
// Convert WASM numbers to JS values.
int param_index = 0;
for (int i = 0; i < param_count; ++i) {
- Node* param = graph()->NewNode(
- jsgraph()->common()->Parameter(param_index++), graph()->start());
+ Node* param = Param(param_index++);
args[pos++] = ToJS(param, sig->GetParam(i));
- if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
- // On 32 bit platforms we have to skip the high word of int64
- // parameters.
- param_index++;
- }
}
return pos;
}
@@ -2698,19 +2836,23 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
DCHECK(target->IsCallable());
int wasm_count = static_cast<int>(sig->parameter_count());
- int param_count;
- if (jsgraph()->machine()->Is64()) {
- param_count = wasm_count;
- } else {
- param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
- }
// Build the start and the parameter nodes.
Isolate* isolate = jsgraph()->isolate();
CallDescriptor* desc;
- Node* start = Start(param_count + 3);
+ Node* start = Start(wasm_count + 3);
*effect_ = start;
*control_ = start;
+
+ if (!HasJSCompatibleSignature(sig_)) {
+    // Throw a TypeError. The native context is sufficient here because the
+    // wrapper only ever throws a TypeError.
+ Return(BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
+ jsgraph()->isolate()->native_context(), nullptr,
+ 0, effect_, *control_));
+ return;
+ }
+
Node** args = Buffer(wasm_count + 7);
Node* call;
@@ -2777,24 +2919,113 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
}
+ *effect_ = call;
+ SetSourcePosition(call, 0);
+
// Convert the return value back.
- Node* ret;
- Node* val =
- FromJS(call, HeapConstant(isolate->native_context()),
- sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
- Node* pop_size = jsgraph()->Int32Constant(0);
+ Node* i32_zero = jsgraph()->Int32Constant(0);
+ Node* val = sig->return_count() == 0
+ ? i32_zero
+ : FromJS(call, HeapConstant(isolate->native_context()),
+ sig->GetReturn());
+ Return(val);
+}
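One behavioral detail of the new return path deserves a note; the summary below is an observation about the code above, not text from the patch:

// Return-path behavior after this change:
//   return_count == 0: the callee's JS result is dropped; the wrapper
//     returns a dummy i32 zero and never calls FromJS, so no ToNumber-style
//     conversion (nor its observable side effects) can run.
//   return_count == 1: the JS value is converted via FromJS in the native
//     context and returned to the wasm caller.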
+
+void WasmGraphBuilder::BuildWasmInterpreterEntry(
+ uint32_t function_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance) {
+ int wasm_count = static_cast<int>(sig->parameter_count());
+ int param_count = jsgraph()->machine()->Is64()
+ ? wasm_count
+ : Int64Lowering::GetParameterCountAfterLowering(sig);
+
+ // Build the start and the parameter nodes.
+ Node* start = Start(param_count + 3);
+ *effect_ = start;
+ *control_ = start;
+
+ // Compute size for the argument buffer.
+ int args_size_bytes = 0;
+ for (int i = 0; i < wasm_count; i++) {
+ args_size_bytes += 1 << ElementSizeLog2Of(sig->GetParam(i));
+ }
+
+ // The return value is also passed via this buffer:
+ DCHECK_GE(wasm::kV8MaxWasmFunctionReturns, sig->return_count());
+ // TODO(wasm): Handle multi-value returns.
+ DCHECK_EQ(1, wasm::kV8MaxWasmFunctionReturns);
+ int return_size_bytes =
+ sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
+
+ // Get a stack slot for the arguments.
+ Node* arg_buffer = graph()->NewNode(jsgraph()->machine()->StackSlot(
+ std::max(args_size_bytes, return_size_bytes)));
+
+ // Now store all our arguments to the buffer.
+ int param_index = 0;
+ int offset = 0;
+ for (int i = 0; i < wasm_count; i++) {
+ Node* param = Param(param_index++);
+ bool is_i64_as_two_params =
+ jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
+ MachineRepresentation param_rep =
+ is_i64_as_two_params ? wasm::kWasmI32 : sig->GetParam(i);
+ StoreRepresentation store_rep(param_rep, WriteBarrierKind::kNoWriteBarrier);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset), param, *effect_, *control_);
+ offset += 1 << ElementSizeLog2Of(param_rep);
+    // TODO(clemensh): Respect endianness here. Might need to swap the upper
+    // and lower words.
+ if (is_i64_as_two_params) {
+ // Also store the upper half.
+ param = Param(param_index++);
+ StoreRepresentation store_rep(wasm::kWasmI32,
+ WriteBarrierKind::kNoWriteBarrier);
+ *effect_ =
+ graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+ Int32Constant(offset), param, *effect_, *control_);
+ offset += 1 << ElementSizeLog2Of(wasm::kWasmI32);
+ }
+ }
+ DCHECK_EQ(param_count, param_index);
+ DCHECK_EQ(args_size_bytes, offset);
+
+  // We pass the raw arg_buffer here. To the GC and other parts it looks like
+  // a Smi (the lowest bit is not set). The runtime function, however, must
+  // not call Smi::value on it; it simply casts it to a byte pointer.
+ Node* parameters[] = {
+ jsgraph()->HeapConstant(instance), // wasm instance
+ jsgraph()->SmiConstant(function_index), // function index
+ arg_buffer, // argument buffer
+ };
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(),
+ jsgraph()->isolate()->native_context(), parameters,
+ arraysize(parameters), effect_, *control_);
+
+ // Read back the return value.
if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
- sig->GetReturn() == wasm::kAstI64) {
- ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
- graph()->NewNode(jsgraph()->machine()->Word32Sar(),
- val, jsgraph()->Int32Constant(31)),
- call, start);
+ sig->GetReturn() == wasm::kWasmI64) {
+ MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(wasm::kWasmI32);
+ Node* lower =
+ graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(0), *effect_, *control_);
+ Node* upper =
+ graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(sizeof(int32_t)), *effect_, *control_);
+ Return(upper, lower);
} else {
- ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
- start);
+ Node* val;
+ if (sig->return_count() == 0) {
+ val = Int32Constant(0);
+ } else {
+ MachineType load_rep =
+ wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
+ val = graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+ Int32Constant(0), *effect_, *control_);
+ }
+ Return(val);
}
-
- MergeControlToEnd(jsgraph(), ret);
}
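To make the buffer layout concrete, here is a worked example as a comment sketch, assuming a hypothetical signature i64 f(i64, f32) compiled for a 32-bit target:

// Hypothetical signature: i64 f(i64, f32), 32-bit target.
//
//   args_size_bytes   = (1 << 3) + (1 << 2) = 12   // i64 + f32
//   return_size_bytes =  1 << 3             =  8   // i64
//   stack slot size   = max(12, 8)          = 12 bytes
//
// Store phase (offsets into arg_buffer):
//   [0..3]   low word of the i64 parameter   (first i32 store)
//   [4..7]   high word of the i64 parameter  (second i32 store)
//   [8..11]  the f32 parameter
//
// After Runtime::kWasmRunInterpreter returns, the i64 result sits at
// offset 0; it is reloaded as two i32 words and returned as (upper, lower).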
Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
@@ -2853,12 +3084,18 @@ Node* WasmGraphBuilder::MemSize(uint32_t offset) {
void WasmGraphBuilder::EnsureFunctionTableNodes() {
if (function_tables_.size() > 0) return;
- for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
- auto handle = module_->instance->function_tables[i];
- DCHECK(!handle.is_null());
- function_tables_.push_back(HeapConstant(handle));
+ size_t tables_size = module_->instance->function_tables.size();
+ DCHECK(tables_size == module_->instance->signature_tables.size());
+ for (size_t i = 0; i < tables_size; ++i) {
+ auto function_handle = module_->instance->function_tables[i];
+ auto signature_handle = module_->instance->signature_tables[i];
+ DCHECK(!function_handle.is_null() && !signature_handle.is_null());
+ function_tables_.push_back(HeapConstant(function_handle));
+ signature_tables_.push_back(HeapConstant(signature_handle));
uint32_t table_size = module_->module->function_tables[i].min_size;
- function_table_sizes_.push_back(Uint32Constant(table_size));
+ function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+ static_cast<uint32_t>(table_size),
+ RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
}
}
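The switch from Uint32Constant to a relocatable constant matters for code patching; a brief sketch of the difference follows (the grow scenario is an assumption suggested by the reloc mode's name, not stated in the patch):

// Plain constant: the size is baked into the generated code for good.
Node* fixed = Uint32Constant(table_size);
// Relocatable constant: the embedded value is tagged in RelocInfo as
// WASM_FUNCTION_TABLE_SIZE_REFERENCE, so the runtime can later locate and
// patch it in compiled code (e.g. when the function table grows).
Node* patchable = jsgraph()->RelocatableInt32Constant(
    table_size, RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE);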
@@ -2895,6 +3132,7 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
uint32_t offset,
wasm::WasmCodePosition position) {
DCHECK(module_ && module_->instance);
+ if (FLAG_wasm_no_bounds_checks) return;
uint32_t size = module_->instance->mem_size;
byte memsize = wasm::WasmOpcodes::MemSize(memtype);
@@ -2945,15 +3183,14 @@ void WasmGraphBuilder::BoundsCheckMem(MachineType memtype, Node* index,
trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
}
-
-Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
Node* index, uint32_t offset,
uint32_t alignment,
wasm::WasmCodePosition position) {
Node* load;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- if (!FLAG_wasm_trap_handler) {
+ if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
BoundsCheckMem(memtype, index, offset, position);
}
bool aligned = static_cast<int>(alignment) >=
@@ -2961,18 +3198,19 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
if (aligned ||
jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
- if (FLAG_wasm_trap_handler) {
- Node* context = HeapConstant(module_->instance->context);
+ if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ DCHECK(FLAG_wasm_guard_pages);
Node* position_node = jsgraph()->Int32Constant(position);
load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
- MemBuffer(offset), index, context, position_node,
- *effect_, *control_);
+ MemBuffer(offset), index, position_node, *effect_,
+ *control_);
} else {
load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
} else {
- DCHECK(!FLAG_wasm_trap_handler);
+ // TODO(eholk): Support unaligned loads with trap handlers.
+ DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
MemBuffer(offset), index, *effect_, *control_);
}
@@ -2983,7 +3221,7 @@ Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
load = BuildChangeEndianness(load, memtype, type);
#endif
- if (type == wasm::kAstI64 &&
+ if (type == wasm::kWasmI64 &&
ElementSizeLog2Of(memtype.representation()) < 3) {
// TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
if (memtype.IsSigned()) {
@@ -3006,7 +3244,9 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
Node* store;
// WASM semantics throw on OOB. Introduce explicit bounds check.
- BoundsCheckMem(memtype, index, offset, position);
+ if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+ BoundsCheckMem(memtype, index, offset, position);
+ }
StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
bool aligned = static_cast<int>(alignment) >=
@@ -3018,11 +3258,20 @@ Node* WasmGraphBuilder::StoreMem(MachineType memtype, Node* index,
if (aligned ||
jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
- StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
- store =
- graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
- index, val, *effect_, *control_);
+ if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+ Node* position_node = jsgraph()->Int32Constant(position);
+ store = graph()->NewNode(
+ jsgraph()->machine()->ProtectedStore(memtype.representation()),
+ MemBuffer(offset), index, val, position_node, *effect_, *control_);
+ } else {
+ StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+ store =
+ graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+ index, val, *effect_, *control_);
+ }
} else {
+ // TODO(eholk): Support unaligned stores with trap handlers.
+ DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
UnalignedStoreRepresentation rep(memtype.representation());
store =
graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
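With this hunk, stores adopt the same out-of-bounds policy as loads. A condensed pseudocode summary, where names like use_traps are illustrative rather than taken from the patch:

// Shared memory-access policy (illustrative pseudocode):
const bool use_traps = FLAG_wasm_trap_handler && kTrapHandlerSupported;
if (!use_traps) {
  BoundsCheckMem(memtype, index, offset, position);  // explicit OOB branch
}
if (aligned || unaligned_access_supported) {
  // use_traps: emit Protected{Load,Store}, which records the instruction so
  // that a guard-page fault can be turned into a wasm trap at `position`.
  // otherwise: emit a plain Load/Store behind the explicit bounds check.
} else {
  // Unaligned accesses are not yet covered by the trap handler.
  DCHECK(!use_traps);
  // emit Unaligned{Load,Store}
}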
@@ -3070,16 +3319,14 @@ Graph* WasmGraphBuilder::graph() { return jsgraph()->graph(); }
void WasmGraphBuilder::Int64LoweringForTesting() {
if (jsgraph()->machine()->Is32()) {
Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
- jsgraph()->common(), jsgraph()->zone(),
- function_signature_);
+ jsgraph()->common(), jsgraph()->zone(), sig_);
r.LowerGraph();
}
}
void WasmGraphBuilder::SimdScalarLoweringForTesting() {
SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
- jsgraph()->common(), jsgraph()->zone(),
- function_signature_)
+ jsgraph()->common(), jsgraph()->zone(), sig_)
.LowerGraph();
}
@@ -3093,6 +3340,7 @@ void WasmGraphBuilder::SetSourcePosition(Node* node,
Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
// TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use
// instead of creating a SIMD Value.
+ has_simd_ = true;
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
Int32Constant(value), Int32Constant(value),
Int32Constant(value), Int32Constant(value));
@@ -3100,36 +3348,78 @@ Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
const NodeVector& inputs) {
+ has_simd_ = true;
switch (opcode) {
+ case wasm::kExprF32x4Splat:
+ return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
+ inputs[0], inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprF32x4FromInt32x4:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
+ inputs[0]);
+ case wasm::kExprF32x4FromUint32x4:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
+ inputs[0]);
+ case wasm::kExprF32x4Abs:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Abs(), inputs[0]);
+ case wasm::kExprF32x4Neg:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Neg(), inputs[0]);
+ case wasm::kExprF32x4Add:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Sub:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Eq:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4Equal(), inputs[0],
+ inputs[1]);
+ case wasm::kExprF32x4Ne:
+ return graph()->NewNode(jsgraph()->machine()->Float32x4NotEqual(),
+ inputs[0], inputs[1]);
case wasm::kExprI32x4Splat:
return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
inputs[0], inputs[0], inputs[0]);
+ case wasm::kExprI32x4FromFloat32x4:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
+ inputs[0]);
+ case wasm::kExprUi32x4FromFloat32x4:
+ return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
+ inputs[0]);
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
- inputs[0], inputs[1]);
- case wasm::kExprF32x4Splat:
- return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
- inputs[0], inputs[0], inputs[0], inputs[0]);
- case wasm::kExprF32x4Add:
- return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
+ case wasm::kExprI32x4Sub:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
+ inputs[1]);
+ case wasm::kExprI32x4Eq:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4Ne:
+ return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
+ inputs[0], inputs[1]);
+ case wasm::kExprS32x4Select:
+ return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
+ inputs[1], inputs[2]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
}
-Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
- Node* input) {
+Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+ const NodeVector& inputs) {
+ has_simd_ = true;
switch (opcode) {
case wasm::kExprI32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
- Int32Constant(lane));
+ return graph()->NewNode(jsgraph()->common()->Int32x4ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprI32x4ReplaceLane:
+ return graph()->NewNode(jsgraph()->common()->Int32x4ReplaceLane(lane),
+ inputs[0], inputs[1]);
case wasm::kExprF32x4ExtractLane:
- return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
- input, Int32Constant(lane));
+ return graph()->NewNode(jsgraph()->common()->Float32x4ExtractLane(lane),
+ inputs[0]);
+ case wasm::kExprF32x4ReplaceLane:
+ return graph()->NewNode(jsgraph()->common()->Float32x4ReplaceLane(lane),
+ inputs[0], inputs[1]);
default:
return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
}
@@ -3156,9 +3446,10 @@ static void RecordFunctionCompilation(CodeEventListener::LogEventsAndTags tag,
*script_str, 0, 0));
}
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
Handle<Code> wasm_code, uint32_t index) {
- const wasm::WasmFunction* func = &module->module->functions[index];
+ const wasm::WasmFunction* func = &module->functions[index];
//----------------------------------------------------------------------------
// Create the Graph
@@ -3172,10 +3463,10 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+ wasm::ModuleEnv module_env(module, nullptr);
+ WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
- builder.set_module(module);
builder.BuildJSToWasmWrapper(wasm_code, func->sig);
//----------------------------------------------------------------------------
@@ -3188,8 +3479,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
}
// Schedule and compile to machine code.
- int params =
- static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+ int params = static_cast<int>(
+ module_env.GetFunctionSignature(index)->parameter_count());
CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
&zone, false, params + 1, CallDescriptor::kNoFlags);
Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
@@ -3222,10 +3513,11 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
- wasm::WasmName("export"),
- module->module->GetName(func->name_offset, func->name_length));
+ char func_name[32];
+ SNPrintF(ArrayVector(func_name), "js-to-wasm#%d", func->func_index);
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "js-to-wasm", index, wasm::WasmName("export"),
+ CStrVector(func_name));
}
return code;
}
@@ -3233,7 +3525,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
- MaybeHandle<String> import_name) {
+ MaybeHandle<String> import_name,
+ wasm::ModuleOrigin origin) {
//----------------------------------------------------------------------------
// Create the Graph
//----------------------------------------------------------------------------
@@ -3246,7 +3539,12 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(&zone, &jsgraph, sig);
+ SourcePositionTable* source_position_table =
+ origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
+ : nullptr;
+
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+ source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmToJSWrapper(target, sig);
@@ -3282,7 +3580,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
CompilationInfo info(func_name, isolate, &zone, flags);
- code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr,
+ source_position_table);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code && !code.is_null()) {
OFStream os(stdout);
@@ -3310,6 +3609,72 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
return code;
}
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+ wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance) {
+ //----------------------------------------------------------------------------
+ // Create the Graph
+ //----------------------------------------------------------------------------
+ Zone zone(isolate->allocator(), ZONE_NAME);
+ Graph graph(&zone);
+ CommonOperatorBuilder common(&zone);
+ MachineOperatorBuilder machine(&zone);
+ JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+
+ Node* control = nullptr;
+ Node* effect = nullptr;
+
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+ builder.set_control_ptr(&control);
+ builder.set_effect_ptr(&effect);
+ builder.BuildWasmInterpreterEntry(func_index, sig, instance);
+
+ Handle<Code> code = Handle<Code>::null();
+ {
+ if (FLAG_trace_turbo_graph) { // Simple textual RPO.
+ OFStream os(stdout);
+ os << "-- Wasm to interpreter graph -- " << std::endl;
+ os << AsRPO(graph);
+ }
+
+ // Schedule and compile to machine code.
+ CallDescriptor* incoming =
+ wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+ if (machine.Is32()) {
+ incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+ }
+ Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
+ EmbeddedVector<char, 32> debug_name;
+ int name_len = SNPrintF(debug_name, "wasm-to-interpreter#%d", func_index);
+ DCHECK(name_len > 0 && name_len < debug_name.length());
+ debug_name.Truncate(name_len);
+ DCHECK_EQ('\0', debug_name.start()[debug_name.length()]);
+
+ CompilationInfo info(debug_name, isolate, &zone, flags);
+ code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+ if (FLAG_print_opt_code && !code.is_null()) {
+ OFStream os(stdout);
+ code->Disassemble(debug_name.start(), os);
+ }
+#endif
+
+ if (isolate->logger()->is_logging_code_events() ||
+ isolate->is_profiling()) {
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "wasm-to-interpreter", func_index,
+ wasm::WasmName("module"), debug_name);
+ }
+ }
+
+ Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(1, TENURED);
+ Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+ deopt_data->set(0, *weak_instance);
+ code->set_deoptimization_data(*deopt_data);
+
+ return code;
+}
+
SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
double* decode_ms) {
base::ElapsedTimer decode_timer;
@@ -3323,12 +3688,12 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
MachineOperatorBuilder* machine = jsgraph_->machine();
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
- WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
- source_position_table);
- wasm::FunctionBody body = {
- module_env_, function_->sig, module_env_->module->module_start,
- module_env_->module->module_start + function_->code_start_offset,
- module_env_->module->module_start + function_->code_end_offset};
+ WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
+ function_->sig, source_position_table);
+ const byte* module_start = module_env_->module_bytes.start();
+ wasm::FunctionBody body = {function_->sig, module_start,
+ module_start + function_->code_start_offset,
+ module_start + function_->code_end_offset};
graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
@@ -3341,18 +3706,25 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
}
if (machine->Is32()) {
- Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
- r.LowerGraph();
+ Int64Lowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ .LowerGraph();
}
- SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
- .LowerGraph();
+ if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
+ SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+ .LowerGraph();
+ }
int index = static_cast<int>(function_->func_index);
if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
OFStream os(stdout);
- PrintAst(isolate_->allocator(), body, os, nullptr);
+ PrintRawWasmCode(isolate_->allocator(), body, module_env_->module);
+ }
+ if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
+ OFStream os(stdout);
+ PrintWasmText(module_env_->module, *module_env_, function_->func_index, os,
+ nullptr);
}
if (FLAG_trace_wasm_decode_time) {
*decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -3362,13 +3734,13 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function,
uint32_t index)
: thrower_(thrower),
isolate_(isolate),
module_env_(module_env),
- function_(function),
+ function_(&module_env->module->functions[index]),
graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
jsgraph_(new (graph_zone()) JSGraph(
isolate, new (graph_zone()) Graph(graph_zone()),
@@ -3378,15 +3750,14 @@ WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
InstructionSelector::SupportedMachineOperatorFlags(),
InstructionSelector::AlignmentRequirements()))),
compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(function->name_length != 0
- ? module_env->module->GetNameOrNull(function->name_offset,
- function->name_length)
- : ArrayVector("wasm"),
+ info_(function->name_length != 0 ? module_env->GetNameOrNull(function)
+ : ArrayVector("wasm"),
isolate, &compilation_zone_,
Code::ComputeFlags(Code::WASM_FUNCTION)),
job_(),
index_(index),
- ok_(true) {
+ ok_(true),
+ protected_instructions_(&compilation_zone_) {
// Create and cache this node in the main thread.
jsgraph_->CEntryStubConstant(1);
}
@@ -3426,8 +3797,9 @@ void WasmCompilationUnit::ExecuteCompilation() {
descriptor =
module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
}
- job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
- descriptor, source_positions));
+ job_.reset(Pipeline::NewWasmCompilationJob(
+ &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
+ module_env_->module->origin != wasm::kWasmOrigin));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(ahaas): The counters are not thread-safe at the moment.
@@ -3451,8 +3823,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (graph_construction_result_.failed()) {
// Add the function as another context for the exception
ScopedVector<char> buffer(128);
- wasm::WasmName name = module_env_->module->GetName(
- function_->name_offset, function_->name_length);
+ wasm::WasmName name = module_env_->GetName(function_);
SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
function_->func_index, name.length(), name.start());
thrower_->CompileFailed(buffer.start(), graph_construction_result_);
@@ -3472,11 +3843,10 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
if (isolate_->logger()->is_logging_code_events() ||
isolate_->is_profiling()) {
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
- function_->func_index, wasm::WasmName("module"),
- module_env_->module->GetName(function_->name_offset,
- function_->name_length));
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
+ "WASM_function", function_->func_index,
+ wasm::WasmName("module"),
+ module_env_->GetName(function_));
}
if (FLAG_trace_wasm_decode_time) {
@@ -3487,9 +3857,27 @@ Handle<Code> WasmCompilationUnit::FinishCompilation() {
compile_ms);
}
+ Handle<FixedArray> protected_instructions = PackProtectedInstructions();
+ code->set_protected_instructions(*protected_instructions);
+
return code;
}
+Handle<FixedArray> WasmCompilationUnit::PackProtectedInstructions() const {
+ const int num_instructions = static_cast<int>(protected_instructions_.size());
+ Handle<FixedArray> fn_protected = isolate_->factory()->NewFixedArray(
+ num_instructions * Code::kTrapDataSize, TENURED);
+ for (unsigned i = 0; i < protected_instructions_.size(); ++i) {
+ const trap_handler::ProtectedInstructionData& instruction =
+ protected_instructions_[i];
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapCodeOffset,
+ Smi::FromInt(instruction.instr_offset));
+ fn_protected->set(Code::kTrapDataSize * i + Code::kTrapLandingOffset,
+ Smi::FromInt(instruction.landing_offset));
+ }
+ return fn_protected;
+}
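The packed array stores two Smis per protected instruction, addressed via the Code constants used above; the resulting layout is:

// FixedArray layout, one record per protected instruction i:
//   fn_protected[kTrapDataSize * i + kTrapCodeOffset]    = Smi(instr_offset)
//       // pc offset of the potentially faulting memory instruction
//   fn_protected[kTrapDataSize * i + kTrapLandingOffset] = Smi(landing_offset)
//       // pc offset of the out-of-line trap code to resume at
// The trap handler matches a faulting pc against instr_offset and redirects
// execution to landing_offset, which raises the wasm trap.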
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index b4bc350297..a1bad1f0e5 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -11,6 +11,8 @@
// Do not include anything from src/compiler here!
#include "src/compilation-info.h"
#include "src/compiler.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
#include "src/zone/zone.h"
@@ -29,8 +31,10 @@ class SourcePositionTable;
namespace wasm {
// Forward declarations for some WASM data structures.
+struct ModuleBytesEnv;
struct ModuleEnv;
struct WasmFunction;
+struct WasmModule;
class ErrorThrower;
struct DecodeStruct;
@@ -43,7 +47,7 @@ namespace compiler {
class WasmCompilationUnit final {
public:
WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function, uint32_t index);
Zone* graph_zone() { return graph_zone_.get(); }
@@ -54,19 +58,21 @@ class WasmCompilationUnit final {
static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
Isolate* isolate,
- wasm::ModuleEnv* module_env,
+ wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function) {
- WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+ WasmCompilationUnit unit(thrower, isolate, module_env, function,
+ function->func_index);
unit.ExecuteCompilation();
return unit.FinishCompilation();
}
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+ Handle<FixedArray> PackProtectedInstructions() const;
wasm::ErrorThrower* thrower_;
Isolate* isolate_;
- wasm::ModuleEnv* module_env_;
+ wasm::ModuleBytesEnv* module_env_;
const wasm::WasmFunction* function_;
// The graph zone is deallocated at the end of ExecuteCompilation.
std::unique_ptr<Zone> graph_zone_;
@@ -77,6 +83,9 @@ class WasmCompilationUnit final {
uint32_t index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_;
+ ZoneVector<trap_handler::ProtectedInstructionData>
+ protected_instructions_; // Instructions that are protected by the signal
+ // handler.
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
@@ -85,12 +94,20 @@ class WasmCompilationUnit final {
Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
wasm::FunctionSig* sig, uint32_t index,
Handle<String> module_name,
- MaybeHandle<String> import_name);
+ MaybeHandle<String> import_name,
+ wasm::ModuleOrigin origin);
// Wraps a given wasm code object, producing a code object.
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+ const wasm::WasmModule* module,
Handle<Code> wasm_code, uint32_t index);
+// Compiles a stub that redirects a call to a wasm function to the wasm
+// interpreter. It's ABI compatible with the compiled wasm function.
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+ wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance);
+
// Abstracts details of building TurboFan graph nodes for WASM to separate
// the WASM decoder from the internal details of TurboFan.
class WasmTrapHelper;
@@ -98,7 +115,7 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
WasmGraphBuilder(
- Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+ wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
@@ -116,11 +133,11 @@ class WasmGraphBuilder {
//-----------------------------------------------------------------------
Node* Error();
Node* Start(unsigned params);
- Node* Param(unsigned index, wasm::LocalType type);
+ Node* Param(unsigned index);
Node* Loop(Node* entry);
Node* Terminate(Node* effect, Node* control);
Node* Merge(unsigned count, Node** controls);
- Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+ Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
Node* EffectPhi(unsigned count, Node** effects, Node* control);
Node* NumberConstant(int32_t value);
Node* Uint32Constant(uint32_t value);
@@ -155,7 +172,12 @@ class WasmGraphBuilder {
Node* Switch(unsigned count, Node* key);
Node* IfValue(int32_t value, Node* sw);
Node* IfDefault(Node* sw);
- Node* Return(unsigned count, Node** vals);
+ Node* Return(unsigned count, Node** nodes);
+ template <typename... Nodes>
+ Node* Return(Node* fst, Nodes*... more) {
+ Node* arr[] = {fst, more...};
+ return Return(arraysize(arr), arr);
+ }
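The variadic overload simply packs its arguments and forwards to the array form; for example, the interpreter entry above returns a 32-bit-split i64 with Return(upper, lower):

// Return(upper, lower) expands to:
//   Node* arr[] = {upper, lower};
//   Return(arraysize(arr), arr);  // == Return(2, arr)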
Node* ReturnVoid();
Node* Unreachable(wasm::WasmCodePosition position);
@@ -166,9 +188,11 @@ class WasmGraphBuilder {
void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
+ void BuildWasmInterpreterEntry(uint32_t func_index, wasm::FunctionSig* sig,
+ Handle<WasmInstanceObject> instance);
- Node* ToJS(Node* node, wasm::LocalType type);
- Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+ Node* ToJS(Node* node, wasm::ValueType type);
+ Node* FromJS(Node* node, Node* context, wasm::ValueType type);
Node* Invert(Node* node);
void EnsureFunctionTableNodes();
@@ -178,7 +202,7 @@ class WasmGraphBuilder {
Node* CurrentMemoryPages();
Node* GetGlobal(uint32_t index);
Node* SetGlobal(uint32_t index, Node* val);
- Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+ Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
uint32_t offset, uint32_t alignment,
wasm::WasmCodePosition position);
Node* StoreMem(MachineType type, Node* index, uint32_t offset,
@@ -190,13 +214,11 @@ class WasmGraphBuilder {
Node* Control() { return *control_; }
Node* Effect() { return *effect_; }
- void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
-
void set_control_ptr(Node** control) { this->control_ = control; }
void set_effect_ptr(Node** effect) { this->effect_ = effect; }
- wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+ wasm::FunctionSig* GetFunctionSignature() { return sig_; }
void Int64LoweringForTesting();
@@ -207,7 +229,13 @@ class WasmGraphBuilder {
Node* CreateS128Value(int32_t value);
Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
- Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
+
+ Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+ const NodeVector& inputs);
+
+ bool has_simd() const { return has_simd_; }
+
+ wasm::ModuleEnv* module_env() const { return module_; }
private:
static const int kDefaultBufferSize = 16;
@@ -215,19 +243,21 @@ class WasmGraphBuilder {
Zone* zone_;
JSGraph* jsgraph_;
- wasm::ModuleEnv* module_;
- Node* mem_buffer_;
- Node* mem_size_;
+ wasm::ModuleEnv* module_ = nullptr;
+ Node* mem_buffer_ = nullptr;
+ Node* mem_size_ = nullptr;
+ NodeVector signature_tables_;
NodeVector function_tables_;
NodeVector function_table_sizes_;
- Node** control_;
- Node** effect_;
+ Node** control_ = nullptr;
+ Node** effect_ = nullptr;
Node** cur_buffer_;
size_t cur_bufsize_;
Node* def_buffer_[kDefaultBufferSize];
+ bool has_simd_ = false;
WasmTrapHelper* trap_;
- wasm::FunctionSig* function_signature_;
+ wasm::FunctionSig* sig_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
compiler::SourcePositionTable* source_position_table_ = nullptr;
@@ -243,7 +273,7 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* BuildChangeEndianness(Node* node, MachineType type,
- wasm::LocalType wasmtype = wasm::kAstStmt);
+ wasm::ValueType wasmtype = wasm::kWasmStmt);
Node* MaskShiftCount32(Node* node);
Node* MaskShiftCount64(Node* node);
@@ -314,8 +344,7 @@ class WasmGraphBuilder {
MachineType result_type, int trap_zero,
wasm::WasmCodePosition position);
- Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
- Node* control);
+ Node* BuildJavaScriptToNumber(Node* node, Node* context);
Node* BuildChangeInt32ToTagged(Node* value);
Node* BuildChangeFloat64ToTagged(Node* value);
diff --git a/deps/v8/src/compiler/wasm-linkage.cc b/deps/v8/src/compiler/wasm-linkage.cc
index a41c93ca35..c4acfb3672 100644
--- a/deps/v8/src/compiler/wasm-linkage.cc
+++ b/deps/v8/src/compiler/wasm-linkage.cc
@@ -24,17 +24,17 @@ using compiler::LinkageLocation;
namespace {
-MachineType MachineTypeFor(LocalType type) {
+MachineType MachineTypeFor(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return MachineType::Int32();
- case kAstI64:
+ case kWasmI64:
return MachineType::Int64();
- case kAstF64:
+ case kWasmF64:
return MachineType::Float64();
- case kAstF32:
+ case kWasmF32:
return MachineType::Float32();
- case kAstS128:
+ case kWasmS128:
return MachineType::Simd128();
default:
UNREACHABLE();
@@ -173,7 +173,7 @@ struct Allocator {
int stack_offset;
- LinkageLocation Next(LocalType type) {
+ LinkageLocation Next(ValueType type) {
if (IsFloatingPoint(type)) {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
@@ -182,7 +182,7 @@ struct Allocator {
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge): Modify wasm linkage to allow use of all float regs.
- if (type == kAstF32) {
+ if (type == kWasmF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code),
@@ -206,11 +206,11 @@ struct Allocator {
}
}
}
- bool IsFloatingPoint(LocalType type) {
- return type == kAstF32 || type == kAstF64;
+ bool IsFloatingPoint(ValueType type) {
+ return type == kWasmF32 || type == kWasmF64;
}
- int Words(LocalType type) {
- if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
+ int Words(ValueType type) {
+ if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
return 2;
}
return 1;
@@ -285,7 +285,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// Add return location(s).
const int return_count = static_cast<int>(locations.return_count_);
for (int i = 0; i < return_count; i++) {
- LocalType ret = fsig->GetReturn(i);
+ ValueType ret = fsig->GetReturn(i);
locations.AddReturn(rets.Next(ret));
}
@@ -294,7 +294,7 @@ CallDescriptor* ModuleEnv::GetWasmCallDescriptor(Zone* zone,
// Add register and/or stack parameter(s).
const int parameter_count = static_cast<int>(fsig->parameter_count());
for (int i = 0; i < parameter_count; i++) {
- LocalType param = fsig->GetParam(i);
+ ValueType param = fsig->GetParam(i);
locations.AddParam(params.Next(param));
}
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 745ac50841..cd4eeedf10 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -43,9 +43,7 @@ class X64OperandConverter : public InstructionOperandConverter {
DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
return Immediate(0);
}
- if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(constant.ToInt32(), constant.rmode());
}
return Immediate(constant.ToInt32());
@@ -270,38 +268,58 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
class WasmOutOfLineTrap final : public OutOfLineCode {
public:
- WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
- Register context, int32_t position)
+ WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
+ int32_t position, Instruction* instr)
: OutOfLineCode(gen),
+ gen_(gen),
pc_(pc),
frame_elided_(frame_elided),
- context_(context),
- position_(position) {}
+ position_(position),
+ instr_(instr) {}
+ // TODO(eholk): Refactor this method to take the code generator as a
+ // parameter.
void Generate() final {
- // TODO(eholk): record pc_ and the current pc in a table so that
- // the signal handler can find it.
- USE(pc_);
+ int current_pc = __ pc_offset();
+
+ gen_->AddProtectedInstruction(pc_, current_pc);
if (frame_elided_) {
- __ EnterFrame(StackFrame::WASM);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
}
wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
__ Push(Smi::FromInt(trap_reason));
__ Push(Smi::FromInt(position_));
- __ Move(rsi, context_);
+ __ Move(rsi, gen_->isolate()->native_context());
__ CallRuntime(Runtime::kThrowWasmError);
+
+ if (instr_->reference_map() != nullptr) {
+ gen_->RecordSafepoint(instr_->reference_map(), Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
}
private:
- Address pc_;
+ CodeGenerator* gen_;
+ int pc_;
bool frame_elided_;
- Register context_;
int32_t position_;
+ Instruction* instr_;
};
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+ InstructionCode opcode, size_t input_count,
+ X64OperandConverter& i, int pc, Instruction* instr) {
+ const X64MemoryProtection protection =
+ static_cast<X64MemoryProtection>(MiscField::decode(opcode));
+ if (protection == X64MemoryProtection::kProtected) {
+ const bool frame_elided = !codegen->frame_access_state()->has_frame();
+ const int32_t position = i.InputInt32(input_count - 1);
+ new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position, instr);
+ }
+}
} // namespace
@@ -1838,21 +1856,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kX64Movsxbl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxbl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxbq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxbq);
break;
case kX64Movzxbq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1863,21 +1891,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movsxwl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movzxwl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movsxwq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxwq);
break;
case kX64Movzxwq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
@@ -1888,7 +1926,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64Movl:
- case kX64TrapMovl:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
if (instr->addressing_mode() == kMode_None) {
if (instr->InputAt(0)->IsRegister()) {
@@ -1897,14 +1936,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(i.OutputRegister(), i.InputOperand(0));
}
} else {
- Address pc = __ pc();
__ movl(i.OutputRegister(), i.MemoryOperand());
-
- if (arch_opcode == kX64TrapMovl) {
- bool frame_elided = !frame_access_state()->has_frame();
- new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
- i.InputRegister(2), i.InputInt32(3));
- }
}
__ AssertZeroExtended(i.OutputRegister());
} else {
@@ -1918,9 +1950,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsxlq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
ASSEMBLE_MOVX(movsxlq);
break;
case kX64Movq:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
@@ -1934,6 +1970,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movss:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -1943,6 +1981,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kX64Movsd:
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset(), instr);
if (instr->HasOutput()) {
__ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
@@ -2124,6 +2164,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
+ case kX64Int32x4ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64Int32x4Add: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64Int32x4Sub: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kCheckedLoadInt8:
ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
break;
@@ -2183,61 +2243,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
return kSuccess;
} // NOLINT(readability/fn_size)
+namespace {
-// Assembles branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
- X64OperandConverter i(this, instr);
- Label::Distance flabel_distance =
- branch->fallthru ? Label::kNear : Label::kFar;
- Label* tlabel = branch->true_label;
- Label* flabel = branch->false_label;
- switch (branch->condition) {
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+ switch (condition) {
case kUnorderedEqual:
- __ j(parity_even, flabel, flabel_distance);
- // Fall through.
case kEqual:
- __ j(equal, tlabel);
- break;
+ return equal;
case kUnorderedNotEqual:
- __ j(parity_even, tlabel);
- // Fall through.
case kNotEqual:
- __ j(not_equal, tlabel);
- break;
+ return not_equal;
case kSignedLessThan:
- __ j(less, tlabel);
- break;
+ return less;
case kSignedGreaterThanOrEqual:
- __ j(greater_equal, tlabel);
- break;
+ return greater_equal;
case kSignedLessThanOrEqual:
- __ j(less_equal, tlabel);
- break;
+ return less_equal;
case kSignedGreaterThan:
- __ j(greater, tlabel);
- break;
+ return greater;
case kUnsignedLessThan:
- __ j(below, tlabel);
- break;
+ return below;
case kUnsignedGreaterThanOrEqual:
- __ j(above_equal, tlabel);
- break;
+ return above_equal;
case kUnsignedLessThanOrEqual:
- __ j(below_equal, tlabel);
- break;
+ return below_equal;
case kUnsignedGreaterThan:
- __ j(above, tlabel);
- break;
+ return above;
case kOverflow:
- __ j(overflow, tlabel);
- break;
+ return overflow;
case kNotOverflow:
- __ j(no_overflow, tlabel);
- break;
+ return no_overflow;
default:
- UNREACHABLE();
break;
}
+ UNREACHABLE();
+ return no_condition;
+}
+
+} // namespace
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label::Distance flabel_distance =
+ branch->fallthru ? Label::kNear : Label::kFar;
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+ if (branch->condition == kUnorderedEqual) {
+ __ j(parity_even, flabel, flabel_distance);
+ } else if (branch->condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
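Factoring the condition table into FlagsConditionToCondition leaves only the NaN (unordered) special cases inline; the emitted sequence for a float equality branch looks roughly like this sketch:

// branch->condition == kUnorderedEqual, sketched as x64 assembly:
//   jp   flabel    ; PF set => operands were unordered (NaN) => not equal
//   je   tlabel    ; ordered and equal => take the true branch
//   jmp  flabel    ; emitted only when the false block is not fallthrough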
@@ -2246,6 +2303,71 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+ : OutOfLineCode(gen),
+ frame_elided_(frame_elided),
+ instr_(instr),
+ gen_(gen) {}
+
+ void Generate() final {
+ X64OperandConverter i(gen_, instr_);
+
+ Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+ i.InputInt32(instr_->InputCount() - 1));
+ bool old_has_frame = __ has_frame();
+ if (frame_elided_) {
+ __ set_has_frame(true);
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ GenerateCallToTrap(trap_id);
+ if (frame_elided_) {
+ __ set_has_frame(old_has_frame);
+ }
+ if (FLAG_debug_code) {
+ __ ud2();
+ }
+ }
+
+ private:
+ void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+ if (trap_id == Runtime::kNumFunctions) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+ 0);
+ } else {
+ __ Move(rsi, isolate()->native_context());
+ gen_->AssembleSourcePosition(instr_);
+ __ CallRuntime(trap_id);
+ }
+ ReferenceMap* reference_map =
+ new (gen_->zone()) ReferenceMap(gen_->zone());
+ gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ }
+
+ bool frame_elided_;
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ bool frame_elided = !frame_access_state()->has_frame();
+ auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+ Label* tlabel = ool->entry();
+ Label end;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_even, &end);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_even, tlabel);
+ }
+ __ j(FlagsConditionToCondition(condition), tlabel);
+ __ bind(&end);
+}
// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2258,60 +2380,17 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
Label check;
DCHECK_NE(0u, instr->OutputCount());
Register reg = i.OutputRegister(instr->OutputCount() - 1);
- Condition cc = no_condition;
- switch (condition) {
- case kUnorderedEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(0));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kEqual:
- cc = equal;
- break;
- case kUnorderedNotEqual:
- __ j(parity_odd, &check, Label::kNear);
- __ movl(reg, Immediate(1));
- __ jmp(&done, Label::kNear);
- // Fall through.
- case kNotEqual:
- cc = not_equal;
- break;
- case kSignedLessThan:
- cc = less;
- break;
- case kSignedGreaterThanOrEqual:
- cc = greater_equal;
- break;
- case kSignedLessThanOrEqual:
- cc = less_equal;
- break;
- case kSignedGreaterThan:
- cc = greater;
- break;
- case kUnsignedLessThan:
- cc = below;
- break;
- case kUnsignedGreaterThanOrEqual:
- cc = above_equal;
- break;
- case kUnsignedLessThanOrEqual:
- cc = below_equal;
- break;
- case kUnsignedGreaterThan:
- cc = above;
- break;
- case kOverflow:
- cc = overflow;
- break;
- case kNotOverflow:
- cc = no_overflow;
- break;
- default:
- UNREACHABLE();
- break;
+ if (condition == kUnorderedEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(0));
+ __ jmp(&done, Label::kNear);
+ } else if (condition == kUnorderedNotEqual) {
+ __ j(parity_odd, &check, Label::kNear);
+ __ movl(reg, Immediate(1));
+ __ jmp(&done, Label::kNear);
}
__ bind(&check);
- __ setcc(cc, reg);
+ __ setcc(FlagsConditionToCondition(condition), reg);
__ movzxbl(reg, reg);
__ bind(&done);
}
@@ -2555,8 +2634,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchRegister;
switch (src.type()) {
case Constant::kInt32: {
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
// TODO(dcarney): don't need scratch in this case.
@@ -2564,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (value == 0) {
__ xorl(dst, dst);
} else {
- if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+ if (RelocInfo::IsWasmSizeReference(src.rmode())) {
__ movl(dst, Immediate(value, src.rmode()));
} else {
__ movl(dst, Immediate(value));
@@ -2574,11 +2652,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
}
case Constant::kInt64:
- if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+ if (RelocInfo::IsWasmPtrReference(src.rmode())) {
__ movq(dst, src.ToInt64(), src.rmode());
} else {
- DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
__ Set(dst, src.ToInt64());
}
break;
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 35acec08dc..aad172788e 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -128,7 +128,6 @@ namespace compiler {
V(X64Movzxwq) \
V(X64Movw) \
V(X64Movl) \
- V(X64TrapMovl) \
V(X64Movsxlq) \
V(X64Movq) \
V(X64Movsd) \
@@ -148,7 +147,10 @@ namespace compiler {
V(X64Xchgw) \
V(X64Xchgl) \
V(X64Int32x4Create) \
- V(X64Int32x4ExtractLane)
+ V(X64Int32x4ExtractLane) \
+ V(X64Int32x4ReplaceLane) \
+ V(X64Int32x4Add) \
+ V(X64Int32x4Sub)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
@@ -183,6 +185,8 @@ namespace compiler {
V(M8I) /* [ %r2*8 + K] */ \
V(Root) /* [%root + K] */
+enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index ef0c3ad92c..427e58083f 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -125,6 +125,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Inc32:
case kX64Int32x4Create:
case kX64Int32x4ExtractLane:
+ case kX64Int32x4ReplaceLane:
+ case kX64Int32x4Add:
+ case kX64Int32x4Sub:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -155,7 +158,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
return kHasSideEffect;
case kX64Movl:
- case kX64TrapMovl:
if (instr->HasOutput()) {
DCHECK(instr->InputCount() >= 1);
return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 878e778da0..4c213793f7 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -82,6 +82,15 @@ class X64OperandGenerator final : public OperandGenerator {
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode mode = kMode_MRI;
+ if (base != nullptr && (index != nullptr || displacement != nullptr)) {
+ if (base->opcode() == IrOpcode::kInt32Constant &&
+ OpParameter<int32_t>(base) == 0) {
+ base = nullptr;
+ } else if (base->opcode() == IrOpcode::kInt64Constant &&
+ OpParameter<int64_t>(base) == 0) {
+ base = nullptr;
+ }
+ }
if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
if (index != nullptr) {
@@ -110,17 +119,22 @@ class X64OperandGenerator final : public OperandGenerator {
}
}
} else {
- DCHECK_NOT_NULL(index);
DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
- inputs[(*input_count)++] = UseRegister(index);
if (displacement != nullptr) {
- inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
- ? UseNegatedImmediate(displacement)
- : UseImmediate(displacement);
- static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
- kMode_M4I, kMode_M8I};
- mode = kMnI_modes[scale_exponent];
+ if (index == nullptr) {
+ inputs[(*input_count)++] = UseRegister(displacement);
+ mode = kMode_MR;
+ } else {
+ inputs[(*input_count)++] = UseRegister(index);
+ inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+ ? UseNegatedImmediate(displacement)
+ : UseImmediate(displacement);
+ static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+ kMode_M4I, kMode_M8I};
+ mode = kMnI_modes[scale_exponent];
+ }
} else {
+ inputs[(*input_count)++] = UseRegister(index);
static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
kMode_M4, kMode_M8};
mode = kMn_modes[scale_exponent];
@@ -154,10 +168,18 @@ class X64OperandGenerator final : public OperandGenerator {
}
BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
DCHECK(m.matches());
- if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+ if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count);
+ } else if (m.base() == nullptr &&
+ m.displacement_mode() == kPositiveDisplacement) {
+ // The displacement cannot be an immediate, but we can use the
+ // displacement as base instead and still benefit from addressing
+ // modes for the scale.
+ return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
+ nullptr, m.displacement_mode(), inputs,
+ input_count);
} else {
inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -171,7 +193,6 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
-
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode = kArchNop;
switch (load_rep.representation()) {
@@ -205,6 +226,39 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
return opcode;
}
+ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ return kX64Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ return kX64Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kX64Movb;
+ break;
+ case MachineRepresentation::kWord16:
+ return kX64Movw;
+ break;
+ case MachineRepresentation::kWord32:
+ return kX64Movl;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ return kX64Movq;
+ break;
+ case MachineRepresentation::kSimd128: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ return kArchNop;
+ }
+ UNREACHABLE();
+ return kArchNop;
+}
+
} // namespace
void InstructionSelector::VisitLoad(Node* node) {
@@ -214,33 +268,21 @@ void InstructionSelector::VisitLoad(Node* node) {
ArchOpcode opcode = GetLoadOpcode(load_rep);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- X64OperandGenerator g(this);
-
- ArchOpcode opcode = GetLoadOpcode(load_rep);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- // Add the context parameter as an input.
- inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
- // Add the source position as an input
- inputs[input_count++] = g.UseImmediate(node->InputAt(3));
InstructionCode code = opcode | AddressingModeField::encode(mode);
+ if (node->opcode() == IrOpcode::kProtectedLoad) {
+ code |= MiscField::encode(X64MemoryProtection::kProtected);
+ // Add the source position as an input
+ inputs[input_count++] = g.UseImmediate(node->InputAt(2));
+ }
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
+
void InstructionSelector::VisitStore(Node* node) {
X64OperandGenerator g(this);
Node* base = node->InputAt(0);
@@ -249,10 +291,9 @@ void InstructionSelector::VisitStore(Node* node) {
StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
- MachineRepresentation rep = store_rep.representation();
if (write_barrier_kind != kNoWriteBarrier) {
- DCHECK(CanBeTaggedPointer(rep));
+ DCHECK(CanBeTaggedPointer(store_rep.representation()));
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
@@ -287,35 +328,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= MiscField::encode(static_cast<int>(record_write_mode));
Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- ArchOpcode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kX64Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kX64Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kX64Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Movw;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Movl;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord64:
- opcode = kX64Movq;
- break;
- case MachineRepresentation::kSimd128: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- return;
- }
+ ArchOpcode opcode = GetStoreOpcode(store_rep);
InstructionOperand inputs[4];
size_t input_count = 0;
AddressingMode addressing_mode =
@@ -330,6 +343,27 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ X64OperandGenerator g(this);
+ Node* value = node->InputAt(2);
+ Node* position = node->InputAt(3);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+
+ ArchOpcode opcode = GetStoreOpcode(store_rep);
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+ AddressingMode addressing_mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ MiscField::encode(X64MemoryProtection::kProtected);
+ InstructionOperand value_operand =
+ g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+ inputs[input_count++] = value_operand;
+ inputs[input_count++] = g.UseImmediate(position);
+ Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1650,10 +1684,13 @@ void VisitCompareWithMemoryOperand(InstructionSelector* selector,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
cont->reason(), cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
InstructionOperand output = g.DefineAsRegister(cont->result());
selector->Emit(opcode, 1, &output, input_count, inputs);
+ } else {
+ DCHECK(cont->IsTrap());
+ inputs[input_count++] = g.UseImmediate(cont->trap_id());
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
}
}
@@ -1669,9 +1706,12 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.UseImmediate(cont->trap_id()));
}
}
@@ -1687,21 +1727,54 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
}
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+ if (hint_node->opcode() == IrOpcode::kLoad) {
+ MachineType hint = LoadRepresentationOf(hint_node->op());
+ if (node->opcode() == IrOpcode::kInt32Constant ||
+ node->opcode() == IrOpcode::kInt64Constant) {
+ int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+ ? OpParameter<int32_t>(node)
+ : OpParameter<int64_t>(node);
+ if (hint == MachineType::Int8()) {
+ if (constant >= std::numeric_limits<int8_t>::min() &&
+ constant <= std::numeric_limits<int8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint8()) {
+ if (constant >= std::numeric_limits<uint8_t>::min() &&
+ constant <= std::numeric_limits<uint8_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int16()) {
+ if (constant >= std::numeric_limits<int16_t>::min() &&
+ constant <= std::numeric_limits<int16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Uint16()) {
+ if (constant >= std::numeric_limits<uint16_t>::min() &&
+ constant <= std::numeric_limits<uint16_t>::max()) {
+ return hint;
+ }
+ } else if (hint == MachineType::Int32()) {
+ return hint;
+ } else if (hint == MachineType::Uint32()) {
+ if (constant >= 0) return hint;
+ }
+ }
+ }
+ return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+ : MachineType::None();
+}
+
// Tries to match the size of the given opcode to that of the operands, if
// possible.
InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
Node* right, FlagsContinuation* cont) {
- // Currently, if one of the two operands is not a Load, we don't know what its
- // machine representation is, so we bail out.
- // TODO(epertoso): we can probably get some size information out of immediates
- // and phi nodes.
- if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
- return opcode;
- }
+ // TODO(epertoso): we can probably get some size information out of phi nodes.
// If the load representations don't match, both operands will be
// zero/sign-extended to 32bit.
- MachineType left_type = LoadRepresentationOf(left->op());
- MachineType right_type = LoadRepresentationOf(right->op());
+ MachineType left_type = MachineTypeForNarrow(left, right);
+ MachineType right_type = MachineTypeForNarrow(right, left);
if (left_type == right_type) {
switch (left_type.representation()) {
case MachineRepresentation::kBit:
@@ -1775,11 +1848,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
g.UseRegister(right), cont);
}
- if (g.CanBeBetterLeftOperand(right)) {
- if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
- std::swap(left, right);
- }
-
return VisitCompare(selector, opcode, left, right, cont,
node->op()->HasProperty(Operator::kCommutative));
}
@@ -1826,9 +1894,11 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
} else if (cont->IsDeoptimize()) {
selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
cont->frame_state());
- } else {
- DCHECK(cont->IsSet());
+ } else if (cont->IsSet()) {
selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ } else {
+ DCHECK(cont->IsTrap());
+ selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
}
return;
}
@@ -2036,6 +2106,19 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
+ VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
@@ -2347,8 +2430,29 @@ void InstructionSelector::VisitCreateInt32x4(Node* node) {
void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+ X64OperandGenerator g(this);
+ int32_t lane = OpParameter<int32_t>(node);
+ Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+ g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
}
// static
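
The new Int32x4 add/sub selectors use g.DefineSameAsFirst(node) because SSE2 integer arithmetic is two-operand: the first source register is overwritten with the result. A small intrinsics sketch, not from this patch, showing the destructive form the register allocator must honor:

#include <emmintrin.h>  // SSE2
#include <cstdio>

int main() {
  __m128i a = _mm_set_epi32(4, 3, 2, 1);      // lanes {1, 2, 3, 4}
  __m128i b = _mm_set_epi32(40, 30, 20, 10);  // lanes {10, 20, 30, 40}
  // paddd overwrites its first operand: output register == input 0,
  // exactly the constraint DefineSameAsFirst expresses to the allocator.
  a = _mm_add_epi32(a, b);
  int out[4];
  _mm_storeu_si128(reinterpret_cast<__m128i*>(out), a);
  std::printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);  // 11 22 33 44
  return 0;
}
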
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index d2f64e8cf1..5d8594c92b 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -60,9 +60,7 @@ class X87OperandConverter : public InstructionOperandConverter {
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
- (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
- constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+ RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
}
@@ -2130,6 +2128,10 @@ void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ UNREACHABLE();
+}
// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index a737d1e9e8..9f9e4264a7 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -312,6 +312,11 @@ void InstructionSelector::VisitStore(Node* node) {
}
}
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
// Architecture supports unaligned access, therefore VisitLoad is used instead
void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
@@ -1542,6 +1547,15 @@ void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
VisitWordCompareZero(this, node, node->InputAt(0), &cont);
}
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+ Runtime::FunctionId func_id) {
+ UNREACHABLE();
+}
+
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X87OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index ce2c97be75..a51b31257d 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -118,12 +118,19 @@ bool Context::IsModuleContext() {
return map == map->GetHeap()->module_context_map();
}
+bool Context::IsEvalContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->eval_context_map();
+}
bool Context::IsScriptContext() {
Map* map = this->map();
return map == map->GetHeap()->script_context_map();
}
+bool Context::OptimizedCodeMapIsCleared() {
+ return osr_code_table() == GetHeap()->empty_fixed_array();
+}
bool Context::HasSameSecurityTokenAs(Context* that) {
return this->native_context()->security_token() ==
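
IsEvalContext() follows the file's existing pattern: a context's kind is decided by pointer-comparing its map against a per-heap singleton map. A standalone sketch of that identity-based type check, with simplified stand-in types:

#include <cassert>

struct Map {};

struct Heap {
  Map eval_context_map_;
  Map script_context_map_;
  const Map* eval_context_map() const { return &eval_context_map_; }
  const Map* script_context_map() const { return &script_context_map_; }
};

struct Context {
  const Map* map;
  const Heap* heap;
  // Kind is the identity of the map pointer, mirroring Context::IsEvalContext.
  bool IsEvalContext() const { return map == heap->eval_context_map(); }
  bool IsScriptContext() const { return map == heap->script_context_map(); }
};

int main() {
  Heap heap;
  Context eval_ctx{heap.eval_context_map(), &heap};
  assert(eval_ctx.IsEvalContext() && !eval_ctx.IsScriptContext());
  return 0;
}
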
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 012944e2c2..47ffb275b4 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -61,6 +61,7 @@ bool Context::is_declaration_context() {
IsModuleContext()) {
return true;
}
+ if (IsEvalContext()) return closure()->shared()->language_mode() == STRICT;
if (!IsBlockContext()) return false;
Object* ext = extension();
// If we have the special extension, we immediately know it must be a
@@ -74,7 +75,6 @@ Context* Context::declaration_context() {
Context* current = this;
while (!current->is_declaration_context()) {
current = current->previous();
- DCHECK(current->closure() == closure());
}
return current;
}
@@ -82,7 +82,8 @@ Context* Context::declaration_context() {
Context* Context::closure_context() {
Context* current = this;
while (!current->IsFunctionContext() && !current->IsScriptContext() &&
- !current->IsModuleContext() && !current->IsNativeContext()) {
+ !current->IsModuleContext() && !current->IsNativeContext() &&
+ !current->IsEvalContext()) {
current = current->previous();
DCHECK(current->closure() == closure());
}
@@ -90,7 +91,8 @@ Context* Context::closure_context() {
}
JSObject* Context::extension_object() {
- DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
+ DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext() ||
+ IsEvalContext());
HeapObject* object = extension();
if (object->IsTheHole(GetIsolate())) return nullptr;
if (IsBlockContext()) {
@@ -103,7 +105,7 @@ JSObject* Context::extension_object() {
}
JSReceiver* Context::extension_receiver() {
- DCHECK(IsNativeContext() || IsWithContext() ||
+ DCHECK(IsNativeContext() || IsWithContext() || IsEvalContext() ||
IsFunctionContext() || IsBlockContext());
return IsWithContext() ? JSReceiver::cast(
ContextExtension::cast(extension())->extension())
@@ -112,7 +114,7 @@ JSReceiver* Context::extension_receiver() {
ScopeInfo* Context::scope_info() {
DCHECK(!IsNativeContext());
- if (IsFunctionContext() || IsModuleContext()) {
+ if (IsFunctionContext() || IsModuleContext() || IsEvalContext()) {
return closure()->shared()->scope_info();
}
HeapObject* object = extension();
@@ -223,6 +225,8 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
// 1. Check global objects, subjects of with, and extension objects.
+ DCHECK_IMPLIES(context->IsEvalContext(),
+ context->extension()->IsTheHole(isolate));
if ((context->IsNativeContext() ||
(context->IsWithContext() && ((flags & SKIP_WITH_CONTEXT) == 0)) ||
context->IsFunctionContext() || context->IsBlockContext()) &&
@@ -301,12 +305,10 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// 2. Check the context proper if it has slots.
if (context->IsFunctionContext() || context->IsBlockContext() ||
- context->IsScriptContext()) {
+ context->IsScriptContext() || context->IsEvalContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
- Handle<ScopeInfo> scope_info(context->IsFunctionContext()
- ? context->closure()->shared()->scope_info()
- : context->scope_info());
+ Handle<ScopeInfo> scope_info(context->scope_info());
VariableMode mode;
InitializationFlag flag;
MaybeAssignedFlag maybe_assigned_flag;
@@ -408,6 +410,182 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
return Handle<Object>::null();
}
+static const int kSharedOffset = 0;
+static const int kCachedCodeOffset = 1;
+static const int kLiteralsOffset = 2;
+static const int kOsrAstIdOffset = 3;
+static const int kEntryLength = 4;
+static const int kInitialLength = kEntryLength;
+
+int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id) {
+ DisallowHeapAllocation no_gc;
+ DCHECK(this->IsNativeContext());
+ if (!OptimizedCodeMapIsCleared()) {
+ FixedArray* optimized_code_map = this->osr_code_table();
+ int length = optimized_code_map->length();
+ Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
+ for (int i = 0; i < length; i += kEntryLength) {
+ if (WeakCell::cast(optimized_code_map->get(i + kSharedOffset))->value() ==
+ shared &&
+ optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ return i;
+ }
+ }
+ }
+ return -1;
+}
+
+void Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id, Code** pcode,
+ LiteralsArray** pliterals) {
+ DCHECK(this->IsNativeContext());
+ int entry = SearchOptimizedCodeMapEntry(shared, osr_ast_id);
+ if (entry != -1) {
+ FixedArray* code_map = osr_code_table();
+ DCHECK_LE(entry + kEntryLength, code_map->length());
+ WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
+ WeakCell* literals_cell =
+ WeakCell::cast(code_map->get(entry + kLiteralsOffset));
+
+ *pcode = cell->cleared() ? nullptr : Code::cast(cell->value());
+ *pliterals = literals_cell->cleared()
+ ? nullptr
+ : LiteralsArray::cast(literals_cell->value());
+ } else {
+ *pcode = nullptr;
+ *pliterals = nullptr;
+ }
+}
+
+void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Code> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id) {
+ DCHECK(native_context->IsNativeContext());
+ Isolate* isolate = native_context->GetIsolate();
+ if (isolate->serializer_enabled()) return;
+
+ STATIC_ASSERT(kEntryLength == 4);
+ Handle<FixedArray> new_code_map;
+ int entry;
+
+ if (native_context->OptimizedCodeMapIsCleared()) {
+ new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
+ entry = 0;
+ } else {
+ Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
+ entry = native_context->SearchOptimizedCodeMapEntry(*shared, osr_ast_id);
+ if (entry >= 0) {
+ // Just set the code and literals of the entry.
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ old_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ Handle<WeakCell> literals_cell =
+ isolate->factory()->NewWeakCell(literals);
+ old_code_map->set(entry + kLiteralsOffset, *literals_cell);
+ return;
+ }
+
+ // Can we reuse an entry?
+ DCHECK(entry < 0);
+ int length = old_code_map->length();
+ for (int i = 0; i < length; i += kEntryLength) {
+ if (WeakCell::cast(old_code_map->get(i + kSharedOffset))->cleared()) {
+ new_code_map = old_code_map;
+ entry = i;
+ break;
+ }
+ }
+
+ if (entry < 0) {
+ // Copy old optimized code map and append one new entry.
+ new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
+ old_code_map, kEntryLength, TENURED);
+ entry = old_code_map->length();
+ }
+ }
+
+ Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
+ Handle<WeakCell> literals_cell = isolate->factory()->NewWeakCell(literals);
+ Handle<WeakCell> shared_cell = isolate->factory()->NewWeakCell(shared);
+
+ new_code_map->set(entry + kSharedOffset, *shared_cell);
+ new_code_map->set(entry + kCachedCodeOffset, *code_cell);
+ new_code_map->set(entry + kLiteralsOffset, *literals_cell);
+ new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
+
+#ifdef DEBUG
+ for (int i = 0; i < new_code_map->length(); i += kEntryLength) {
+ WeakCell* cell = WeakCell::cast(new_code_map->get(i + kSharedOffset));
+ DCHECK(cell->cleared() || cell->value()->IsSharedFunctionInfo());
+ cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
+ DCHECK(cell->cleared() ||
+ (cell->value()->IsCode() &&
+ Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
+ cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
+ DCHECK(cell->cleared() || cell->value()->IsFixedArray());
+ DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
+ }
+#endif
+
+ FixedArray* old_code_map = native_context->osr_code_table();
+ if (old_code_map != *new_code_map) {
+ native_context->set_osr_code_table(*new_code_map);
+ }
+}
+
+void Context::EvictFromOptimizedCodeMap(Code* optimized_code,
+ const char* reason) {
+ DCHECK(IsNativeContext());
+ DisallowHeapAllocation no_gc;
+ if (OptimizedCodeMapIsCleared()) return;
+
+ Heap* heap = GetHeap();
+ FixedArray* code_map = osr_code_table();
+ int dst = 0;
+ int length = code_map->length();
+ for (int src = 0; src < length; src += kEntryLength) {
+ if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+ optimized_code) {
+ BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
+ if (FLAG_trace_opt) {
+ PrintF(
+ "[evicting entry from native context optimizing code map (%s) for ",
+ reason);
+ ShortPrint();
+ DCHECK(!osr.IsNone());
+ PrintF(" (osr ast id %d)]\n", osr.ToInt());
+ }
+ // Evict the src entry by not copying it to the dst entry.
+ continue;
+ }
+ // Keep the src entry by copying it to the dst entry.
+ if (dst != src) {
+ code_map->set(dst + kSharedOffset, code_map->get(src + kSharedOffset));
+ code_map->set(dst + kCachedCodeOffset,
+ code_map->get(src + kCachedCodeOffset));
+ code_map->set(dst + kLiteralsOffset,
+ code_map->get(src + kLiteralsOffset));
+ code_map->set(dst + kOsrAstIdOffset,
+ code_map->get(src + kOsrAstIdOffset));
+ }
+ dst += kEntryLength;
+ }
+ if (dst != length) {
+ // Always trim even when array is cleared because of heap verifier.
+ heap->RightTrimFixedArray(code_map, length - dst);
+ if (code_map->length() == 0) {
+ ClearOptimizedCodeMap();
+ }
+ }
+}
+
+void Context::ClearOptimizedCodeMap() {
+ DCHECK(IsNativeContext());
+ FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
+ set_osr_code_table(empty_fixed_array);
+}
void Context::AddOptimizedFunction(JSFunction* function) {
DCHECK(IsNativeContext());
@@ -582,6 +760,10 @@ bool Context::IsBootstrappingOrValidParentContext(
#endif
+void Context::ResetErrorsThrown() {
+ DCHECK(IsNativeContext());
+ set_errors_thrown(Smi::FromInt(0));
+}
void Context::IncrementErrorsThrown() {
DCHECK(IsNativeContext());
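
The new osr_code_table is a flat FixedArray whose entries occupy kEntryLength = 4 consecutive slots ([shared, code, literals, osr_ast_id]), so SearchOptimizedCodeMapEntry is a strided linear scan keyed on (shared, osr_ast_id). A sketch of that layout and lookup, with raw pointers standing in for the WeakCells the real table holds:

#include <cassert>
#include <vector>

static const int kSharedOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
static const int kOsrAstIdOffset = 3;
static const int kEntryLength = 4;

using Slot = const void*;

int SearchEntry(const std::vector<Slot>& table, Slot shared, Slot osr_id) {
  for (int i = 0; i + kEntryLength <= static_cast<int>(table.size());
       i += kEntryLength) {
    if (table[i + kSharedOffset] == shared &&
        table[i + kOsrAstIdOffset] == osr_id) {
      return i;  // slot index of the matching entry
    }
  }
  return -1;
}

int main() {
  int shared_a = 0, shared_b = 0, code = 0, literals = 0, id_1 = 0, id_2 = 0;
  std::vector<Slot> table = {&shared_a, &code, &literals, &id_1,
                             &shared_b, &code, &literals, &id_2};
  assert(SearchEntry(table, &shared_b, &id_2) == kEntryLength);
  assert(SearchEntry(table, &shared_a, &id_2) == -1);
  return 0;
}
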
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index b0b719585e..e419913f90 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -43,6 +43,7 @@ enum ContextLookupFlags {
V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
V(OBJECT_FREEZE, JSFunction, object_freeze) \
@@ -60,60 +61,64 @@ enum ContextLookupFlags {
V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(CREATE_RESOLVING_FUNCTION_INDEX, JSFunction, create_resolving_functions)
-
-#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
- V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
- V(ARRAY_POP_INDEX, JSFunction, array_pop) \
- V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
- V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
- V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
- V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
- V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(ERROR_TO_STRING, JSFunction, error_to_string) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
- V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
- V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
- V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
- V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
- V(OBJECT_TO_STRING, JSFunction, object_to_string) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
- V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
- V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
- V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
- promise_has_user_defined_reject_handler) \
- V(PROMISE_DEBUG_GET_INFO_INDEX, JSFunction, promise_debug_get_info) \
- V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
- V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction, \
- reject_promise_no_debug_event) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
- V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
- wasm_compile_error_function) \
+ V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PERFORM_PROMISE_THEN_INDEX, JSFunction, perform_promise_then) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
+ V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject)
+
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
+ V(ARRAY_POP_INDEX, JSFunction, array_pop) \
+ V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
+ async_function_await_caught) \
+ V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
+ async_function_await_uncaught) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(ERROR_TO_STRING, JSFunction, error_to_string) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(GLOBAL_PROXY_FUNCTION_INDEX, JSFunction, global_proxy_function) \
+ V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
+ V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
+ V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(FUNCTION_HAS_INSTANCE_INDEX, JSFunction, function_has_instance) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_FUNCTION_INDEX, JSFunction, promise_function) \
+ V(PROMISE_ID_RESOLVE_HANDLER_INDEX, JSFunction, promise_id_resolve_handler) \
+ V(PROMISE_ID_REJECT_HANDLER_INDEX, JSFunction, promise_id_reject_handler) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REJECT_PROMISE_NO_DEBUG_EVENT_INDEX, JSFunction, \
+ reject_promise_no_debug_event) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
+ V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ V(WASM_COMPILE_ERROR_FUNCTION_INDEX, JSFunction, \
+ wasm_compile_error_function) \
+ V(WASM_LINK_ERROR_FUNCTION_INDEX, JSFunction, wasm_link_error_function) \
V(WASM_RUNTIME_ERROR_FUNCTION_INDEX, JSFunction, wasm_runtime_error_function)
#define NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V) \
@@ -207,6 +212,7 @@ enum ContextLookupFlags {
V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
V(CALLSITE_FUNCTION_INDEX, JSFunction, callsite_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+ V(CURRENT_MODULE_INDEX, Module, current_module) \
V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
@@ -216,13 +222,11 @@ enum ContextLookupFlags {
V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object) \
V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
+ V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
+ fast_template_instantiations_cache) \
V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
- V(FAST_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, FixedArray, \
- fast_template_instantiations_cache) \
- V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
- slow_template_instantiations_cache) \
V(FUNCTION_FUNCTION_INDEX, JSFunction, function_function) \
V(GENERATOR_FUNCTION_FUNCTION_INDEX, JSFunction, \
generator_function_function) \
@@ -243,6 +247,13 @@ enum ContextLookupFlags {
V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
+ V(INTL_DATE_TIME_FORMAT_FUNCTION_INDEX, JSFunction, \
+ intl_date_time_format_function) \
+ V(INTL_NUMBER_FORMAT_FUNCTION_INDEX, JSFunction, \
+ intl_number_format_function) \
+ V(INTL_COLLATOR_FUNCTION_INDEX, JSFunction, intl_collator_function) \
+ V(INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX, JSFunction, \
+ intl_v8_break_iterator_function) \
V(JS_ARRAY_FAST_SMI_ELEMENTS_MAP_INDEX, Map, \
js_array_fast_smi_elements_map_index) \
V(JS_ARRAY_FAST_HOLEY_SMI_ELEMENTS_MAP_INDEX, Map, \
@@ -263,7 +274,6 @@ enum ContextLookupFlags {
V(JS_WEAK_SET_FUN_INDEX, JSFunction, js_weak_set_fun) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
- V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(MATH_RANDOM_INDEX_INDEX, Smi, math_random_index) \
V(MATH_RANDOM_CACHE_INDEX, Object, math_random_cache) \
V(MESSAGE_LISTENERS_INDEX, TemplateList, message_listeners) \
@@ -271,18 +281,20 @@ enum ContextLookupFlags {
V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
- V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
- slow_object_with_null_prototype_map) \
V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
+ V(OSR_CODE_TABLE_INDEX, FixedArray, osr_code_table) \
V(PROXY_CALLABLE_MAP_INDEX, Map, proxy_callable_map) \
V(PROXY_CONSTRUCTOR_MAP_INDEX, Map, proxy_constructor_map) \
V(PROXY_FUNCTION_INDEX, JSFunction, proxy_function) \
V(PROXY_FUNCTION_MAP_INDEX, Map, proxy_function_map) \
V(PROXY_MAP_INDEX, Map, proxy_map) \
+ V(PROMISE_GET_CAPABILITIES_EXECUTOR_SHARED_FUN, SharedFunctionInfo, \
+ promise_get_capabilities_executor_shared_fun) \
V(PROMISE_RESOLVE_SHARED_FUN, SharedFunctionInfo, \
promise_resolve_shared_fun) \
V(PROMISE_REJECT_SHARED_FUN, SharedFunctionInfo, promise_reject_shared_fun) \
+ V(PROMISE_PROTOTYPE_MAP_INDEX, Map, promise_prototype_map) \
V(REGEXP_EXEC_FUNCTION_INDEX, JSFunction, regexp_exec_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(REGEXP_LAST_MATCH_INFO_INDEX, RegExpMatchInfo, regexp_last_match_info) \
@@ -295,7 +307,6 @@ enum ContextLookupFlags {
V(SECURITY_TOKEN_INDEX, Object, security_token) \
V(SELF_WEAK_CELL_INDEX, WeakCell, self_weak_cell) \
V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
- V(FIXED_ARRAY_ITERATOR_MAP_INDEX, Map, fixed_array_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
@@ -303,27 +314,31 @@ enum ContextLookupFlags {
sloppy_function_without_prototype_map) \
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
- V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
- V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
- V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
- V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
- V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
- V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym) \
- V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym) \
- V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym) \
- V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
- V(SLOPPY_ASYNC_FUNCTION_MAP_INDEX, Map, sloppy_async_function_map) \
- V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
- V(STRICT_ASYNC_FUNCTION_MAP_INDEX, Map, strict_async_function_map) \
+ V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
+ slow_object_with_null_prototype_map) \
+ V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
+ slow_template_instantiations_cache) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
+ V(ASYNC_FUNCTION_MAP_INDEX, Map, async_function_map) \
V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_function_without_prototype_map) \
- V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
+ V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
+ V(CLASS_FUNCTION_MAP_INDEX, Map, class_function_map) \
V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
+ V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(WASM_FUNCTION_MAP_INDEX, Map, wasm_function_map) \
+ V(WASM_INSTANCE_CONSTRUCTOR_INDEX, JSFunction, wasm_instance_constructor) \
+ V(WASM_INSTANCE_SYM_INDEX, Symbol, wasm_instance_sym) \
+ V(WASM_MEMORY_CONSTRUCTOR_INDEX, JSFunction, wasm_memory_constructor) \
+ V(WASM_MEMORY_SYM_INDEX, Symbol, wasm_memory_sym) \
+ V(WASM_MODULE_CONSTRUCTOR_INDEX, JSFunction, wasm_module_constructor) \
+ V(WASM_MODULE_SYM_INDEX, Symbol, wasm_module_sym) \
+ V(WASM_TABLE_CONSTRUCTOR_INDEX, JSFunction, wasm_table_constructor) \
+ V(WASM_TABLE_SYM_INDEX, Symbol, wasm_table_sym) \
V(TYPED_ARRAY_FUN_INDEX, JSFunction, typed_array_function) \
V(TYPED_ARRAY_PROTOTYPE_INDEX, JSObject, typed_array_prototype) \
V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
@@ -333,7 +348,6 @@ enum ContextLookupFlags {
V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function) \
- V(CURRENT_MODULE_INDEX, Module, current_module) \
NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
NATIVE_CONTEXT_JS_ARRAY_ITERATOR_MAPS(V)
@@ -486,6 +500,7 @@ class Context: public FixedArray {
WHITE_LIST_INDEX = MIN_CONTEXT_SLOTS + 1
};
+ void ResetErrorsThrown();
void IncrementErrorsThrown();
int GetErrorsThrown();
@@ -542,10 +557,32 @@ class Context: public FixedArray {
inline bool IsDebugEvaluateContext();
inline bool IsBlockContext();
inline bool IsModuleContext();
+ inline bool IsEvalContext();
inline bool IsScriptContext();
inline bool HasSameSecurityTokenAs(Context* that);
+ // Removes a specific optimized code object from the optimized code map.
+ // In case of non-OSR the code reference is cleared from the cache entry but
+ // the entry itself is left in the map in order to keep sharing literals.
+ void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+
+ // Clear optimized code map.
+ void ClearOptimizedCodeMap();
+
+ // A native context keeps track of all OSR'd optimized functions.
+ inline bool OptimizedCodeMapIsCleared();
+ void SearchOptimizedCodeMap(SharedFunctionInfo* shared, BailoutId osr_ast_id,
+ Code** pcode, LiteralsArray** pliterals);
+ int SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id);
+
+ static void AddToOptimizedCodeMap(Handle<Context> native_context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Code> code,
+ Handle<LiteralsArray> literals,
+ BailoutId osr_ast_id);
+
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
void RemoveOptimizedFunction(JSFunction* function);
@@ -601,20 +638,20 @@ class Context: public FixedArray {
}
static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
- // Note: Must be kept in sync with FastNewClosureStub::Generate.
+ // Note: Must be kept in sync with the FastNewClosure builtin.
if (IsGeneratorFunction(kind)) {
- return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
- : SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
+ return GENERATOR_FUNCTION_MAP_INDEX;
}
if (IsAsyncFunction(kind)) {
- return is_strict(language_mode) ? STRICT_ASYNC_FUNCTION_MAP_INDEX
- : SLOPPY_ASYNC_FUNCTION_MAP_INDEX;
+ return ASYNC_FUNCTION_MAP_INDEX;
}
if (IsClassConstructor(kind)) {
- // Use strict function map (no own "caller" / "arguments")
- return STRICT_FUNCTION_MAP_INDEX;
+ // Like the strict function map, but with no 'name' accessor. 'name'
+ // needs to be the last property and it is added during instantiation,
+ // in case a static property with the same name exists.
+ return CLASS_FUNCTION_MAP_INDEX;
}
if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 427a67d109..c4753ebc93 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -57,7 +57,7 @@ inline unsigned int FastD2UI(double x) {
#ifndef V8_TARGET_BIG_ENDIAN
Address mantissa_ptr = reinterpret_cast<Address>(&x);
#else
- Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+ Address mantissa_ptr = reinterpret_cast<Address>(&x) + kInt32Size;
#endif
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
@@ -122,18 +122,61 @@ bool IsUint32Double(double value) {
value == FastUI2D(FastD2UI(value));
}
+bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value) {
+ const double k2Pow52 = 4503599627370496.0;
+ const uint32_t kValidTopBits = 0x43300000;
+ const uint64_t kBottomBitMask = V8_2PART_UINT64_C(0x00000000, FFFFFFFF);
+
+ // Add 2^52 to the double, to place valid uint32 values in the low-significant
+ // bits of the exponent, by effectively setting the (implicit) top bit of the
+ // significand. Note that this addition also normalises 0.0 and -0.0.
+ double shifted_value = value + k2Pow52;
+
+ // At this point, a valid uint32 valued double will be represented as:
+ //
+ // sign = 0
+ // exponent = 52
+ // significand = 1. 00...00 <value>
+ // implicit^ ^^^^^^^ 32 bits
+ // ^^^^^^^^^^^^^^^ 52 bits
+ //
+ // Therefore, we can first check the top 32 bits to make sure that the sign,
+ // exponent and remaining significand bits are valid, and only then check the
+ // value in the bottom 32 bits.
+
+ uint64_t result = bit_cast<uint64_t>(shifted_value);
+ if ((result >> 32) == kValidTopBits) {
+ *uint32_value = result & kBottomBitMask;
+ return FastUI2D(result & kBottomBitMask) == value;
+ }
+ return false;
+}
int32_t NumberToInt32(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
return DoubleToInt32(number->Number());
}
-
uint32_t NumberToUint32(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
return DoubleToUint32(number->Number());
}
+uint32_t PositiveNumberToUint32(Object* number) {
+ if (number->IsSmi()) {
+ int value = Smi::cast(number)->value();
+ if (value <= 0) return 0;
+ return value;
+ }
+ DCHECK(number->IsHeapNumber());
+ double value = number->Number();
+ // Catch all values smaller than 1 and use the double-negation trick for NaNs.
+ if (!(value >= 1)) return 0;
+ uint32_t max = std::numeric_limits<uint32_t>::max();
+ if (value < max) return static_cast<uint32_t>(value);
+ return max;
+}
+
int64_t NumberToInt64(Object* number) {
if (number->IsSmi()) return Smi::cast(number)->value();
return static_cast<int64_t>(number->Number());
@@ -154,7 +197,12 @@ bool TryNumberToSize(Object* number, size_t* result) {
} else {
DCHECK(number->IsHeapNumber());
double value = HeapNumber::cast(number)->value();
- if (value >= 0 && value <= std::numeric_limits<size_t>::max()) {
+ // If value is compared directly to the limit, the limit will be
+ // cast to a double and could round up to limit + 1, because a double
+ // might not have enough mantissa bits to represent it exactly.
+ // So we might as well cast the limit first, and use < instead of <=.
+ double maxSize = static_cast<double>(std::numeric_limits<size_t>::max());
+ if (value >= 0 && value < maxSize) {
*result = static_cast<size_t>(value);
return true;
} else {
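
DoubleToUint32IfEqualToSelf relies on the classic 2^52 trick: adding 2^52 pins any uint32-valued double to a fixed exponent, so one compare of the high word validates the sign, exponent, and upper significand at once. A standalone re-derivation of the same check (memcpy stands in for V8's bit_cast):

#include <cassert>
#include <cstdint>
#include <cstring>

bool DoubleToUint32IfEqualToSelf(double value, uint32_t* out) {
  const double k2Pow52 = 4503599627370496.0;  // 2^52
  const uint32_t kValidTopBits = 0x43300000;  // sign 0, exponent 52
  double shifted = value + k2Pow52;
  uint64_t bits;
  std::memcpy(&bits, &shifted, sizeof(bits));  // bit_cast
  if ((bits >> 32) == kValidTopBits) {
    *out = static_cast<uint32_t>(bits);  // low 32 bits hold the value
    return static_cast<double>(static_cast<uint32_t>(bits)) == value;
  }
  return false;
}

int main() {
  uint32_t u = 0;
  assert(DoubleToUint32IfEqualToSelf(4294967295.0, &u) && u == 4294967295u);
  assert(!DoubleToUint32IfEqualToSelf(-1.0, &u));           // negative
  assert(!DoubleToUint32IfEqualToSelf(0.5, &u));            // fractional
  assert(DoubleToUint32IfEqualToSelf(-0.0, &u) && u == 0);  // normalised
  return 0;
}
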
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 7867719968..2d4aaa0692 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -8,6 +8,7 @@
#include <stdarg.h>
#include <cmath>
+#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
#include "src/codegen.h"
@@ -168,7 +169,7 @@ const char* DoubleToCString(double v, Vector<char> buffer) {
if (exponent < 0) exponent = -exponent;
builder.AddDecimalInteger(exponent);
}
- return builder.Finalize();
+ return builder.Finalize();
}
}
}
@@ -411,76 +412,91 @@ char* DoubleToPrecisionCString(double value, int p) {
return result;
}
-
char* DoubleToRadixCString(double value, int radix) {
DCHECK(radix >= 2 && radix <= 36);
-
+ DCHECK(std::isfinite(value));
+ DCHECK_NE(0.0, value);
// Character array used for conversion.
static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
- // Buffer for the integer part of the result. 1024 chars is enough
- // for max integer value in radix 2. We need room for a sign too.
- static const int kBufferSize = 1100;
- char integer_buffer[kBufferSize];
- integer_buffer[kBufferSize - 1] = '\0';
-
- // Buffer for the decimal part of the result. We only generate up
- // to kBufferSize - 1 chars for the decimal part.
- char decimal_buffer[kBufferSize];
- decimal_buffer[kBufferSize - 1] = '\0';
-
- // Make sure the value is positive.
- bool is_negative = value < 0.0;
- if (is_negative) value = -value;
-
- // Get the integer part and the decimal part.
- double integer_part = std::floor(value);
- double decimal_part = value - integer_part;
+ // Temporary buffer for the result. We start with the decimal point in the
+ // middle and write to the left for the integer part and to the right for the
+ // fractional part. 1024 characters for the exponent and 52 for the mantissa
+ // either way, plus additional space for the sign, decimal point and string
+ // termination, should be sufficient.
+ static const int kBufferSize = 2200;
+ char buffer[kBufferSize];
+ int integer_cursor = kBufferSize / 2;
+ int fraction_cursor = integer_cursor;
+
+ bool negative = value < 0;
+ if (negative) value = -value;
+
+ // Split the value into an integer part and a fractional part.
+ double integer = std::floor(value);
+ double fraction = value - integer;
+ // We only compute fractional digits up to the input double's precision.
+ double delta = 0.5 * (Double(value).NextDouble() - value);
+ delta = std::max(Double(0.0).NextDouble(), delta);
+ DCHECK_GT(delta, 0.0);
+ if (fraction > delta) {
+ // Insert decimal point.
+ buffer[fraction_cursor++] = '.';
+ do {
+ // Shift up by one digit.
+ fraction *= radix;
+ delta *= radix;
+ // Write digit.
+ int digit = static_cast<int>(fraction);
+ buffer[fraction_cursor++] = chars[digit];
+ // Calculate remainder.
+ fraction -= digit;
+ // Round to even.
+ if (fraction > 0.5 || (fraction == 0.5 && (digit & 1))) {
+ if (fraction + delta > 1) {
+ // We need to backtrack over already-written digits in case of carry-over.
+ while (true) {
+ fraction_cursor--;
+ if (fraction_cursor == kBufferSize / 2) {
+ CHECK_EQ('.', buffer[fraction_cursor]);
+ // Carry over to the integer part.
+ integer += 1;
+ break;
+ }
+ char c = buffer[fraction_cursor];
+ // Reconstruct digit.
+ int digit = c > '9' ? (c - 'a' + 10) : (c - '0');
+ if (digit + 1 < radix) {
+ buffer[fraction_cursor++] = chars[digit + 1];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ } while (fraction > delta);
+ }
- // Convert the integer part starting from the back. Always generate
- // at least one digit.
- int integer_pos = kBufferSize - 2;
- do {
- double remainder = modulo(integer_part, radix);
- integer_buffer[integer_pos--] = chars[static_cast<int>(remainder)];
- integer_part -= remainder;
- integer_part /= radix;
- } while (integer_part >= 1.0);
- // Sanity check.
- DCHECK(integer_pos > 0);
- // Add sign if needed.
- if (is_negative) integer_buffer[integer_pos--] = '-';
-
- // Convert the decimal part. Repeatedly multiply by the radix to
- // generate the next char. Never generate more than kBufferSize - 1
- // chars.
- //
- // TODO(1093998): We will often generate a full decimal_buffer of
- // chars because hitting zero will often not happen. The right
- // solution would be to continue until the string representation can
- // be read back and yield the original value. To implement this
- // efficiently, we probably have to modify dtoa.
- int decimal_pos = 0;
- while ((decimal_part > 0.0) && (decimal_pos < kBufferSize - 1)) {
- decimal_part *= radix;
- decimal_buffer[decimal_pos++] =
- chars[static_cast<int>(std::floor(decimal_part))];
- decimal_part -= std::floor(decimal_part);
+ // Compute integer digits. Fill unrepresented digits with zero.
+ while (Double(integer / radix).Exponent() > 0) {
+ integer /= radix;
+ buffer[--integer_cursor] = '0';
}
- decimal_buffer[decimal_pos] = '\0';
-
- // Compute the result size.
- int integer_part_size = kBufferSize - 2 - integer_pos;
- // Make room for zero termination.
- unsigned result_size = integer_part_size + decimal_pos;
- // If the number has a decimal part, leave room for the period.
- if (decimal_pos > 0) result_size++;
- // Allocate result and fill in the parts.
- SimpleStringBuilder builder(result_size + 1);
- builder.AddSubstring(integer_buffer + integer_pos + 1, integer_part_size);
- if (decimal_pos > 0) builder.AddCharacter('.');
- builder.AddSubstring(decimal_buffer, decimal_pos);
- return builder.Finalize();
+ do {
+ double remainder = modulo(integer, radix);
+ buffer[--integer_cursor] = chars[static_cast<int>(remainder)];
+ integer = (integer - remainder) / radix;
+ } while (integer > 0);
+
+ // Add sign and terminate string.
+ if (negative) buffer[--integer_cursor] = '-';
+ buffer[fraction_cursor++] = '\0';
+ DCHECK_LT(fraction_cursor, kBufferSize);
+ DCHECK_LE(0, integer_cursor);
+ // Allocate new string as return value.
+ char* result = NewArray<char>(fraction_cursor - integer_cursor);
+ memcpy(result, buffer + integer_cursor, fraction_cursor - integer_cursor);
+ return result;
}
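
The rewritten DoubleToRadixCString stops emitting fractional digits once the residue drops below half an ULP of the input, rather than filling a fixed-size buffer. A simplified sketch of just that termination criterion, using std::nextafter in place of V8's Double helper and omitting the round-to-even carry handling:

#include <cmath>
#include <cstdio>
#include <limits>
#include <string>

std::string FractionDigits(double value, int radix) {
  static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  double fraction = value - std::floor(value);
  // Half the distance to the next representable double bounds how many
  // fractional digits are actually meaningful.
  double next = std::nextafter(value, std::numeric_limits<double>::infinity());
  double delta = 0.5 * (next - value);
  std::string out;
  while (fraction > delta) {
    fraction *= radix;  // shift up by one digit
    delta *= radix;
    int digit = static_cast<int>(fraction);
    out += chars[digit];
    fraction -= digit;  // remainder for the next digit
  }
  return out;
}

int main() {
  // 0.1 in binary is periodic; the loop stops once further digits fall
  // below the input's precision instead of filling a whole buffer.
  std::printf("%s\n", FractionDigits(0.1, 2).c_str());
  return 0;
}
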
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 2dd91d9319..a408132fa8 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -167,7 +167,15 @@ inline bool IsInt32Double(double value);
// We also have to check for negative 0 as it is not a UInteger32.
inline bool IsUint32Double(double value);
+// Tries to convert |value| to a uint32, setting the result in |uint32_value|.
+// If the output does not compare equal to the input, returns false and the
+// value in |uint32_value| is left unspecified.
+// Used for conversions such as in ECMA-262 15.4.2.2, which check "ToUint32(len)
+// is equal to len".
+inline bool DoubleToUint32IfEqualToSelf(double value, uint32_t* uint32_value);
+
// Convert from Number object to C integer.
+inline uint32_t PositiveNumberToUint32(Object* number);
inline int32_t NumberToInt32(Object* number);
inline uint32_t NumberToUint32(Object* number);
inline int64_t NumberToInt64(Object* number);
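
PositiveNumberToUint32's contract is worth spelling out: NaN and anything below 1 map to 0, and values at or beyond 2^32 - 1 saturate at the maximum. The `!(value >= 1)` comparison is deliberate, since any comparison with NaN is false. A standalone restatement of the double path:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

uint32_t PositiveDoubleToUint32(double value) {
  if (!(value >= 1)) return 0;  // also catches NaN: NaN >= 1 is false
  const uint32_t max = std::numeric_limits<uint32_t>::max();
  if (value < static_cast<double>(max)) return static_cast<uint32_t>(value);
  return max;  // saturate out-of-range values
}

int main() {
  assert(PositiveDoubleToUint32(std::nan("")) == 0);
  assert(PositiveDoubleToUint32(-5.0) == 0);
  assert(PositiveDoubleToUint32(0.99) == 0);
  assert(PositiveDoubleToUint32(2.5) == 2);
  assert(PositiveDoubleToUint32(1e12) == std::numeric_limits<uint32_t>::max());
  return 0;
}
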
diff --git a/deps/v8/src/counters-inl.h b/deps/v8/src/counters-inl.h
index 7219ef778a..ce77806cdc 100644
--- a/deps/v8/src/counters-inl.h
+++ b/deps/v8/src/counters-inl.h
@@ -10,6 +10,57 @@
namespace v8 {
namespace internal {
+void RuntimeCallTimer::Start(RuntimeCallCounter* counter,
+ RuntimeCallTimer* parent) {
+ DCHECK(!IsStarted());
+ counter_ = counter;
+ parent_.SetValue(parent);
+ if (FLAG_runtime_stats ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
+ return;
+ }
+ base::TimeTicks now = Now();
+ if (parent) parent->Pause(now);
+ Resume(now);
+ DCHECK(IsStarted());
+}
+
+void RuntimeCallTimer::Pause(base::TimeTicks now) {
+ DCHECK(IsStarted());
+ elapsed_ += (now - start_ticks_);
+ start_ticks_ = base::TimeTicks();
+}
+
+void RuntimeCallTimer::Resume(base::TimeTicks now) {
+ DCHECK(!IsStarted());
+ start_ticks_ = now;
+}
+
+RuntimeCallTimer* RuntimeCallTimer::Stop() {
+ if (!IsStarted()) return parent();
+ base::TimeTicks now = Now();
+ Pause(now);
+ counter_->Increment();
+ CommitTimeToCounter();
+
+ RuntimeCallTimer* parent_timer = parent();
+ if (parent_timer) {
+ parent_timer->Resume(now);
+ }
+ return parent_timer;
+}
+
+void RuntimeCallTimer::CommitTimeToCounter() {
+ counter_->Add(elapsed_);
+ elapsed_ = base::TimeDelta();
+}
+
+bool RuntimeCallTimer::IsStarted() { return start_ticks_ != base::TimeTicks(); }
+
+base::TimeTicks RuntimeCallTimer::Now() {
+ return base::TimeTicks::HighResolutionNow();
+}
+
RuntimeCallTimerScope::RuntimeCallTimerScope(
Isolate* isolate, RuntimeCallStats::CounterId counter_id) {
if (V8_UNLIKELY(FLAG_runtime_stats)) {
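
The inlined RuntimeCallTimer methods implement a pause/resume discipline: starting a nested timer pauses its parent, and stopping it resumes the parent, so each counter accumulates self-time only. A minimal sketch with std::chrono standing in for base::TimeTicks:

#include <chrono>

using Clock = std::chrono::steady_clock;

struct Timer {
  Timer* parent = nullptr;
  Clock::duration elapsed{};
  Clock::time_point start{};

  void Pause(Clock::time_point now) { elapsed += now - start; }
  void Resume(Clock::time_point now) { start = now; }

  void Start(Timer* p) {
    parent = p;
    Clock::time_point now = Clock::now();
    if (parent) parent->Pause(now);  // stop attributing time to the caller
    Resume(now);
  }

  Timer* Stop() {
    Clock::time_point now = Clock::now();
    Pause(now);                       // bank this timer's own elapsed time
    if (parent) parent->Resume(now);  // caller starts accumulating again
    return parent;
  }
};

int main() {
  Timer outer, inner;
  outer.Start(nullptr);
  inner.Start(&outer);  // outer is paused here
  inner.Stop();         // outer resumes
  outer.Stop();
  return 0;
}
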
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 5089eb22e8..66e4def3df 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -216,10 +216,11 @@ class RuntimeCallStatEntries {
// binary size increase: std::vector::push_back expands to a large amount of
// instructions, and this function is invoked repeatedly by macros.
V8_NOINLINE void Add(RuntimeCallCounter* counter) {
- if (counter->count == 0) return;
- entries.push_back(Entry(counter->name, counter->time, counter->count));
- total_time += counter->time;
- total_call_count += counter->count;
+ if (counter->count() == 0) return;
+ entries.push_back(
+ Entry(counter->name(), counter->time(), counter->count()));
+ total_time += counter->time();
+ total_call_count += counter->count();
}
private:
@@ -273,20 +274,33 @@ class RuntimeCallStatEntries {
};
void RuntimeCallCounter::Reset() {
- count = 0;
- time = base::TimeDelta();
+ count_ = 0;
+ time_ = base::TimeDelta();
}
void RuntimeCallCounter::Dump(v8::tracing::TracedValue* value) {
- value->BeginArray(name);
- value->AppendLongInteger(count);
- value->AppendLongInteger(time.InMicroseconds());
+ value->BeginArray(name_);
+ value->AppendDouble(count_);
+ value->AppendDouble(time_.InMicroseconds());
value->EndArray();
}
void RuntimeCallCounter::Add(RuntimeCallCounter* other) {
- count += other->count;
- time += other->time;
+ count_ += other->count();
+ time_ += other->time();
+}
+
+void RuntimeCallTimer::Snapshot() {
+ base::TimeTicks now = Now();
+  // Pause only the topmost timer in the timer stack.
+ Pause(now);
+ // Commit all the timer's elapsed time to the counters.
+ RuntimeCallTimer* timer = this;
+ while (timer != nullptr) {
+ timer->CommitTimeToCounter();
+ timer = timer->parent();
+ }
+ Resume(now);
}
// static
@@ -310,10 +324,14 @@ const RuntimeCallStats::CounterId RuntimeCallStats::counters[] = {
};
// static
+const int RuntimeCallStats::counters_count =
+ arraysize(RuntimeCallStats::counters);
+
+// static
void RuntimeCallStats::Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
CounterId counter_id) {
RuntimeCallCounter* counter = &(stats->*counter_id);
- DCHECK(counter->name != nullptr);
+ DCHECK(counter->name() != nullptr);
timer->Start(counter, stats->current_timer_.Value());
stats->current_timer_.SetValue(timer);
}
@@ -329,7 +347,7 @@ void RuntimeCallStats::Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer) {
RuntimeCallTimer* next = stats->current_timer_.Value();
while (next && next->parent() != timer) next = next->parent();
if (next == nullptr) return;
- next->parent_.SetValue(timer->Stop());
+ next->set_parent(timer->Stop());
}
}
@@ -348,13 +366,13 @@ void RuntimeCallStats::CorrectCurrentCounterId(RuntimeCallStats* stats,
RuntimeCallTimer* timer = stats->current_timer_.Value();
// When RCS are enabled dynamically there might be no current timer set up.
if (timer == nullptr) return;
- timer->counter_ = &(stats->*counter_id);
+ timer->set_counter(&(stats->*counter_id));
}
void RuntimeCallStats::Print(std::ostream& os) {
RuntimeCallStatEntries entries;
if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Elapsed();
+ current_timer_.Value()->Snapshot();
}
for (const RuntimeCallStats::CounterId counter_id :
RuntimeCallStats::counters) {
@@ -388,7 +406,7 @@ void RuntimeCallStats::Dump(v8::tracing::TracedValue* value) {
for (const RuntimeCallStats::CounterId counter_id :
RuntimeCallStats::counters) {
RuntimeCallCounter* counter = &(this->*counter_id);
- if (counter->count > 0) counter->Dump(value);
+ if (counter->count() > 0) counter->Dump(value);
}
in_use_ = false;
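
RuntimeCallTimer::Snapshot() above flushes a consistent view of the whole timer stack: pause the topmost (only running) timer once, commit every timer's accumulated elapsed time up the parent chain, then resume. A self-contained sketch of that walk (illustrative types and names, not V8's):

    #include <chrono>

    struct TimerNode {
      using Clock = std::chrono::steady_clock;
      TimerNode* parent = nullptr;
      Clock::duration elapsed{};
      Clock::time_point start{};
      Clock::duration committed{};  // stands in for the RuntimeCallCounter

      void Pause(Clock::time_point now) { elapsed += now - start; }
      void Resume(Clock::time_point now) { start = now; }
      void CommitTimeToCounter() { committed += elapsed; elapsed = {}; }
    };

    void Snapshot(TimerNode* topmost) {
      TimerNode::Clock::time_point now = TimerNode::Clock::now();
      topmost->Pause(now);  // only the topmost timer holds a live start tick
      for (TimerNode* t = topmost; t != nullptr; t = t->parent) {
        t->CommitTimeToCounter();
      }
      topmost->Resume(now);  // measurement continues where it left off
    }
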
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 4415250b24..4f3706d665 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -484,65 +484,51 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
value * ((current_ms - last_ms_) / interval_ms);
}
-struct RuntimeCallCounter {
- explicit RuntimeCallCounter(const char* name) : name(name) {}
+class RuntimeCallCounter final {
+ public:
+ explicit RuntimeCallCounter(const char* name) : name_(name) {}
V8_NOINLINE void Reset();
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
void Add(RuntimeCallCounter* other);
- const char* name;
- int64_t count = 0;
- base::TimeDelta time;
+ const char* name() const { return name_; }
+ int64_t count() const { return count_; }
+ base::TimeDelta time() const { return time_; }
+ void Increment() { count_++; }
+ void Add(base::TimeDelta delta) { time_ += delta; }
+
+ private:
+ const char* name_;
+ int64_t count_ = 0;
+ base::TimeDelta time_;
};
// RuntimeCallTimer is used to keep track of the stack of currently active
// timers used for properly measuring the own time of a RuntimeCallCounter.
-class RuntimeCallTimer {
+class RuntimeCallTimer final {
public:
RuntimeCallCounter* counter() { return counter_; }
- base::ElapsedTimer timer() { return timer_; }
+ void set_counter(RuntimeCallCounter* counter) { counter_ = counter; }
RuntimeCallTimer* parent() const { return parent_.Value(); }
+ void set_parent(RuntimeCallTimer* timer) { parent_.SetValue(timer); }
+ const char* name() const { return counter_->name(); }
- private:
- friend class RuntimeCallStats;
-
- inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent) {
- counter_ = counter;
- parent_.SetValue(parent);
- if (FLAG_runtime_stats !=
- v8::tracing::TracingCategoryObserver::ENABLED_BY_SAMPLING) {
- timer_.Start();
- }
- }
-
- inline RuntimeCallTimer* Stop() {
- if (!timer_.IsStarted()) return parent();
- base::TimeDelta delta = timer_.Elapsed();
- timer_.Stop();
- counter_->count++;
- counter_->time += delta;
- if (parent()) {
- // Adjust parent timer so that it does not include sub timer's time.
- parent()->counter_->time -= delta;
- }
- return parent();
- }
+ inline bool IsStarted();
- inline void Elapsed() {
- base::TimeDelta delta = timer_.Elapsed();
- counter_->time += delta;
- if (parent()) {
- parent()->counter_->time -= delta;
- parent()->Elapsed();
- }
- timer_.Restart();
- }
+ inline void Start(RuntimeCallCounter* counter, RuntimeCallTimer* parent);
+ void Snapshot();
+ inline RuntimeCallTimer* Stop();
- const char* name() { return counter_->name; }
+ private:
+ inline void Pause(base::TimeTicks now);
+ inline void Resume(base::TimeTicks now);
+ inline void CommitTimeToCounter();
+ inline base::TimeTicks Now();
RuntimeCallCounter* counter_ = nullptr;
base::AtomicValue<RuntimeCallTimer*> parent_;
- base::ElapsedTimer timer_;
+ base::TimeTicks start_ticks_;
+ base::TimeDelta elapsed_;
};
#define FOR_EACH_API_COUNTER(V) \
@@ -571,6 +557,7 @@ class RuntimeCallTimer {
V(FunctionTemplate_GetFunction) \
V(FunctionTemplate_New) \
V(FunctionTemplate_NewRemoteInstance) \
+ V(FunctionTemplate_NewWithCache) \
V(FunctionTemplate_NewWithFastHandler) \
V(Int16Array_New) \
V(Int32Array_New) \
@@ -641,6 +628,8 @@ class RuntimeCallTimer {
V(Promise_HasRejectHandler) \
V(Promise_Resolver_New) \
V(Promise_Resolver_Resolve) \
+ V(Promise_Result) \
+ V(Promise_Status) \
V(Promise_Then) \
V(Proxy_New) \
V(RangeError_New) \
@@ -696,23 +685,36 @@ class RuntimeCallTimer {
V(AccessorNameGetterCallback_FunctionPrototype) \
V(AccessorNameGetterCallback_StringLength) \
V(AccessorNameSetterCallback) \
- V(Compile) \
- V(CompileCode) \
V(CompileCodeLazy) \
V(CompileDeserialize) \
V(CompileEval) \
V(CompileFullCode) \
+ V(CompileAnalyse) \
+ V(CompileBackgroundIgnition) \
+ V(CompileFunction) \
+ V(CompileGetFromOptimizedCodeMap) \
+ V(CompileGetUnoptimizedCode) \
V(CompileIgnition) \
- V(CompilerDispatcher) \
+ V(CompileIgnitionFinalization) \
+ V(CompileInnerFunction) \
+ V(CompileRenumber) \
+ V(CompileRewriteReturnResult) \
+ V(CompileScopeAnalysis) \
+ V(CompileScript) \
V(CompileSerialize) \
+ V(CompilerDispatcher) \
V(DeoptimizeCode) \
V(FunctionCallback) \
V(GC) \
+ V(GC_AllAvailableGarbage) \
+ V(GCEpilogueCallback) \
+ V(GCPrologueCallback) \
V(GenericNamedPropertyDefinerCallback) \
V(GenericNamedPropertyDeleterCallback) \
V(GenericNamedPropertyDescriptorCallback) \
V(GenericNamedPropertyQueryCallback) \
V(GenericNamedPropertySetterCallback) \
+ V(GetMoreDataCallback) \
V(IndexedPropertyDefinerCallback) \
V(IndexedPropertyDeleterCallback) \
V(IndexedPropertyDescriptorCallback) \
@@ -728,11 +730,16 @@ class RuntimeCallTimer {
V(Object_DeleteProperty) \
V(OptimizeCode) \
V(ParseArrowFunctionLiteral) \
+ V(ParseBackgroundArrowFunctionLiteral) \
+ V(ParseBackgroundFunctionLiteral) \
V(ParseEval) \
V(ParseFunction) \
V(ParseFunctionLiteral) \
V(ParseProgram) \
V(PreParseArrowFunctionLiteral) \
+ V(PreParseBackgroundArrowFunctionLiteral) \
+ V(PreParseBackgroundNoVariableResolution) \
+ V(PreParseBackgroundWithVariableResolution) \
V(PreParseNoVariableResolution) \
V(PreParseWithVariableResolution) \
V(PropertyCallback) \
@@ -741,6 +748,9 @@ class RuntimeCallTimer {
V(PrototypeObject_DeleteProperty) \
V(RecompileConcurrent) \
V(RecompileSynchronous) \
+ V(TestCounter1) \
+ V(TestCounter2) \
+ V(TestCounter3) \
/* Dummy counter for the unexpected stub miss. */ \
V(UnexpectedStubMiss)
@@ -750,8 +760,6 @@ class RuntimeCallTimer {
V(KeyedLoadIC_LoadIndexedInterceptorStub) \
V(KeyedLoadIC_KeyedLoadSloppyArgumentsStub) \
V(KeyedLoadIC_LoadElementDH) \
- V(KeyedLoadIC_LoadFastElementStub) \
- V(KeyedLoadIC_LoadDictionaryElementStub) \
V(KeyedLoadIC_SlowStub) \
V(KeyedStoreIC_ElementsTransitionAndStoreStub) \
V(KeyedStoreIC_KeyedStoreSloppyArgumentsStub) \
@@ -778,7 +786,6 @@ class RuntimeCallTimer {
V(LoadIC_LoadFieldDH) \
V(LoadIC_LoadFieldFromPrototypeDH) \
V(LoadIC_LoadField) \
- V(LoadIC_LoadFieldStub) \
V(LoadIC_LoadGlobal) \
V(LoadIC_LoadInterceptor) \
V(LoadIC_LoadNonexistentDH) \
@@ -786,6 +793,7 @@ class RuntimeCallTimer {
V(LoadIC_LoadNormal) \
V(LoadIC_LoadScriptContextFieldStub) \
V(LoadIC_LoadViaGetter) \
+ V(LoadIC_NonReceiver) \
V(LoadIC_Premonomorphic) \
V(LoadIC_SlowStub) \
V(LoadIC_StringLengthStub) \
@@ -797,6 +805,7 @@ class RuntimeCallTimer {
V(StoreIC_HandlerCacheHit_Accessor) \
V(StoreIC_HandlerCacheHit_Data) \
V(StoreIC_HandlerCacheHit_Transition) \
+ V(StoreIC_NonReceiver) \
V(StoreIC_Premonomorphic) \
V(StoreIC_SlowStub) \
V(StoreIC_StoreCallback) \
@@ -812,7 +821,7 @@ class RuntimeCallTimer {
V(StoreIC_StoreTransitionDH) \
V(StoreIC_StoreViaSetter)
-class RuntimeCallStats : public ZoneObject {
+class RuntimeCallStats final : public ZoneObject {
public:
typedef RuntimeCallCounter RuntimeCallStats::*CounterId;
@@ -838,26 +847,29 @@ class RuntimeCallStats : public ZoneObject {
#undef CALL_BUILTIN_COUNTER
static const CounterId counters[];
+ static const int counters_count;
  // Start measuring the time for a function. This will establish the
// connection to the parent counter for properly calculating the own times.
- static void Enter(RuntimeCallStats* stats, RuntimeCallTimer* timer,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE static void Enter(RuntimeCallStats* stats,
+ RuntimeCallTimer* timer,
+ CounterId counter_id);
// Leave a scope for a measured runtime function. This will properly add
// the time delta to the current_counter and subtract the delta from its
// parent.
- static void Leave(RuntimeCallStats* stats, RuntimeCallTimer* timer);
+ V8_EXPORT_PRIVATE static void Leave(RuntimeCallStats* stats,
+ RuntimeCallTimer* timer);
// Set counter id for the innermost measurement. It can be used to refine
// event kind when a runtime entry counter is too generic.
- static void CorrectCurrentCounterId(RuntimeCallStats* stats,
- CounterId counter_id);
+ V8_EXPORT_PRIVATE static void CorrectCurrentCounterId(RuntimeCallStats* stats,
+ CounterId counter_id);
- void Reset();
+ V8_EXPORT_PRIVATE void Reset();
// Add all entries from another stats object.
void Add(RuntimeCallStats* other);
- void Print(std::ostream& os);
+ V8_EXPORT_PRIVATE void Print(std::ostream& os);
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
RuntimeCallStats() {
@@ -887,6 +899,36 @@ class RuntimeCallStats : public ZoneObject {
CHANGE_CURRENT_RUNTIME_COUNTER(isolate->counters()->runtime_call_stats(), \
Handler_##counter_name)
+// A RuntimeCallTimerScope wraps a RuntimeCallTimer to measure the time of
+// a C++ scope.
+class RuntimeCallTimerScope {
+ public:
+ inline RuntimeCallTimerScope(Isolate* isolate,
+ RuntimeCallStats::CounterId counter_id);
+ // This constructor is here just to avoid calling GetIsolate() when the
+ // stats are disabled and the isolate is not directly available.
+ inline RuntimeCallTimerScope(HeapObject* heap_object,
+ RuntimeCallStats::CounterId counter_id);
+ inline RuntimeCallTimerScope(RuntimeCallStats* stats,
+ RuntimeCallStats::CounterId counter_id);
+
+ inline ~RuntimeCallTimerScope() {
+ if (V8_UNLIKELY(stats_ != nullptr)) {
+ RuntimeCallStats::Leave(stats_, &timer_);
+ }
+ }
+
+ private:
+ V8_INLINE void Initialize(RuntimeCallStats* stats,
+ RuntimeCallStats::CounterId counter_id) {
+ stats_ = stats;
+ RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ }
+
+ RuntimeCallStats* stats_ = nullptr;
+ RuntimeCallTimer timer_;
+};
+
#define HISTOGRAM_RANGE_LIST(HR) \
/* Generic range histograms */ \
HR(detached_context_age_in_gc, V8.DetachedContextAgeInGC, 0, 20, 21) \
@@ -1050,8 +1092,6 @@ class RuntimeCallStats : public ZoneObject {
SC(ic_compare_miss, V8.ICCompareMiss) \
SC(ic_call_miss, V8.ICCallMiss) \
SC(ic_keyed_call_miss, V8.ICKeyedCallMiss) \
- SC(ic_load_miss, V8.ICLoadMiss) \
- SC(ic_keyed_load_miss, V8.ICKeyedLoadMiss) \
SC(ic_store_miss, V8.ICStoreMiss) \
SC(ic_keyed_store_miss, V8.ICKeyedStoreMiss) \
SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime) \
@@ -1298,36 +1338,6 @@ class Counters {
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
};
-// A RuntimeCallTimerScopes wraps around a RuntimeCallTimer to measure the
-// the time of C++ scope.
-class RuntimeCallTimerScope {
- public:
- inline RuntimeCallTimerScope(Isolate* isolate,
- RuntimeCallStats::CounterId counter_id);
- // This constructor is here just to avoid calling GetIsolate() when the
- // stats are disabled and the isolate is not directly available.
- inline RuntimeCallTimerScope(HeapObject* heap_object,
- RuntimeCallStats::CounterId counter_id);
- inline RuntimeCallTimerScope(RuntimeCallStats* stats,
- RuntimeCallStats::CounterId counter_id);
-
- inline ~RuntimeCallTimerScope() {
- if (V8_UNLIKELY(stats_ != nullptr)) {
- RuntimeCallStats::Leave(stats_, &timer_);
- }
- }
-
- private:
- V8_INLINE void Initialize(RuntimeCallStats* stats,
- RuntimeCallStats::CounterId counter_id) {
- stats_ = stats;
- RuntimeCallStats::Enter(stats_, &timer_, counter_id);
- }
-
- RuntimeCallStats* stats_ = nullptr;
- RuntimeCallTimer timer_;
-};
-
} // namespace internal
} // namespace v8
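
RuntimeCallTimerScope, moved above its first use in this hunk, is a classic RAII guard: Enter in the constructor, Leave in the destructor, so every exit path of the enclosing C++ scope closes the measurement. A minimal sketch of the same idiom with invented names:

    #include <chrono>
    #include <cstdio>

    class ScopedTimer {
     public:
      explicit ScopedTimer(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
      ~ScopedTimer() {  // runs on return, exception, or fall-through
        auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                      std::chrono::steady_clock::now() - start_).count();
        std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
      }
     private:
      const char* name_;
      std::chrono::steady_clock::time_point start_;
    };

    int Parse(int input) {
      ScopedTimer scope("ParseProgram");  // covers every return below
      if (input < 0) return -1;           // early return is still timed
      return input * 2;
    }
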
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index e092a9e040..2fb746dcbd 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/arm/lithium-codegen-arm.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
@@ -164,15 +165,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r1);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -2873,7 +2877,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
__ cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
@@ -3125,7 +3129,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->declarations());
__ push(scratch0());
__ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ push(scratch0());
@@ -4041,13 +4045,17 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (Smi::IsValid(int_key)) {
__ mov(r3, Operand(Smi::FromInt(int_key)));
} else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ stop("expected smi");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
- __ Move(r3, ToRegister(key));
- __ SmiTag(r3);
+ Label is_smi;
+ __ SmiTag(r3, ToRegister(key), SetCC);
+    // Deopt if the key is outside Smi range. The stub expects a Smi and
+    // would bump the elements into dictionary mode (and trigger a deopt)
+    // anyway.
+ __ b(vc, &is_smi);
+ __ PopSafepointRegisters();
+ DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
+ __ bind(&is_smi);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
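
The SmiTag-with-overflow-check pattern introduced here (SmiTag with SetCC, then a branch on the overflow flag) guards 32-bit Smi tagging: a Smi stores a 31-bit payload shifted left by one, so int32 keys outside [-2^30, 2^30 - 1] cannot be tagged. A portable sketch of the same check (my own rendering; the real code reads the CPU overflow flag, and it assumes the GCC/Clang overflow builtin):

    #include <cstdint>

    // Tagging doubles the value; detect the doubling overflow explicitly
    // instead of via CPU flags.
    bool SmiTagIfValid(int32_t value, int32_t* tagged) {
      int32_t doubled;
      if (__builtin_mul_overflow(value, 2, &doubled)) return false;
      *tagged = doubled;  // low bit 0 marks a Smi on 32-bit targets
      return true;
    }
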
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index 4d8e6615e7..141ac3f610 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -6,6 +6,7 @@
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
@@ -618,14 +619,17 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ Push(x1);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -3243,7 +3247,7 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
__ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
@@ -4595,7 +4599,7 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
// TODO(all): if Mov could handle object in new space then it could be used
// here.
- __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+ __ LoadHeapObject(scratch1, instr->hydrogen()->declarations());
__ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
__ Push(scratch1, scratch2);
__ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 9b9e6742e4..7ffb2a3a7b 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -1947,7 +1947,7 @@ class HDeclareGlobals final : public HUnaryOperation {
Handle<TypeFeedbackVector>);
HValue* context() { return OperandAt(0); }
- Handle<FixedArray> pairs() const { return pairs_; }
+ Handle<FixedArray> declarations() const { return declarations_; }
int flags() const { return flags_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
@@ -1960,17 +1960,17 @@ class HDeclareGlobals final : public HUnaryOperation {
}
private:
- HDeclareGlobals(HValue* context, Handle<FixedArray> pairs, int flags,
+ HDeclareGlobals(HValue* context, Handle<FixedArray> declarations, int flags,
Handle<TypeFeedbackVector> feedback_vector)
: HUnaryOperation(context),
- pairs_(pairs),
+ declarations_(declarations),
feedback_vector_(feedback_vector),
flags_(flags) {
set_representation(Representation::Tagged());
SetAllSideEffects();
}
- Handle<FixedArray> pairs_;
+ Handle<FixedArray> declarations_;
Handle<TypeFeedbackVector> feedback_vector_;
int flags_;
};
diff --git a/deps/v8/src/crankshaft/hydrogen-types.cc b/deps/v8/src/crankshaft/hydrogen-types.cc
index 684e6ad09c..ad2d461fe8 100644
--- a/deps/v8/src/crankshaft/hydrogen-types.cc
+++ b/deps/v8/src/crankshaft/hydrogen-types.cc
@@ -6,6 +6,7 @@
#include "src/field-type.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index 754da77c94..9ff2308361 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -118,7 +118,7 @@ class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
if (!isolate()->use_crankshaft() ||
- info()->shared_info()->dont_crankshaft()) {
+ info()->shared_info()->must_use_ignition_turbo()) {
// Crankshaft is entirely disabled.
return FAILED;
}
@@ -142,7 +142,6 @@ HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
}
}
DCHECK(info()->shared_info()->has_deoptimization_support());
- DCHECK(!info()->shared_info()->never_compiled());
// Check the whitelist for Crankshaft.
if (!info()->shared_info()->PassesFilter(FLAG_hydrogen_filter)) {
@@ -1363,10 +1362,6 @@ HGraph* HGraphBuilder::CreateGraph() {
DCHECK(!FLAG_minimal);
graph_ = new (zone()) HGraph(info_, descriptor_);
if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
- if (!info_->IsStub() && is_tracking_positions()) {
- TraceInlinedFunction(info_->shared_info(), SourcePosition::Unknown(),
- SourcePosition::kNotInlined);
- }
CompilationPhase phase("H_Block building", info_);
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
@@ -1374,49 +1369,6 @@ HGraph* HGraphBuilder::CreateGraph() {
return graph_;
}
-void HGraphBuilder::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position,
- int inlining_id) {
- DCHECK(is_tracking_positions());
-
- if (!shared->script()->IsUndefined(isolate())) {
- Handle<Script> script(Script::cast(shared->script()), isolate());
-
- if (FLAG_hydrogen_track_positions &&
- !script->source()->IsUndefined(isolate())) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- Object* source_name = script->name();
- OFStream os(tracing_scope.file());
- os << "--- FUNCTION SOURCE (";
- if (source_name->IsString()) {
- os << String::cast(source_name)->ToCString().get() << ":";
- }
- os << shared->DebugName()->ToCString().get() << ") id{";
- os << info_->optimization_id() << "," << inlining_id << "} ---\n";
- {
- DisallowHeapAllocation no_allocation;
- int start = shared->start_position();
- int len = shared->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- os << AsReversiblyEscapedUC16(c);
- }
- }
-
- os << "\n--- END ---\n";
- }
- }
-
- if (FLAG_hydrogen_track_positions &&
- inlining_id != SourcePosition::kNotInlined) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
- << info_->optimization_id() << "," << inlining_id << "} AS "
- << inlining_id << " AT " << position.ScriptOffset() << std::endl;
- }
-}
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
DCHECK(current_block() != NULL);
@@ -1764,12 +1716,12 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
details_index->ClearFlag(HValue::kCanOverflow);
HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
FAST_ELEMENTS);
- int details_mask = PropertyDetails::TypeField::kMask;
+ int details_mask = PropertyDetails::KindField::kMask;
details = AddUncasted<HBitwise>(Token::BIT_AND, details,
Add<HConstant>(details_mask));
IfBuilder details_compare(this);
- details_compare.If<HCompareNumericAndBranch>(
- details, graph()->GetConstant0(), Token::EQ);
+ details_compare.If<HCompareNumericAndBranch>(details, New<HConstant>(kData),
+ Token::EQ);
details_compare.Then();
HValue* result_index =
AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
@@ -2289,6 +2241,9 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
IfBuilder if_createcons(this);
if_createcons.If<HCompareNumericAndBranch>(
length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
+ if_createcons.And();
+ if_createcons.If<HCompareNumericAndBranch>(
+ length, Add<HConstant>(ConsString::kMaxLength), Token::LTE);
if_createcons.Then();
{
// Create a cons string.
@@ -3994,7 +3949,7 @@ void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
bool HOptimizedGraphBuilder::BuildGraph() {
- if (IsSubclassConstructor(current_info()->literal()->kind())) {
+ if (IsDerivedConstructor(current_info()->literal()->kind())) {
Bailout(kSuperReference);
return false;
}
@@ -5099,18 +5054,23 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// space for nested functions that don't need pretenuring.
HConstant* shared_info_value = Add<HConstant>(shared_info);
HInstruction* instr;
+ Handle<TypeFeedbackVector> vector(current_feedback_vector(), isolate());
+ HValue* vector_value = Add<HConstant>(vector);
+ int index = TypeFeedbackVector::GetIndex(expr->LiteralFeedbackSlot());
+ HValue* index_value = Add<HConstant>(index);
if (!expr->pretenure()) {
- FastNewClosureStub stub(isolate());
- FastNewClosureDescriptor descriptor(isolate());
- HValue* values[] = {shared_info_value};
- HConstant* stub_value = Add<HConstant>(stub.GetCode());
- instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+ Callable callable = CodeFactory::FastNewClosure(isolate());
+ HValue* values[] = {shared_info_value, vector_value, index_value};
+ HConstant* stub_value = Add<HConstant>(callable.code());
+ instr = New<HCallWithDescriptor>(stub_value, 0, callable.descriptor(),
ArrayVector(values));
} else {
Add<HPushArguments>(shared_info_value);
+ Add<HPushArguments>(vector_value);
+ Add<HPushArguments>(index_value);
Runtime::FunctionId function_id =
expr->pretenure() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
- instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+ instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 3);
}
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5334,7 +5294,8 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Callable callable = CodeFactory::LoadGlobalICInOptimizedCode(
isolate(), ast_context()->typeof_mode());
HValue* stub = Add<HConstant>(callable.code());
- HValue* values[] = {slot_value, vector_value};
+ HValue* name = Add<HConstant>(variable->name());
+ HValue* values[] = {name, slot_value, vector_value};
HCallWithDescriptor* instr = New<HCallWithDescriptor>(
Code::LOAD_GLOBAL_IC, stub, 0, callable.descriptor(),
ArrayVector(values));
@@ -5446,7 +5407,9 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
}
}
}
- } else if (!boilerplate->HasFastDoubleElements()) {
+ } else if (boilerplate->HasFastDoubleElements()) {
+ if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+ } else {
return false;
}
}
@@ -5460,7 +5423,8 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int limit = boilerplate->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
if ((*max_properties)-- == 0) return false;
FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -5509,7 +5473,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
site_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
@@ -5558,6 +5523,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (info.CanAccessMonomorphic()) {
HValue* checked_literal = Add<HCheckMaps>(literal, map);
DCHECK(!info.IsAccessorConstant());
+ info.MarkAsInitializingStore();
store = BuildMonomorphicAccess(
&info, literal, checked_literal, value,
BailoutId::None(), BailoutId::None());
@@ -5632,7 +5598,8 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
site_context.ExitScope(site, boilerplate_object);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<FixedArray> constants = expr->constant_elements();
+ Handle<ConstantElementsPair> constants =
+ expr->GetOrBuildConstantElements(isolate());
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
@@ -5799,9 +5766,8 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
}
// This is a normal store.
- instr = New<HStoreNamedField>(
- checked_object->ActualValue(), field_access, value,
- transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
+ instr = New<HStoreNamedField>(checked_object->ActualValue(), field_access,
+ value, info->StoreMode());
}
if (transition_to_field) {
@@ -7553,6 +7519,12 @@ void HOptimizedGraphBuilder::BuildLoad(Property* expr,
HValue* string = Pop();
HInstruction* char_code = BuildStringCharCodeAt(string, index);
AddInstruction(char_code);
+ if (char_code->IsConstant()) {
+ HConstant* c_code = HConstant::cast(char_code);
+ if (c_code->HasNumberValue() && std::isnan(c_code->DoubleValue())) {
+ Add<HDeoptimize>(DeoptimizeReason::kOutOfBounds, Deoptimizer::EAGER);
+ }
+ }
instr = NewUncasted<HStringCharFromCode>(char_code);
} else if (expr->key()->IsPropertyName()) {
@@ -7606,27 +7578,38 @@ void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
BuildLoad(expr, expr->id());
}
-
-HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant,
+ bool ensure_no_elements) {
HCheckMaps* check = Add<HCheckMaps>(
Add<HConstant>(constant), handle(constant->map()));
check->ClearDependsOnFlag(kElementsKind);
+ if (ensure_no_elements) {
+ // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
+ HValue* elements = AddLoadElements(check, nullptr);
+ HValue* empty_elements =
+ Add<HConstant>(isolate()->factory()->empty_fixed_array());
+ IfBuilder if_empty(this);
+ if_empty.IfNot<HCompareObjectEqAndBranch>(elements, empty_elements);
+ if_empty.ThenDeopt(DeoptimizeReason::kWrongMap);
+ if_empty.End();
+ }
return check;
}
-
HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder) {
+ Handle<JSObject> holder,
+ bool ensure_no_elements) {
PrototypeIterator iter(isolate(), prototype, kStartAtReceiver);
while (holder.is_null() ||
!PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
- BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+ BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter),
+ ensure_no_elements);
iter.Advance();
if (iter.IsAtEnd()) {
return NULL;
}
}
- return BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+ return BuildConstantMapCheck(holder);
}
@@ -7965,7 +7948,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (target_shared->force_inline()) {
return 0;
}
- if (target->shared()->IsBuiltin()) {
+ if (!target->shared()->IsUserJavaScript()) {
return kNotInlinable;
}
@@ -8078,7 +8061,7 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
TraceInline(target, caller, "parse failure");
return false;
}
- if (target_shared->dont_crankshaft()) {
+ if (target_shared->must_use_ignition_turbo()) {
TraceInline(target, caller, "ParseAndAnalyze found incompatibility");
return false;
}
@@ -8161,10 +8144,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
&bounds_)
.Run();
- if (is_tracking_positions()) {
- TraceInlinedFunction(target_shared, source_position(), inlining_id);
- }
-
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
@@ -8491,6 +8470,23 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinGetterCall(
}
}
+// static
+bool HOptimizedGraphBuilder::NoElementsInPrototypeChain(
+ Handle<Map> receiver_map) {
+ // TODO(ishell): remove this once we support NO_ELEMENTS elements kind.
+ PrototypeIterator iter(receiver_map);
+ Handle<Object> empty_fixed_array =
+ iter.isolate()->factory()->empty_fixed_array();
+ while (true) {
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+ if (current->elements() != *empty_fixed_array) return false;
+ iter.Advance();
+ if (iter.IsAtEnd()) {
+ return true;
+ }
+ }
+}
+
bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
Handle<JSFunction> function, Handle<Map> receiver_map, BailoutId ast_id,
int args_count_no_receiver) {
@@ -8745,6 +8741,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
}
case kArrayShift: {
if (!CanInlineArrayResizeOperation(receiver_map)) return false;
+ if (!NoElementsInPrototypeChain(receiver_map)) return false;
ElementsKind kind = receiver_map->elements_kind();
// If there may be elements accessors in the prototype chain, the fast
@@ -8758,7 +8755,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
// in a map change.
BuildCheckPrototypeMaps(
handle(JSObject::cast(receiver_map->prototype()), isolate()),
- Handle<JSObject>::null());
+ Handle<JSObject>::null(), true);
// Threshold for fast inlined Array.shift().
HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
@@ -9686,7 +9683,7 @@ bool HOptimizedGraphBuilder::TryInlineArrayCall(Expression* expression,
// Checks whether allocation using the given constructor can be inlined.
static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
return constructor->has_initial_map() &&
- !IsSubclassConstructor(constructor->shared()->kind()) &&
+ !IsDerivedConstructor(constructor->shared()->kind()) &&
constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
constructor->initial_map()->instance_size() <
HAllocate::kMaxInlineSize;
@@ -10973,15 +10970,12 @@ static bool IsClassOfTest(CompareOperation* expr) {
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
if (!literal->value()->IsString()) return false;
- if (!call->is_jsruntime() &&
- call->function()->function_id != Runtime::kInlineClassOf) {
- return false;
- }
- DCHECK(call->arguments()->length() == 1);
+ if (call->is_jsruntime()) return false;
+ if (call->function()->function_id != Runtime::kInlineClassOf) return false;
+ DCHECK_EQ(call->arguments()->length(), 1);
return true;
}
-
void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
@@ -11208,8 +11202,9 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
// Make sure that the {function} already has a meaningful initial map
// (i.e. we constructed at least one instance using the constructor
- // {function}).
- if (function->has_initial_map()) {
+ // {function}), and has an instance as .prototype.
+ if (function->has_initial_map() &&
+ !function->map()->has_non_instance_prototype()) {
// Lookup @@hasInstance on the {function}.
Handle<Map> function_map(function->map(), isolate());
PropertyAccessInfo has_instance(
@@ -11502,6 +11497,9 @@ void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void HOptimizedGraphBuilder::VisitGetIterator(GetIterator* expr) {
+ UNREACHABLE();
+}
HValue* HOptimizedGraphBuilder::AddThisFunction() {
return AddInstruction(BuildThisFunction());
@@ -11653,7 +11651,8 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
int copied_fields = 0;
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
copied_fields++;
FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
@@ -11847,6 +11846,7 @@ void HOptimizedGraphBuilder::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_.Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -11885,6 +11885,7 @@ void HOptimizedGraphBuilder::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_.Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_.Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -11969,16 +11970,6 @@ void HOptimizedGraphBuilder::GenerateIsTypedArray(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HHasInstanceTypeAndBranch* result =
- New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
- return ast_context()->ReturnControl(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12127,19 +12118,6 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
return ast_context()->ReturnInstruction(result, call->id());
}
-// Support for direct creation of new objects.
-void HOptimizedGraphBuilder::GenerateNewObject(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- FastNewObjectStub stub(isolate());
- FastNewObjectDescriptor descriptor(isolate());
- HValue* values[] = {Pop(), Pop()};
- HConstant* stub_value = Add<HConstant>(stub.GetCode());
- HInstruction* result =
- New<HCallWithDescriptor>(stub_value, 0, descriptor, ArrayVector(values));
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
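
The new NoElementsInPrototypeChain predicate above (and the ensure_no_elements plumbing through BuildCheckPrototypeMaps) makes the inlined Array.shift bail out whenever any object on the receiver's prototype chain carries its own elements, since an inherited indexed property could otherwise become visible after the shift. A minimal sketch of the walk with invented types:

    struct ObjectSketch {
      const ObjectSketch* prototype = nullptr;  // null ends the chain
      bool has_own_elements = false;            // non-empty elements store
    };

    // Mirrors the PrototypeIterator loop: start at the receiver's prototype
    // and fail as soon as any link has elements of its own.
    bool NoElementsInPrototypeChain(const ObjectSketch* receiver) {
      for (const ObjectSketch* o = receiver->prototype; o != nullptr;
           o = o->prototype) {
        if (o->has_own_elements) return false;
      }
      return true;
    }
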
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 9f2508a7bf..203c374d25 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -463,12 +463,6 @@ class HGraph final : public ZoneObject {
void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
- // If we are tracking source positions then this function assigns a unique
- // identifier to each inlining and dumps function source if it was inlined
- // for the first time during the current optimization.
- int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position);
-
private:
HConstant* ReinsertConstantIfNecessary(HConstant* constant);
HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
@@ -1807,9 +1801,11 @@ class HGraphBuilder {
HValue* previous_object_size,
HValue* payload);
- HInstruction* BuildConstantMapCheck(Handle<JSObject> constant);
+ HInstruction* BuildConstantMapCheck(Handle<JSObject> constant,
+ bool ensure_no_elements = false);
HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
- Handle<JSObject> holder);
+ Handle<JSObject> holder,
+ bool ensure_no_elements = false);
HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
@@ -1853,9 +1849,6 @@ class HGraphBuilder {
bool is_tracking_positions() { return track_positions_; }
- void TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position, int inlining_id);
-
HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
template <typename ViewClass>
void BuildArrayBufferViewInitialization(HValue* obj,
@@ -2163,10 +2156,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
F(IsSmi) \
F(IsArray) \
F(IsTypedArray) \
- F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
- F(NewObject) \
F(ToInteger) \
F(ToObject) \
F(ToString) \
@@ -2386,6 +2377,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
TailCallMode syntactic_tail_call_mode);
static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
+ static bool NoElementsInPrototypeChain(Handle<Map> receiver_map);
// If --trace-inlining, print a line of the inlining trace. Inlining
// succeeded if the reason string is NULL and failed if there is a
@@ -2464,7 +2456,19 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
field_type_(HType::Tagged()),
access_(HObjectAccess::ForMap()),
lookup_type_(NOT_FOUND),
- details_(NONE, DATA, Representation::None()) {}
+ details_(PropertyDetails::Empty()),
+ store_mode_(STORE_TO_INITIALIZED_ENTRY) {}
+
+ // Ensure the full store is performed.
+ void MarkAsInitializingStore() {
+ DCHECK_EQ(STORE, access_type_);
+ store_mode_ = INITIALIZING_STORE;
+ }
+
+ StoreFieldOrKeyedMode StoreMode() {
+ DCHECK_EQ(STORE, access_type_);
+ return store_mode_;
+ }
  // Checks whether this PropertyAccessInfo can be handled as a monomorphic
// load named. It additionally fills in the fields necessary to generate the
@@ -2522,14 +2526,16 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
bool IsProperty() const { return IsFound() && !IsTransition(); }
bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
bool IsData() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+ details_.location() == kField;
}
bool IsDataConstant() const {
- return lookup_type_ == DESCRIPTOR_TYPE &&
- details_.type() == DATA_CONSTANT;
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.kind() == kData &&
+ details_.location() == kDescriptor;
}
bool IsAccessorConstant() const {
- return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
+ return !IsTransition() && details_.kind() == kAccessor &&
+ details_.location() == kDescriptor;
}
bool IsConfigurable() const { return details_.IsConfigurable(); }
bool IsReadOnly() const { return details_.IsReadOnly(); }
@@ -2578,6 +2584,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
transition_ = handle(target);
number_ = transition_->LastAdded();
details_ = transition_->instance_descriptors()->GetDetails(number_);
+ MarkAsInitializingStore();
}
void NotFound() {
lookup_type_ = NOT_FOUND;
@@ -2588,7 +2595,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
return details_.representation();
}
bool IsTransitionToData() const {
- return IsTransition() && details_.type() == DATA;
+ return IsTransition() && details_.kind() == kData &&
+ details_.location() == kField;
}
Zone* zone() { return builder_->zone(); }
@@ -2623,6 +2631,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
Handle<Map> transition_;
int number_;
PropertyDetails details_;
+ StoreFieldOrKeyedMode store_mode_;
};
HValue* BuildMonomorphicAccess(PropertyAccessInfo* info, HValue* object,
@@ -2804,7 +2813,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
- friend class KeyedLoadFastElementStub;
friend class HOsrBuilder;
DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
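
Several hunks above replace details_.type() checks with kind/location pairs: the underlying refactoring splits the old one-dimensional property type into two orthogonal axes, and the rewritten predicates spell out the mapping. A compact restatement of that mapping as implied by the diff (my own rendering, not V8's PropertyDetails definitions):

    enum class Kind { kData, kAccessor };
    enum class Location { kField, kDescriptor };
    struct Details { Kind kind; Location location; };

    // Old DATA              -> {kData,     kField}       (IsData)
    // Old DATA_CONSTANT     -> {kData,     kDescriptor}  (IsDataConstant)
    // Old ACCESSOR_CONSTANT -> {kAccessor, kDescriptor}  (IsAccessorConstant)
    bool IsData(Details d) {
      return d.kind == Kind::kData && d.location == Location::kField;
    }
    bool IsDataConstant(Details d) {
      return d.kind == Kind::kData && d.location == Location::kDescriptor;
    }
    bool IsAccessorConstant(Details d) {
      return d.kind == Kind::kAccessor && d.location == Location::kDescriptor;
    }
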
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index d9044cab45..978ae2f1c2 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -7,6 +7,7 @@
#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -176,15 +177,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Immediate(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
- __ push(edi);
+ __ Push(edi);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -2762,9 +2766,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
__ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
@@ -2772,7 +2776,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, dist);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
__ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -2869,7 +2873,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ push(Immediate(instr->hydrogen()->pairs()));
+ __ push(Immediate(instr->hydrogen()->declarations()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
__ push(Immediate(instr->hydrogen()->feedback_vector()));
CallRuntime(Runtime::kDeclareGlobals, instr);
@@ -3855,13 +3859,18 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (Smi::IsValid(int_key)) {
__ mov(ebx, Immediate(Smi::FromInt(int_key)));
} else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ int3();
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
+ Label is_smi;
__ Move(ebx, ToRegister(key));
__ SmiTag(ebx);
+    // Deopt if the key is outside Smi range. The stub expects a Smi and
+    // would bump the elements into dictionary mode (and trigger a deopt)
+    // anyway.
+ __ j(no_overflow, &is_smi);
+ __ PopSafepointRegisters();
+ DeoptimizeIf(no_condition, instr, DeoptimizeReason::kOverflow);
+ __ bind(&is_smi);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index abbf2085c6..36019cc94d 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -28,6 +28,7 @@
#include "src/crankshaft/mips/lithium-codegen-mips.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
@@ -202,15 +203,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ li(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(a1);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -1764,18 +1768,18 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
+
Label nan, done;
if (operation == HMathMinMax::kMathMax) {
- __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+ __ Float64Max(result_reg, left_reg, right_reg, &nan);
} else {
DCHECK(operation == HMathMinMax::kMathMin);
- __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+ __ Float64Min(result_reg, left_reg, right_reg, &nan);
}
__ Branch(&done);
__ bind(&nan);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ add_d(result_reg, left_reg, right_reg);
__ bind(&done);
}
@@ -2799,7 +2803,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
@@ -3058,7 +3062,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ li(scratch0(), instr->hydrogen()->pairs());
+ __ li(scratch0(), instr->hydrogen()->declarations());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
__ li(scratch0(), instr->hydrogen()->feedback_vector());
@@ -4016,13 +4020,19 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (Smi::IsValid(int_key)) {
__ li(a3, Operand(Smi::FromInt(int_key)));
} else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ stop("expected smi");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
- __ mov(a3, ToRegister(key));
- __ SmiTag(a3);
+ Label is_smi;
+ __ SmiTagCheckOverflow(a3, ToRegister(key), at);
+    // Deopt if the key is outside Smi range. The stub expects a Smi and
+    // would bump the elements into dictionary mode (and trigger a deopt)
+    // anyway.
+ __ BranchOnNoOverflow(&is_smi, at);
+ RestoreRegistersStateStub stub(isolate());
+ __ push(ra);
+ __ CallStub(&stub);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow);
+ __ bind(&is_smi);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
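
The MIPS Float64Max/Float64Min change above swaps the heap-loaded canonical NaN for a simple add_d of the two operands: if either input is NaN, IEEE 754 addition yields NaN, so the fallback produces a NaN result without touching the roots table. The same trick in plain C++ (simplified; the real Float64Max also has defined -0 vs +0 behavior):

    #include <cmath>

    // When either operand is NaN, a + b is NaN, so the NaN path needs no
    // separate NaN constant.
    double Float64MaxSketch(double a, double b) {
      if (std::isnan(a) || std::isnan(b)) return a + b;
      return a > b ? a : b;
    }
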
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 1531996c19..350cede90b 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -4,6 +4,7 @@
#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
@@ -178,15 +179,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ li(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(a1);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -1888,16 +1892,15 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
FPURegister result_reg = ToDoubleRegister(instr->result());
Label nan, done;
if (operation == HMathMinMax::kMathMax) {
- __ MaxNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+ __ Float64Max(result_reg, left_reg, right_reg, &nan);
} else {
DCHECK(operation == HMathMinMax::kMathMin);
- __ MinNaNCheck_d(result_reg, left_reg, right_reg, &nan);
+ __ Float64Min(result_reg, left_reg, right_reg, &nan);
}
__ Branch(&done);
__ bind(&nan);
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ add_d(result_reg, left_reg, right_reg);
__ bind(&done);
}
@@ -2976,7 +2979,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs LS bits of value, which is a smi.
- __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
@@ -3245,7 +3248,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ li(scratch0(), instr->hydrogen()->pairs());
+ __ li(scratch0(), instr->hydrogen()->declarations());
__ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
__ Push(scratch0(), scratch1());
__ li(scratch0(), instr->hydrogen()->feedback_vector());
@@ -4246,15 +4249,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
- LConstantOperand* constant_key = LConstantOperand::cast(key);
- int32_t int_key = ToInteger32(constant_key);
- if (Smi::IsValid(int_key)) {
- __ li(a3, Operand(Smi::FromInt(int_key)));
- } else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ stop("expected smi");
- }
+ __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
} else {
__ mov(a3, ToRegister(key));
__ SmiTag(a3);
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 9c65586820..1450a714c4 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -5,6 +5,7 @@
#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
@@ -186,15 +187,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r4);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -1986,16 +1990,32 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
- __ fadd(result, left, right);
+ if (CpuFeatures::IsSupported(VSX)) {
+ __ xsadddp(result, left, right);
+ } else {
+ __ fadd(result, left, right);
+ }
break;
case Token::SUB:
- __ fsub(result, left, right);
+ if (CpuFeatures::IsSupported(VSX)) {
+ __ xssubdp(result, left, right);
+ } else {
+ __ fsub(result, left, right);
+ }
break;
case Token::MUL:
- __ fmul(result, left, right);
+ if (CpuFeatures::IsSupported(VSX)) {
+ __ xsmuldp(result, left, right);
+ } else {
+ __ fmul(result, left, right);
+ }
break;
case Token::DIV:
- __ fdiv(result, left, right);
+ if (CpuFeatures::IsSupported(VSX)) {
+ __ xsdivdp(result, left, right);
+ } else {
+ __ fdiv(result, left, right);
+ }
break;
case Token::MOD: {
__ PrepareCallCFunction(0, 2, scratch0());
@@ -3049,7 +3069,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
@@ -3307,7 +3327,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->declarations());
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
@@ -4331,12 +4351,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (Smi::IsValid(int_key)) {
__ LoadSmiLiteral(r6, Smi::FromInt(int_key));
} else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ stop("expected smi");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
+ Label is_smi;
+#if V8_TARGET_ARCH_PPC64
__ SmiTag(r6, ToRegister(key));
+#else
+    // Deopt if the key is outside Smi range. The stub expects Smi and would
+    // bump the elements into dictionary mode (and trigger a deopt) anyway.
+ __ SmiTagCheckOverflow(r6, ToRegister(key), r0);
+ __ BranchOnNoOverflow(&is_smi);
+ __ PopSafepointRegisters();
+ DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
+ __ bind(&is_smi);
+#endif
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
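
On 32-bit PPC the tag now carries an overflow check instead of a trap: Smi-tagging doubles the value, so the add overflows exactly when the key leaves the 31-bit Smi range, and that case deopts. A host-side sketch using a GCC/Clang builtin, not V8 code:

#include <cstdint>
// Returns true when key + key wraps, i.e. the key is not Smi-representable;
// the generated code takes the DeoptimizeReason::kOverflow path then.
bool SmiTagOverflows(int32_t key, int32_t* tagged) {
  return __builtin_add_overflow(key, key, tagged);
}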
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index c44df9550a..7bbc917bc6 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -6,6 +6,7 @@
#include "src/crankshaft/s390/lithium-codegen-s390.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
@@ -177,15 +178,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r3);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -1283,8 +1287,12 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
__ bge(&done, Label::kNear);
// If there is no remainder then we are done.
- __ lr(scratch, result);
- __ msr(scratch, divisor);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ __ msrkc(scratch, result, divisor);
+ } else {
+ __ lr(scratch, result);
+ __ msr(scratch, divisor);
+ }
__ Cmp32(dividend, scratch);
__ beq(&done, Label::kNear);
@@ -1415,36 +1423,48 @@ void LCodeGen::DoMulI(LMulI* instr) {
Register right = ToRegister(right_op);
if (can_overflow) {
-#if V8_TARGET_ARCH_S390X
- // result = left * right.
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(result, left);
- __ SmiUntag(scratch, right);
- __ msgr(result, scratch);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ // result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(scratch, right);
+ __ MulPWithCondition(result, left, scratch);
+ } else {
+ __ msrkc(result, left, right);
+ __ LoadW(result, result);
+ }
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
} else {
- __ LoadRR(result, left);
- __ msgr(result, right);
- }
- __ TestIfInt32(result, r0);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiTag(result);
- }
+#if V8_TARGET_ARCH_S390X
+ // result = left * right.
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ SmiUntag(scratch, right);
+ __ msgr(result, scratch);
+ } else {
+ __ LoadRR(result, left);
+ __ msgr(result, right);
+ }
+ __ TestIfInt32(result, r0);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiTag(result);
+ }
#else
- // r0:scratch = scratch * right
- if (instr->hydrogen()->representation().IsSmi()) {
- __ SmiUntag(scratch, left);
- __ mr_z(r0, right);
- __ LoadRR(result, scratch);
- } else {
// r0:scratch = scratch * right
- __ LoadRR(scratch, left);
- __ mr_z(r0, right);
- __ LoadRR(result, scratch);
- }
- __ TestIfInt32(r0, result, scratch);
- DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ } else {
+ // r0:scratch = scratch * right
+ __ LoadRR(scratch, left);
+ __ mr_z(r0, right);
+ __ LoadRR(result, scratch);
+ }
+ __ TestIfInt32(r0, result, scratch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
#endif
+ }
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
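
Both multiply paths above detect the same condition differently: msrkc sets the condition code on a 32-bit multiply on MISC_INSTR_EXT2 cores, while the 64-bit fallback multiplies wide and verifies the product still fits in 32 bits (msgr plus TestIfInt32). A host-side equivalent of the fallback check, not V8 code:

#include <cstdint>
// True when the 64-bit product does not round-trip through int32, which is
// exactly when the generated code deopts with kOverflow.
bool Mul32Overflows(int32_t a, int32_t b, int32_t* out) {
  int64_t wide = static_cast<int64_t>(a) * b;
  *out = static_cast<int32_t>(wide);
  return wide != *out;
}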
@@ -1721,35 +1741,12 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
}
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
-
- DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
- right->IsConstantOperand());
-
-#if V8_TARGET_ARCH_S390X
- // The overflow detection needs to be tested on the lower 32-bits.
- // As a result, on 64-bit, we need to force 32-bit arithmetic operations
- // to set the CC overflow bit properly. The result is then sign-extended.
- bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
-#else
- bool checkOverflow = true;
-#endif
-
- Operand right_operand = ToOperand(right);
- __ mov(r0, right_operand);
-
- if (!checkOverflow) {
- __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
- } else {
- __ Sub32(ToRegister(result), r0, ToRegister(left));
- }
-}
-
void LCodeGen::DoConstantI(LConstantI* instr) {
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
+ Register dst = ToRegister(instr->result());
+ if (instr->value() == 0)
+ __ XorP(dst, dst);
+ else
+ __ Load(dst, Operand(instr->value()));
}
void LCodeGen::DoConstantS(LConstantS* instr) {
@@ -1992,20 +1989,38 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
- // All operations except MOD are computed in-place.
- DCHECK(instr->op() == Token::MOD || left.is(result));
switch (instr->op()) {
case Token::ADD:
- __ adbr(result, right);
+ if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ __ vfa(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ adbr(result, right);
+ }
break;
case Token::SUB:
- __ sdbr(result, right);
+ if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ __ vfs(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ sdbr(result, right);
+ }
break;
case Token::MUL:
- __ mdbr(result, right);
+ if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ __ vfm(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ mdbr(result, right);
+ }
break;
case Token::DIV:
- __ ddbr(result, right);
+ if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
+ __ vfd(result, left, right);
+ } else {
+ DCHECK(result.is(left));
+ __ ddbr(result, right);
+ }
break;
case Token::MOD: {
__ PrepareCallCFunction(0, 2, scratch0());
@@ -3012,7 +3027,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
__ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
@@ -3258,7 +3273,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- __ Move(scratch0(), instr->hydrogen()->pairs());
+ __ Move(scratch0(), instr->hydrogen()->declarations());
__ push(scratch0());
__ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
__ push(scratch0());
@@ -3391,31 +3406,17 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
void LCodeGen::EmitMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label done;
- __ CmpP(input, Operand::Zero());
- __ Move(result, input);
- __ bge(&done, Label::kNear);
- __ LoadComplementRR(result, result);
+ __ LoadPositiveP(result, input);
// Deoptimize on overflow.
DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
- __ bind(&done);
}
#if V8_TARGET_ARCH_S390X
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label done;
- __ Cmp32(input, Operand::Zero());
- __ Move(result, input);
- __ bge(&done, Label::kNear);
-
- // Deoptimize on overflow.
- __ Cmp32(input, Operand(0x80000000));
- DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
-
- __ LoadComplementRR(result, result);
- __ bind(&done);
+ __ LoadPositive32(result, input);
+ DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
}
#endif
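
LoadPositiveP and LoadPositive32 collapse the old compare/branch/negate sequences into one instruction that flags overflow only for the most negative integer, the single value without a positive two's-complement counterpart. A host-side sketch of the semantics, not V8 code:

#include <cstdint>
// Returns true on overflow (the deopt case); otherwise stores |input|.
bool LoadPositive(int32_t input, int32_t* result) {
  if (input == INT32_MIN) return true;  // -INT32_MIN is unrepresentable
  *result = input < 0 ? -input : input;
  return false;
}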
@@ -3537,9 +3538,13 @@ void LCodeGen::DoMathFround(LMathFround* instr) {
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
- __ sqdbr(result, input);
+ LOperand* input = instr->value();
+ if (input->IsDoubleRegister()) {
+ __ Sqrt(result, ToDoubleRegister(instr->value()));
+ } else {
+ __ Sqrt(result, ToMemOperand(input));
+ }
}
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
@@ -4287,12 +4292,21 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (Smi::IsValid(int_key)) {
__ LoadSmiLiteral(r5, Smi::FromInt(int_key));
} else {
- // We should never get here at runtime because there is a smi check on
- // the key before this point.
- __ stop("expected smi");
+ Abort(kArrayIndexConstantValueTooBig);
}
} else {
+ Label is_smi;
+#if V8_TARGET_ARCH_S390X
__ SmiTag(r5, ToRegister(key));
+#else
+    // Deopt if the key is outside Smi range. The stub expects Smi and would
+    // bump the elements into dictionary mode (and trigger a deopt) anyway.
+ __ Add32(r5, ToRegister(key), ToRegister(key));
+ __ b(nooverflow, &is_smi);
+ __ PopSafepointRegisters();
+ DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
+ __ bind(&is_smi);
+#endif
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
@@ -4877,14 +4891,42 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
- __ TestIfSmi(ToRegister(input));
+ if (input->IsRegister()) {
+ __ TestIfSmi(ToRegister(input));
+ } else if (input->IsStackSlot()) {
+ MemOperand value = ToMemOperand(input);
+#if !V8_TARGET_LITTLE_ENDIAN
+#if V8_TARGET_ARCH_S390X
+ __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
+#else
+ __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
+#endif
+#else
+ __ TestIfSmi(value);
+#endif
+ }
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
- __ TestIfSmi(ToRegister(input));
+ if (input->IsRegister()) {
+ __ TestIfSmi(ToRegister(input));
+ } else if (input->IsStackSlot()) {
+ MemOperand value = ToMemOperand(input);
+#if !V8_TARGET_LITTLE_ENDIAN
+#if V8_TARGET_ARCH_S390X
+ __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
+#else
+ __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
+#endif
+#else
+ __ TestIfSmi(value);
+#endif
+ } else {
+ UNIMPLEMENTED();
+ }
DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
}
}
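
The stack-slot offsets above come from endianness: the Smi tag occupies the low-order bit of the tagged word, so a sub-word TestIfSmi on a big-endian target must address the slot's last byte, hence +7 for 64-bit and +3 for 32-bit slots. A host-side sketch, not V8 code:

#include <cstddef>
#include <cstdint>
// V8 Smis carry a 0 tag bit; heap objects carry 1.
bool SlotHoldsSmi(const uint8_t* slot, size_t word_size, bool big_endian) {
  size_t low_byte = big_endian ? word_size - 1 : 0;
  return (slot[low_byte] & 1) == 0;
}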
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.cc b/deps/v8/src/crankshaft/s390/lithium-s390.cc
index 3d14764032..79868f5579 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.cc
@@ -619,7 +619,9 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return CpuFeatures::IsSupported(VECTOR_FACILITY)
+ ? DefineAsRegister(result)
+ : DefineSameAsFirst(result);
}
}
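
The allocator change in DoArithmeticD follows from the instruction forms: the two-operand adbr family destroys its left input, which is what DefineSameAsFirst encodes, while the three-operand vector instructions write an independent destination, so any register may hold the result. A C++ analogy of the two constraints, illustration only:

// Two-operand form: the accumulator is both input and output.
double AddTwoOperand(double& acc, double rhs) { return acc += rhs; }
// Three-operand form: inputs are preserved, the result is independent.
double AddThreeOperand(double lhs, double rhs) { return lhs + rhs; }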
@@ -1056,7 +1058,7 @@ LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
}
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
- LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* input = UseAtStart(instr->value());
LMathSqrt* result = new (zone()) LMathSqrt(input);
return DefineAsRegister(result);
}
@@ -1353,12 +1355,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation()));
- if (instr->left()->IsConstant() &&
- !instr->CheckFlag(HValue::kCanOverflow)) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new (zone()) LSubI(left, right);
@@ -1374,21 +1370,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
}
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- DCHECK(instr->representation().IsSmiOrInteger32());
- DCHECK(instr->left()->representation().Equals(instr->representation()));
- DCHECK(instr->right()->representation().Equals(instr->representation()));
- DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new (zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- return result;
-}
-
LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
LOperand* multiplier_op = UseRegister(mul->left());
LOperand* multiplicand_op = UseRegister(mul->right());
@@ -1697,7 +1678,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
LInstruction* result = new (zone()) LCheckNonSmi(value);
if (!instr->value()->type().IsHeapObject()) {
result = AssignEnvironment(result);
@@ -1706,7 +1687,7 @@ LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
}
LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
- LOperand* value = UseRegisterAtStart(instr->value());
+ LOperand* value = UseAtStart(instr->value());
return AssignEnvironment(new (zone()) LCheckSmi(value));
}
diff --git a/deps/v8/src/crankshaft/s390/lithium-s390.h b/deps/v8/src/crankshaft/s390/lithium-s390.h
index b946d4f271..f9710b1092 100644
--- a/deps/v8/src/crankshaft/s390/lithium-s390.h
+++ b/deps/v8/src/crankshaft/s390/lithium-s390.h
@@ -133,7 +133,6 @@ class LCodeGen;
V(StringCharFromCode) \
V(StringCompareAndBranch) \
V(SubI) \
- V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(TransitionElementsKind) \
@@ -1090,20 +1089,6 @@ class LSubI final : public LTemplateInstruction<1, 2, 0> {
DECLARE_HYDROGEN_ACCESSOR(Sub)
};
-class LRSubI final : public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
class LConstantI final : public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@@ -2141,7 +2126,6 @@ class LChunkBuilder final : public LChunkBuilderBase {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
- LInstruction* DoRSub(HSub* instr);
static bool HasMagicNumberForDivisor(int32_t divisor);
diff --git a/deps/v8/src/crankshaft/typing.cc b/deps/v8/src/crankshaft/typing.cc
index f21d235cb3..bbf629d45e 100644
--- a/deps/v8/src/crankshaft/typing.cc
+++ b/deps/v8/src/crankshaft/typing.cc
@@ -756,6 +756,7 @@ void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void AstTyper::VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
void AstTyper::VisitThisFunction(ThisFunction* expr) {}
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 6889040996..f09af7136e 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -7,6 +7,7 @@
#include "src/crankshaft/x64/lithium-codegen-x64.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
@@ -179,14 +180,17 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ CallStub(&stub);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
-    // Result of FastNewFunctionContextStub is always in new space.
+    // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ Push(rdi);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -2825,7 +2829,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
- __ Cmp(FieldOperand(result, Cell::kValueOffset),
+ __ Cmp(FieldOperand(result, PropertyCell::kValueOffset),
Smi::FromInt(Isolate::kProtectorValid));
DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
}
@@ -2958,9 +2962,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
__ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
@@ -2968,7 +2972,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, dist);
__ bind(&global_object);
__ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
__ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -3062,7 +3066,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- __ Push(instr->hydrogen()->pairs());
+ __ Push(instr->hydrogen()->declarations());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
__ Push(instr->hydrogen()->feedback_vector());
CallRuntime(Runtime::kDeclareGlobals, instr);
diff --git a/deps/v8/src/crankshaft/x87/OWNERS b/deps/v8/src/crankshaft/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/crankshaft/x87/OWNERS
+++ b/deps/v8/src/crankshaft/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index b83d97f981..9c932bc6ae 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -7,6 +7,7 @@
#include "src/crankshaft/x87/lithium-codegen-x87.h"
#include "src/base/bits.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -146,15 +147,18 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
__ CallRuntime(Runtime::kNewScriptContext);
deopt_mode = Safepoint::kLazyDeopt;
} else {
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info()->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Immediate(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
- __ push(edi);
+ __ Push(edi);
+ __ Push(Smi::FromInt(info()->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
}
@@ -3017,7 +3021,16 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label receiver_ok, global_object;
- Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+ Label::Distance dist;
+
+  // In the x87 debug build the jitted code size exceeds 128 bytes whether or
+  // not FLAG_deopt_every_n_times is set, so always use Label::kFar for the
+  // label distance in debug mode.
+ if (FLAG_debug_code)
+ dist = Label::kFar;
+ else
+ dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
Register scratch = ToRegister(instr->temp());
if (!instr->hydrogen()->known_function()) {
@@ -3037,9 +3050,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Normal function. Replace undefined or null with global receiver.
__ cmp(receiver, factory()->null_value());
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
__ cmp(receiver, factory()->undefined_value());
- __ j(equal, &global_object, Label::kNear);
+ __ j(equal, &global_object, dist);
// The receiver should be a JS object.
__ test(receiver, Immediate(kSmiTagMask));
@@ -3047,7 +3060,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
- __ jmp(&receiver_ok, Label::kNear);
+ __ jmp(&receiver_ok, dist);
__ bind(&global_object);
__ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
__ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
@@ -3144,7 +3157,7 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
- __ push(Immediate(instr->hydrogen()->pairs()));
+ __ push(Immediate(instr->hydrogen()->declarations()));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
__ push(Immediate(instr->hydrogen()->feedback_vector()));
CallRuntime(Runtime::kDeclareGlobals, instr);
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index fd9afee808..a34bafcc65 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -27,9 +27,12 @@
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
#include "src/base/sys-info.h"
#include "src/basic-block-profiler.h"
#include "src/interpreter/interpreter.h"
+#include "src/msan.h"
+#include "src/objects-inl.h"
#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -62,15 +65,76 @@ namespace {
const int MB = 1024 * 1024;
const int kMaxWorkers = 50;
+#define USE_VM 1
+#define VM_THRESHOLD 65536
+// TODO(titzer): allocations should fail if >= 2gb because of
+// array buffers storing the lengths as a SMI internally.
+#define TWO_GB (2u * 1024u * 1024u * 1024u)
class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
+#if USE_VM
+ if (RoundToPageSize(&length)) {
+ void* data = VirtualMemoryAllocate(length);
+#if DEBUG
+ if (data) {
+ // In debug mode, check the memory is zero-initialized.
+ size_t limit = length / sizeof(uint64_t);
+ uint64_t* ptr = reinterpret_cast<uint64_t*>(data);
+ for (size_t i = 0; i < limit; i++) {
+ DCHECK_EQ(0u, ptr[i]);
+ }
+ }
+#endif
+ return data;
+ }
+#endif
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
- virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
- virtual void Free(void* data, size_t) { free(data); }
+ virtual void* AllocateUninitialized(size_t length) {
+#if USE_VM
+ if (RoundToPageSize(&length)) return VirtualMemoryAllocate(length);
+#endif
+// Workaround for a GCC bug on AIX.
+// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
+#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
+ return __linux_malloc(length);
+#else
+ return malloc(length);
+#endif
+ }
+ virtual void Free(void* data, size_t length) {
+#if USE_VM
+ if (RoundToPageSize(&length)) {
+ base::VirtualMemory::ReleaseRegion(data, length);
+ return;
+ }
+#endif
+ free(data);
+ }
+  // If {length} is at least {VM_THRESHOLD}, round it up to the next multiple
+  // of the page size and return {true}. Otherwise return {false}.
+ bool RoundToPageSize(size_t* length) {
+ const size_t kPageSize = base::OS::CommitPageSize();
+ if (*length >= VM_THRESHOLD && *length < TWO_GB) {
+ *length = ((*length + kPageSize - 1) / kPageSize) * kPageSize;
+ return true;
+ }
+ return false;
+ }
+#if USE_VM
+ void* VirtualMemoryAllocate(size_t length) {
+ void* data = base::VirtualMemory::ReserveRegion(length);
+ if (data && !base::VirtualMemory::CommitRegion(data, length, false)) {
+ base::VirtualMemory::ReleaseRegion(data, length);
+ return nullptr;
+ }
+ MSAN_MEMORY_IS_INITIALIZED(data, length);
+ return data;
+ }
+#endif
};
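
RoundToPageSize is the usual round-up-to-multiple arithmetic: with 4 KiB pages, for example, a 65537-byte request becomes 69632 bytes (17 pages). A minimal sketch assuming a fixed page size; the real value comes from base::OS::CommitPageSize():

#include <cstddef>
constexpr size_t kPageSize = 4096;  // assumption for illustration
size_t RoundUpToPage(size_t length) {
  return ((length + kPageSize - 1) / kPageSize) * kPageSize;
}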
@@ -366,12 +430,6 @@ bool CounterMap::Match(void* key1, void* key2) {
}
-// Converts a V8 value to a C string.
-const char* Shell::ToCString(const v8::String::Utf8Value& value) {
- return *value ? *value : "<string conversion failed>";
-}
-
-
ScriptCompiler::CachedData* CompileForCachedData(
Local<String> source, Local<Value> name,
ScriptCompiler::CompileOptions compile_options) {
@@ -810,20 +868,24 @@ void Shell::RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
MaybeLocal<Context> Shell::CreateRealm(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
+ const v8::FunctionCallbackInfo<v8::Value>& args, int index,
+ v8::MaybeLocal<Value> global_object) {
Isolate* isolate = args.GetIsolate();
TryCatch try_catch(isolate);
PerIsolateData* data = PerIsolateData::Get(isolate);
- Global<Context>* old_realms = data->realms_;
- int index = data->realm_count_;
- data->realms_ = new Global<Context>[++data->realm_count_];
- for (int i = 0; i < index; ++i) {
- data->realms_[i].Reset(isolate, old_realms[i]);
- old_realms[i].Reset();
- }
- delete[] old_realms;
+ if (index < 0) {
+ Global<Context>* old_realms = data->realms_;
+ index = data->realm_count_;
+ data->realms_ = new Global<Context>[++data->realm_count_];
+ for (int i = 0; i < index; ++i) {
+ data->realms_[i].Reset(isolate, old_realms[i]);
+ old_realms[i].Reset();
+ }
+ delete[] old_realms;
+ }
Local<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
- Local<Context> context = Context::New(isolate, NULL, global_template);
+ Local<Context> context =
+ Context::New(isolate, NULL, global_template, global_object);
if (context.IsEmpty()) {
DCHECK(try_catch.HasCaught());
try_catch.ReThrow();
@@ -835,10 +897,20 @@ MaybeLocal<Context> Shell::CreateRealm(
return context;
}
+void Shell::DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int index) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
+ data->realms_[index].Reset();
+ isolate->ContextDisposedNotification();
+ isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
+}
+
// Realm.create() creates a new realm with a distinct security token
// and returns its index.
void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CreateRealm(args);
+ CreateRealm(args, -1, v8::MaybeLocal<Value>());
}
// Realm.createAllowCrossRealmAccess() creates a new realm with the same
@@ -846,12 +918,26 @@ void Shell::RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::RealmCreateAllowCrossRealmAccess(
const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context;
- if (CreateRealm(args).ToLocal(&context)) {
+ if (CreateRealm(args, -1, v8::MaybeLocal<Value>()).ToLocal(&context)) {
context->SetSecurityToken(
args.GetIsolate()->GetEnteredContext()->GetSecurityToken());
}
}
+// Realm.navigate(i) creates a new realm with a distinct security token
+// in place of realm i.
+void Shell::RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ PerIsolateData* data = PerIsolateData::Get(isolate);
+ int index = data->RealmIndexOrThrow(args, 0);
+ if (index == -1) return;
+
+ Local<Context> context = Local<Context>::New(isolate, data->realms_[index]);
+ v8::MaybeLocal<Value> global_object = context->Global();
+ DisposeRealm(args, index);
+ CreateRealm(args, index, global_object);
+}
+
// Realm.dispose(i) disposes the reference to realm i.
void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
@@ -863,10 +949,7 @@ void Shell::RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args) {
Throw(args.GetIsolate(), "Invalid realm index");
return;
}
- DisposeModuleEmbedderData(data->realms_[index].Get(isolate));
- data->realms_[index].Reset();
- isolate->ContextDisposedNotification();
- isolate->IdleNotificationDeadline(g_platform->MonotonicallyIncreasingTime());
+ DisposeRealm(args, index);
}
@@ -1200,12 +1283,17 @@ void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
HandleScope handle_scope(isolate);
- Local<Context> context;
- bool enter_context = !isolate->InContext();
+ Local<Context> context = isolate->GetCurrentContext();
+ bool enter_context = context.IsEmpty();
if (enter_context) {
context = Local<Context>::New(isolate, evaluation_context_);
context->Enter();
}
+ // Converts a V8 value to a C string.
+ auto ToCString = [](const v8::String::Utf8Value& value) {
+ return *value ? *value : "<string conversion failed>";
+ };
+
v8::String::Utf8Value exception(try_catch->Exception());
const char* exception_string = ToCString(exception);
Local<Message> message = try_catch->Message();
@@ -1213,40 +1301,40 @@ void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) {
// V8 didn't provide any extra information about this error; just
// print the exception.
printf("%s\n", exception_string);
+ } else if (message->GetScriptOrigin().Options().IsWasm()) {
+    // Print <WASM>[(function index)]+(offset): (message).
+ int function_index = message->GetLineNumber(context).FromJust() - 1;
+ int offset = message->GetStartColumn(context).FromJust();
+ printf("<WASM>[%d]+%d: %s\n", function_index, offset, exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
const char* filename_string = ToCString(filename);
- Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
- int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
+ int linenum = message->GetLineNumber(context).FromMaybe(-1);
printf("%s:%i: %s\n", filename_string, linenum, exception_string);
Local<String> sourceline;
- if (message->GetSourceLine(isolate->GetCurrentContext())
- .ToLocal(&sourceline)) {
+ if (message->GetSourceLine(context).ToLocal(&sourceline)) {
// Print line of source code.
v8::String::Utf8Value sourcelinevalue(sourceline);
const char* sourceline_string = ToCString(sourcelinevalue);
printf("%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
- int start =
- message->GetStartColumn(isolate->GetCurrentContext()).FromJust();
+ int start = message->GetStartColumn(context).FromJust();
for (int i = 0; i < start; i++) {
printf(" ");
}
- int end = message->GetEndColumn(isolate->GetCurrentContext()).FromJust();
+ int end = message->GetEndColumn(context).FromJust();
for (int i = start; i < end; i++) {
printf("^");
}
printf("\n");
}
- Local<Value> stack_trace_string;
- if (try_catch->StackTrace(isolate->GetCurrentContext())
- .ToLocal(&stack_trace_string) &&
- stack_trace_string->IsString()) {
- v8::String::Utf8Value stack_trace(
- Local<String>::Cast(stack_trace_string));
- printf("%s\n", ToCString(stack_trace));
- }
+ }
+ Local<Value> stack_trace_string;
+ if (try_catch->StackTrace(context).ToLocal(&stack_trace_string) &&
+ stack_trace_string->IsString()) {
+ v8::String::Utf8Value stack_trace(Local<String>::Cast(stack_trace_string));
+ printf("%s\n", ToCString(stack_trace));
}
printf("\n");
if (enter_context) context->Exit();
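
For wasm frames the script location is overloaded: the line number carries the function index (1-based, hence the -1) and the start column carries the byte offset into that function. A sketch of the decoding only, not the d8 implementation:

#include <cstdio>
void PrintWasmLocation(int line_number, int start_column, const char* msg) {
  std::printf("<WASM>[%d]+%d: %s\n", line_number - 1, start_column, msg);
}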
@@ -1455,6 +1543,10 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
FunctionTemplate::New(isolate, RealmCreateAllowCrossRealmAccess));
realm_template->Set(
+ String::NewFromUtf8(isolate, "navigate", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, RealmNavigate));
+ realm_template->Set(
String::NewFromUtf8(isolate, "dispose", NewStringType::kNormal)
.ToLocalChecked(),
FunctionTemplate::New(isolate, RealmDispose));
@@ -1524,9 +1616,43 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
return global_template;
}
-static void EmptyMessageCallback(Local<Message> message, Local<Value> error) {
- // Nothing to be done here, exceptions thrown up to the shell will be reported
+static void PrintNonErrorsMessageCallback(Local<Message> message,
+ Local<Value> error) {
+  // Nothing to do here for errors: exceptions thrown up to the shell are
+  // reported separately by {Shell::ReportException} after they are caught.
+  // Other kinds of messages are printed below.
+ switch (message->ErrorLevel()) {
+ case v8::Isolate::kMessageWarning:
+ case v8::Isolate::kMessageLog:
+ case v8::Isolate::kMessageInfo:
+ case v8::Isolate::kMessageDebug: {
+ break;
+ }
+
+ case v8::Isolate::kMessageError: {
+      // Ignore errors; they are printed elsewhere.
+ return;
+ }
+
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ // Converts a V8 value to a C string.
+ auto ToCString = [](const v8::String::Utf8Value& value) {
+ return *value ? *value : "<string conversion failed>";
+ };
+ Isolate* isolate = Isolate::GetCurrent();
+ v8::String::Utf8Value msg(message->Get());
+ const char* msg_string = ToCString(msg);
+ // Print (filename):(line number): (message).
+ v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
+ const char* filename_string = ToCString(filename);
+ Maybe<int> maybeline = message->GetLineNumber(isolate->GetCurrentContext());
+ int linenum = maybeline.IsJust() ? maybeline.FromJust() : -1;
+ printf("%s:%i: %s\n", filename_string, linenum, msg_string);
}
void Shell::Initialize(Isolate* isolate) {
@@ -1534,7 +1660,11 @@ void Shell::Initialize(Isolate* isolate) {
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(isolate, i::FLAG_map_counters);
-  // Disable default message reporting.
+  // Print messages of all levels except errors, which are reported
+  // separately by Shell::ReportException.
- isolate->AddMessageListener(EmptyMessageCallback);
+ isolate->AddMessageListenerWithErrorLevel(
+ PrintNonErrorsMessageCallback,
+ v8::Isolate::kMessageError | v8::Isolate::kMessageWarning |
+ v8::Isolate::kMessageInfo | v8::Isolate::kMessageDebug |
+ v8::Isolate::kMessageLog);
}
@@ -1595,7 +1725,7 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
void Shell::OnExit(v8::Isolate* isolate) {
- if (i::FLAG_dump_counters) {
+ if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
number_of_counters++;
@@ -1607,24 +1737,44 @@ void Shell::OnExit(v8::Isolate* isolate) {
counters[j].key = i.CurrentKey();
}
std::sort(counters, counters + number_of_counters);
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
- printf("| Name |"
- " Value |\n");
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
- for (j = 0; j < number_of_counters; j++) {
- Counter* counter = counters[j].counter;
- const char* key = counters[j].key;
- if (counter->is_histogram()) {
- printf("| c:%-60s | %11i |\n", key, counter->count());
- printf("| t:%-60s | %11i |\n", key, counter->sample_total());
- } else {
- printf("| %-62s | %11i |\n", key, counter->count());
+
+ if (i::FLAG_dump_counters_nvp) {
+ // Dump counters as name-value pairs.
+ for (j = 0; j < number_of_counters; j++) {
+ Counter* counter = counters[j].counter;
+ const char* key = counters[j].key;
+ if (counter->is_histogram()) {
+ printf("\"c:%s\"=%i\n", key, counter->count());
+ printf("\"t:%s\"=%i\n", key, counter->sample_total());
+ } else {
+ printf("\"%s\"=%i\n", key, counter->count());
+ }
}
+ } else {
+ // Dump counters in formatted boxes.
+ printf(
+ "+----------------------------------------------------------------+"
+ "-------------+\n");
+ printf(
+ "| Name |"
+ " Value |\n");
+ printf(
+ "+----------------------------------------------------------------+"
+ "-------------+\n");
+ for (j = 0; j < number_of_counters; j++) {
+ Counter* counter = counters[j].counter;
+ const char* key = counters[j].key;
+ if (counter->is_histogram()) {
+ printf("| c:%-60s | %11i |\n", key, counter->count());
+ printf("| t:%-60s | %11i |\n", key, counter->sample_total());
+ } else {
+ printf("| %-62s | %11i |\n", key, counter->count());
+ }
+ }
+ printf(
+ "+----------------------------------------------------------------+"
+ "-------------+\n");
}
- printf("+----------------------------------------------------------------+"
- "-------------+\n");
delete [] counters;
}
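
Both branches emit the same counters in different shapes: --dump-counters-nvp produces grep-friendly name=value pairs, while --dump-counters keeps the boxed table. A condensed sketch of the per-counter formatting, counter names assumed:

#include <cstdio>
void DumpCounter(const char* key, int count, bool nvp) {
  if (nvp) {
    std::printf("\"%s\"=%i\n", key, count);          // name-value pair
  } else {
    std::printf("| %-62s | %11i |\n", key, count);   // boxed table row
  }
}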
@@ -1771,13 +1921,14 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
virtual ~InspectorFrontend() = default;
private:
- void sendProtocolResponse(int callId,
- const v8_inspector::StringView& message) override {
- Send(message);
+ void sendResponse(
+ int callId,
+ std::unique_ptr<v8_inspector::StringBuffer> message) override {
+ Send(message->string());
}
- void sendProtocolNotification(
- const v8_inspector::StringView& message) override {
- Send(message);
+ void sendNotification(
+ std::unique_ptr<v8_inspector::StringBuffer> message) override {
+ Send(message->string());
}
void flushProtocolNotifications() override {}
@@ -1806,7 +1957,21 @@ class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
Local<Value> args[] = {message};
MaybeLocal<Value> result = Local<Function>::Cast(callback)->Call(
context, Undefined(isolate_), 1, args);
- CHECK(!result.IsEmpty()); // Listeners may not throw.
+#ifdef DEBUG
+ if (try_catch.HasCaught()) {
+ Local<Object> exception = Local<Object>::Cast(try_catch.Exception());
+ Local<String> key = v8::String::NewFromUtf8(isolate_, "message",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<String> expected =
+ v8::String::NewFromUtf8(isolate_,
+ "Maximum call stack size exceeded",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
+ Local<Value> value = exception->Get(context, key).ToLocalChecked();
+ CHECK(value->StrictEquals(expected));
+ }
+#endif
}
}
@@ -2467,6 +2632,8 @@ void Shell::CollectGarbage(Isolate* isolate) {
void Shell::EmptyMessageQueues(Isolate* isolate) {
if (!i::FLAG_verify_predictable) {
while (v8::platform::PumpMessageLoop(g_platform, isolate)) continue;
+ v8::platform::RunIdleTasks(g_platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
}
}
@@ -2854,7 +3021,7 @@ int Shell::Main(int argc, char* argv[]) {
base::SysInfo::AmountOfVirtualMemory());
Shell::counter_map_ = new CounterMap();
- if (i::FLAG_dump_counters || i::FLAG_gc_stats) {
+ if (i::FLAG_dump_counters || i::FLAG_dump_counters_nvp || i::FLAG_gc_stats) {
create_params.counter_lookup_callback = LookupCounter;
create_params.create_histogram_callback = CreateHistogram;
create_params.add_histogram_sample_callback = AddHistogramSample;
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 5e7abafb04..c3729f92ba 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -11,6 +11,7 @@
#include "src/base/hashmap.h"
#include "src/base/platform/time.h"
#include "src/list.h"
+#include "src/utils.h"
#include "src/base/once.h"
@@ -324,7 +325,6 @@ class Shell : public i::AllStatic {
Local<Value> name, bool print_result,
bool report_exceptions);
static bool ExecuteModule(Isolate* isolate, const char* file_name);
- static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(Isolate* isolate, TryCatch* try_catch);
static Local<String> ReadFile(Isolate* isolate, const char* name);
static Local<Context> CreateEvaluationContext(Isolate* isolate);
@@ -360,6 +360,7 @@ class Shell : public i::AllStatic {
static void RealmOwner(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmGlobal(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmCreate(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmCreateAllowCrossRealmAccess(
const v8::FunctionCallbackInfo<v8::Value>& args);
static void RealmDispose(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -456,7 +457,10 @@ class Shell : public i::AllStatic {
static bool SetOptions(int argc, char* argv[]);
static Local<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static MaybeLocal<Context> CreateRealm(
- const v8::FunctionCallbackInfo<v8::Value>& args);
+ const v8::FunctionCallbackInfo<v8::Value>& args, int index,
+ v8::MaybeLocal<Value> global_object);
+ static void DisposeRealm(const v8::FunctionCallbackInfo<v8::Value>& args,
+ int index);
static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
const std::string& file_name);
};
diff --git a/deps/v8/src/debug/arm/debug-arm.cc b/deps/v8/src/debug/arm/debug-arm.cc
index d96ec31bfa..145f371f99 100644
--- a/deps/v8/src/debug/arm/debug-arm.cc
+++ b/deps/v8/src/debug/arm/debug-arm.cc
@@ -136,7 +136,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(r1, no_reg, dummy, dummy);
+ __ CheckDebugHook(r1, no_reg, dummy, dummy);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
// Load context from the function.
diff --git a/deps/v8/src/debug/arm64/debug-arm64.cc b/deps/v8/src/debug/arm64/debug-arm64.cc
index e344924a61..75eb2837c2 100644
--- a/deps/v8/src/debug/arm64/debug-arm64.cc
+++ b/deps/v8/src/debug/arm64/debug-arm64.cc
@@ -147,7 +147,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ Pop(fp, lr); // Frame, Return address.
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(x1, no_reg, dummy, dummy);
+ __ CheckDebugHook(x1, no_reg, dummy, dummy);
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 8970520edc..96cd98d3f2 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -12,6 +12,8 @@
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
#include "src/isolate-inl.h"
namespace v8 {
@@ -21,12 +23,10 @@ static inline bool IsDebugContext(Isolate* isolate, Context* context) {
return context->native_context() == *isolate->debug()->debug_context();
}
-
-MaybeHandle<Object> DebugEvaluate::Global(
- Isolate* isolate, Handle<String> source, bool disable_break,
- Handle<HeapObject> context_extension) {
+MaybeHandle<Object> DebugEvaluate::Global(Isolate* isolate,
+ Handle<String> source) {
// Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug(), disable_break);
+ DisableBreak disable_break_scope(isolate->debug());
// Enter the top context from before the debugger was invoked.
SaveContext save(isolate);
@@ -41,19 +41,15 @@ MaybeHandle<Object> DebugEvaluate::Global(
Handle<Context> context = isolate->native_context();
Handle<JSObject> receiver(context->global_proxy());
Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
- return Evaluate(isolate, outer_info, context, context_extension, receiver,
- source);
+ return Evaluate(isolate, outer_info, context, receiver, source);
}
-
MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
StackFrame::Id frame_id,
int inlined_jsframe_index,
- Handle<String> source,
- bool disable_break,
- Handle<HeapObject> context_extension) {
+ Handle<String> source) {
// Handle the processing of break.
- DisableBreak disable_break_scope(isolate->debug(), disable_break);
+ DisableBreak disable_break_scope(isolate->debug());
// Get the frame where the debugging is performed.
StackTraceFrameIterator it(isolate, frame_id);
@@ -78,9 +74,8 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
Handle<Context> context = context_builder.evaluation_context();
Handle<JSObject> receiver(context->global_proxy());
- MaybeHandle<Object> maybe_result =
- Evaluate(isolate, context_builder.outer_info(), context,
- context_extension, receiver, source);
+ MaybeHandle<Object> maybe_result = Evaluate(
+ isolate, context_builder.outer_info(), context, receiver, source);
if (!maybe_result.is_null()) context_builder.UpdateValues();
return maybe_result;
}
@@ -89,20 +84,7 @@ MaybeHandle<Object> DebugEvaluate::Local(Isolate* isolate,
// Compile and evaluate source for the given context.
MaybeHandle<Object> DebugEvaluate::Evaluate(
Isolate* isolate, Handle<SharedFunctionInfo> outer_info,
- Handle<Context> context, Handle<HeapObject> context_extension,
- Handle<Object> receiver, Handle<String> source) {
- if (context_extension->IsJSObject()) {
- Handle<JSObject> extension = Handle<JSObject>::cast(context_extension);
- Handle<JSFunction> closure(context->closure(), isolate);
- context = isolate->factory()->NewWithContext(
- closure, context,
- ScopeInfo::CreateForWithScope(
- isolate, context->IsNativeContext()
- ? Handle<ScopeInfo>::null()
- : Handle<ScopeInfo>(context->scope_info())),
- extension);
- }
-
+ Handle<Context> context, Handle<Object> receiver, Handle<String> source) {
Handle<JSFunction> eval_fun;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, eval_fun,
@@ -112,9 +94,13 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
Object);
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
- Object);
+ {
+ NoSideEffectScope no_side_effect(isolate,
+ FLAG_side_effect_free_debug_evaluate);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, eval_fun, receiver, 0, NULL),
+ Object);
+ }
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
@@ -158,8 +144,8 @@ DebugEvaluate::ContextBuilder::ContextBuilder(Isolate* isolate,
// - Look up in the original context.
// - Check the whitelist to find out whether to skip contexts during lookup.
const ScopeIterator::Option option = ScopeIterator::COLLECT_NON_LOCALS;
- for (ScopeIterator it(isolate, &frame_inspector, option);
- !it.Failed() && !it.Done(); it.Next()) {
+ for (ScopeIterator it(isolate, &frame_inspector, option); !it.Done();
+ it.Next()) {
ScopeIterator::ScopeType scope_type = it.Type();
if (scope_type == ScopeIterator::ScopeTypeLocal) {
DCHECK_EQ(FUNCTION_SCOPE, it.CurrentScopeInfo()->scope_type());
@@ -239,7 +225,7 @@ void DebugEvaluate::ContextBuilder::MaterializeArgumentsObject(
Handle<JSObject> target, Handle<JSFunction> function) {
// Do not materialize the arguments object for eval or top-level code.
// Skip if "arguments" is already taken.
- if (!function->shared()->is_function()) return;
+ if (function->shared()->is_toplevel()) return;
Maybe<bool> maybe = JSReceiver::HasOwnProperty(
target, isolate_->factory()->arguments_string());
DCHECK(maybe.IsJust());
@@ -269,5 +255,265 @@ void DebugEvaluate::ContextBuilder::MaterializeReceiver(
JSObject::SetOwnPropertyIgnoreAttributes(target, name, recv, NONE).Check();
}
+namespace {
+
+bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
+ switch (id) {
+    // Whitelist for intrinsics and runtime functions.
+ // Conversions.
+ case Runtime::kToInteger:
+ case Runtime::kInlineToInteger:
+ case Runtime::kToObject:
+ case Runtime::kInlineToObject:
+ case Runtime::kToString:
+ case Runtime::kInlineToString:
+ case Runtime::kToLength:
+ case Runtime::kInlineToLength:
+ // Loads.
+ case Runtime::kLoadLookupSlotForCall:
+ // Errors.
+ case Runtime::kThrowReferenceError:
+ // Strings.
+ case Runtime::kInlineStringCharCodeAt:
+ case Runtime::kStringCharCodeAt:
+ case Runtime::kStringIndexOf:
+ case Runtime::kStringReplaceOneCharWithString:
+ case Runtime::kSubString:
+ case Runtime::kInlineSubString:
+ case Runtime::kStringToLowerCase:
+ case Runtime::kStringToUpperCase:
+ case Runtime::kRegExpInternalReplace:
+ // Literals.
+ case Runtime::kCreateArrayLiteral:
+ case Runtime::kCreateObjectLiteral:
+ case Runtime::kCreateRegExpLiteral:
+ // Misc.
+ case Runtime::kInlineCall:
+ case Runtime::kCall:
+ case Runtime::kInlineMaxSmi:
+ case Runtime::kMaxSmi:
+ return true;
+ default:
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] intrinsic %s may cause side effect.\n",
+ Runtime::FunctionForId(id)->name);
+ }
+ return false;
+ }
+}
+
+bool BytecodeHasNoSideEffect(interpreter::Bytecode bytecode) {
+ typedef interpreter::Bytecode Bytecode;
+ typedef interpreter::Bytecodes Bytecodes;
+ if (Bytecodes::IsWithoutExternalSideEffects(bytecode)) return true;
+ if (Bytecodes::IsCallOrNew(bytecode)) return true;
+ if (Bytecodes::WritesBooleanToAccumulator(bytecode)) return true;
+ if (Bytecodes::IsJumpIfToBoolean(bytecode)) return true;
+ if (Bytecodes::IsPrefixScalingBytecode(bytecode)) return true;
+ switch (bytecode) {
+ // Whitelist for bytecodes.
+ // Loads.
+ case Bytecode::kLdaLookupSlot:
+ case Bytecode::kLdaGlobal:
+ case Bytecode::kLdaNamedProperty:
+ case Bytecode::kLdaKeyedProperty:
+ // Arithmetics.
+ case Bytecode::kAdd:
+ case Bytecode::kAddSmi:
+ case Bytecode::kSub:
+ case Bytecode::kSubSmi:
+ case Bytecode::kMul:
+ case Bytecode::kDiv:
+ case Bytecode::kMod:
+ case Bytecode::kBitwiseAnd:
+ case Bytecode::kBitwiseAndSmi:
+ case Bytecode::kBitwiseOr:
+ case Bytecode::kBitwiseOrSmi:
+ case Bytecode::kBitwiseXor:
+ case Bytecode::kShiftLeft:
+ case Bytecode::kShiftLeftSmi:
+ case Bytecode::kShiftRight:
+ case Bytecode::kShiftRightSmi:
+ case Bytecode::kShiftRightLogical:
+ case Bytecode::kInc:
+ case Bytecode::kDec:
+ case Bytecode::kLogicalNot:
+ case Bytecode::kToBooleanLogicalNot:
+ case Bytecode::kTypeOf:
+ // Contexts.
+ case Bytecode::kCreateBlockContext:
+ case Bytecode::kCreateCatchContext:
+ case Bytecode::kCreateFunctionContext:
+ case Bytecode::kCreateEvalContext:
+ case Bytecode::kCreateWithContext:
+ // Literals.
+ case Bytecode::kCreateArrayLiteral:
+ case Bytecode::kCreateObjectLiteral:
+ case Bytecode::kCreateRegExpLiteral:
+ // Misc.
+ case Bytecode::kCreateUnmappedArguments:
+ case Bytecode::kThrow:
+ case Bytecode::kIllegal:
+ case Bytecode::kCallJSRuntime:
+ case Bytecode::kStackCheck:
+ case Bytecode::kReturn:
+ case Bytecode::kSetPendingMessage:
+ return true;
+ default:
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] bytecode %s may cause side effect.\n",
+ Bytecodes::ToString(bytecode));
+ }
+ return false;
+ }
+}
+
+bool BuiltinHasNoSideEffect(Builtins::Name id) {
+ switch (id) {
+ // Whitelist for builtins.
+ // Math builtins.
+ case Builtins::kMathAbs:
+ case Builtins::kMathAcos:
+ case Builtins::kMathAcosh:
+ case Builtins::kMathAsin:
+ case Builtins::kMathAsinh:
+ case Builtins::kMathAtan:
+ case Builtins::kMathAtanh:
+ case Builtins::kMathAtan2:
+ case Builtins::kMathCeil:
+ case Builtins::kMathCbrt:
+ case Builtins::kMathExpm1:
+ case Builtins::kMathClz32:
+ case Builtins::kMathCos:
+ case Builtins::kMathCosh:
+ case Builtins::kMathExp:
+ case Builtins::kMathFloor:
+ case Builtins::kMathFround:
+ case Builtins::kMathHypot:
+ case Builtins::kMathImul:
+ case Builtins::kMathLog:
+ case Builtins::kMathLog1p:
+ case Builtins::kMathLog2:
+ case Builtins::kMathLog10:
+ case Builtins::kMathMax:
+ case Builtins::kMathMin:
+ case Builtins::kMathPow:
+ case Builtins::kMathRandom:
+ case Builtins::kMathRound:
+ case Builtins::kMathSign:
+ case Builtins::kMathSin:
+ case Builtins::kMathSinh:
+ case Builtins::kMathSqrt:
+ case Builtins::kMathTan:
+ case Builtins::kMathTanh:
+ case Builtins::kMathTrunc:
+ // Number builtins.
+ case Builtins::kNumberConstructor:
+ case Builtins::kNumberIsFinite:
+ case Builtins::kNumberIsInteger:
+ case Builtins::kNumberIsNaN:
+ case Builtins::kNumberIsSafeInteger:
+ case Builtins::kNumberParseFloat:
+ case Builtins::kNumberParseInt:
+ case Builtins::kNumberPrototypeToExponential:
+ case Builtins::kNumberPrototypeToFixed:
+ case Builtins::kNumberPrototypeToPrecision:
+ case Builtins::kNumberPrototypeToString:
+ case Builtins::kNumberPrototypeValueOf:
+ // String builtins. Strings are immutable.
+ case Builtins::kStringFromCharCode:
+ case Builtins::kStringFromCodePoint:
+ case Builtins::kStringConstructor:
+ case Builtins::kStringPrototypeCharAt:
+ case Builtins::kStringPrototypeCharCodeAt:
+ case Builtins::kStringPrototypeEndsWith:
+ case Builtins::kStringPrototypeIncludes:
+ case Builtins::kStringPrototypeIndexOf:
+ case Builtins::kStringPrototypeLastIndexOf:
+ case Builtins::kStringPrototypeStartsWith:
+ case Builtins::kStringPrototypeSubstr:
+ case Builtins::kStringPrototypeSubstring:
+ case Builtins::kStringPrototypeToString:
+ case Builtins::kStringPrototypeTrim:
+ case Builtins::kStringPrototypeTrimLeft:
+ case Builtins::kStringPrototypeTrimRight:
+ case Builtins::kStringPrototypeValueOf:
+ // JSON builtins.
+ case Builtins::kJsonParse:
+ case Builtins::kJsonStringify:
+ return true;
+ default:
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] built-in %s may cause side effect.\n",
+ Builtins::name(id));
+ }
+ return false;
+ }
+}
+
+static const Address accessors_with_no_side_effect[] = {
+ // Whitelist for accessors.
+ FUNCTION_ADDR(Accessors::StringLengthGetter),
+ FUNCTION_ADDR(Accessors::ArrayLengthGetter)};
+
+} // anonymous namespace
+
+// static
+bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] Checking function %s for side effect.\n",
+ info->DebugName()->ToCString().get());
+ }
+
+ DCHECK(info->is_compiled());
+
+ if (info->HasBytecodeArray()) {
+ // Check bytecodes against whitelist.
+ Handle<BytecodeArray> bytecode_array(info->bytecode_array());
+ if (FLAG_trace_side_effect_free_debug_evaluate) bytecode_array->Print();
+ for (interpreter::BytecodeArrayIterator it(bytecode_array); !it.done();
+ it.Advance()) {
+ interpreter::Bytecode bytecode = it.current_bytecode();
+
+ if (interpreter::Bytecodes::IsCallRuntime(bytecode)) {
+ Runtime::FunctionId id =
+ (bytecode == interpreter::Bytecode::kInvokeIntrinsic)
+ ? it.GetIntrinsicIdOperand(0)
+ : it.GetRuntimeIdOperand(0);
+ if (IntrinsicHasNoSideEffect(id)) continue;
+ return false;
+ }
+
+ if (BytecodeHasNoSideEffect(bytecode)) continue;
+
+ // Did not match whitelist.
+ return false;
+ }
+ return true;
+ } else {
+ // Check built-ins against whitelist.
+ int builtin_index = info->code()->builtin_index();
+ if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
+ BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// static
+bool DebugEvaluate::CallbackHasNoSideEffect(Address function_addr) {
+ for (size_t i = 0; i < arraysize(accessors_with_no_side_effect); i++) {
+ if (function_addr == accessors_with_no_side_effect[i]) return true;
+ }
+
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] API Callback at %p may cause side effect.\n",
+ reinterpret_cast<void*>(function_addr));
+ }
+ return false;
+}
+
} // namespace internal
} // namespace v8
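
The three whitelists above (bytecodes, builtins, accessor addresses) share a
default-deny shape: anything not explicitly listed is treated as potentially
side-effecting. A minimal self-contained sketch of that pattern, using
stand-in enums rather than the V8-internal types:

    #include <cstdio>

    // Stand-in for interpreter::Bytecode; kStaGlobal models a store.
    enum class Bytecode { kAdd, kLdaZero, kStaGlobal };

    // Default-deny: only explicitly whitelisted bytecodes pass.
    bool BytecodeHasNoSideEffect(Bytecode b) {
      switch (b) {
        case Bytecode::kAdd:
        case Bytecode::kLdaZero:
          return true;
        default:
          return false;  // unknown or mutating bytecodes fail the check
      }
    }

    int main() {
      std::printf("%d %d\n", BytecodeHasNoSideEffect(Bytecode::kAdd),
                  BytecodeHasNoSideEffect(Bytecode::kStaGlobal));  // 1 0
    }
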
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 26f4e414e7..3b4d1f4640 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -13,9 +13,7 @@ namespace internal {
class DebugEvaluate : public AllStatic {
public:
- static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source,
- bool disable_break,
- Handle<HeapObject> context_extension);
+ static MaybeHandle<Object> Global(Isolate* isolate, Handle<String> source);
// Evaluate a piece of JavaScript in the context of a stack frame for
// debugging. Things that need special attention are:
@@ -24,8 +22,10 @@ class DebugEvaluate : public AllStatic {
// - The arguments object needs to be materialized.
static MaybeHandle<Object> Local(Isolate* isolate, StackFrame::Id frame_id,
int inlined_jsframe_index,
- Handle<String> source, bool disable_break,
- Handle<HeapObject> context_extension);
+ Handle<String> source);
+
+ static bool FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info);
+ static bool CallbackHasNoSideEffect(Address function_addr);
private:
// This class builds a context chain for evaluation of expressions
@@ -85,7 +85,6 @@ class DebugEvaluate : public AllStatic {
static MaybeHandle<Object> Evaluate(Isolate* isolate,
Handle<SharedFunctionInfo> outer_info,
Handle<Context> context,
- Handle<HeapObject> context_extension,
Handle<Object> receiver,
Handle<String> source);
};
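
The FunctionHasNoSideEffect declaration above corresponds to the bytecode
scan in debug-evaluate.cc: runtime calls are checked against their own
whitelist, every other instruction against the bytecode whitelist, and the
first miss fails the whole function. A hedged sketch of that scan with
simplified stand-in types (not the V8 API):

    #include <utility>
    #include <vector>

    enum class Op { kAdd, kCallRuntime, kStaGlobal };

    bool OpIsWhitelisted(Op op) { return op == Op::kAdd; }
    bool RuntimeCallIsWhitelisted(int runtime_id) { return runtime_id == 0; }

    // An instruction is a (bytecode, operand) pair; the operand carries the
    // runtime id for kCallRuntime and is ignored otherwise.
    bool FunctionHasNoSideEffect(const std::vector<std::pair<Op, int>>& code) {
      for (const auto& instr : code) {
        if (instr.first == Op::kCallRuntime) {
          if (RuntimeCallIsWhitelisted(instr.second)) continue;
          return false;
        }
        if (OpIsWhitelisted(instr.first)) continue;
        return false;  // did not match any whitelist
      }
      return true;
    }
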
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index 5da1656fad..15d6ed5b4d 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -5,33 +5,38 @@
#include "src/debug/debug-frames.h"
#include "src/frames-inl.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-objects.h"
namespace v8 {
namespace internal {
-FrameInspector::FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
+FrameInspector::FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate)
- : frame_(frame), deoptimized_frame_(NULL), isolate_(isolate) {
+ : frame_(frame),
+ frame_summary_(FrameSummary::Get(frame, inlined_frame_index)),
+ deoptimized_frame_(nullptr),
+ isolate_(isolate) {
JavaScriptFrame* js_frame =
frame->is_java_script() ? javascript_frame() : nullptr;
DCHECK(js_frame || frame->is_wasm());
has_adapted_arguments_ = js_frame && js_frame->has_adapted_arguments();
- is_bottommost_ = inlined_jsframe_index == 0;
+ is_bottommost_ = inlined_frame_index == 0;
is_optimized_ = frame_->is_optimized();
is_interpreted_ = frame_->is_interpreted();
+
// Calculate the deoptimized frame.
- if (frame->is_optimized()) {
+ if (is_optimized_) {
DCHECK(js_frame != nullptr);
// TODO(turbofan): Revisit once we support deoptimization.
if (js_frame->LookupCode()->is_turbofanned() &&
- js_frame->function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
+ js_frame->function()->shared()->asm_function()) {
is_optimized_ = false;
return;
}
deoptimized_frame_ = Deoptimizer::DebuggerInspectableFrame(
- js_frame, inlined_jsframe_index, isolate);
+ js_frame, inlined_frame_index, isolate);
}
}
@@ -48,16 +53,11 @@ int FrameInspector::GetParametersCount() {
}
Handle<Script> FrameInspector::GetScript() {
- Object* script = is_optimized_
- ? deoptimized_frame_->GetFunction()->shared()->script()
- : frame_->script();
- return handle(Script::cast(script), isolate_);
+ return Handle<Script>::cast(frame_summary_.script());
}
Handle<JSFunction> FrameInspector::GetFunction() {
- DCHECK(!frame_->is_wasm());
- return is_optimized_ ? deoptimized_frame_->GetFunction()
- : handle(javascript_frame()->function(), isolate_);
+ return frame_summary_.AsJavaScript().function();
}
Handle<Object> FrameInspector::GetParameter(int index) {
@@ -69,8 +69,7 @@ Handle<Object> FrameInspector::GetExpression(int index) {
// TODO(turbofan): Revisit once we support deoptimization.
if (frame_->is_java_script() &&
javascript_frame()->LookupCode()->is_turbofanned() &&
- javascript_frame()->function()->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization) {
+ javascript_frame()->function()->shared()->asm_function()) {
return isolate_->factory()->undefined_value();
}
return is_optimized_ ? deoptimized_frame_->GetExpression(index)
@@ -78,22 +77,16 @@ Handle<Object> FrameInspector::GetExpression(int index) {
}
int FrameInspector::GetSourcePosition() {
- return is_optimized_ ? deoptimized_frame_->GetSourcePosition()
- : frame_->position();
+ return frame_summary_.SourcePosition();
}
-bool FrameInspector::IsConstructor() {
- return is_optimized_ && !is_bottommost_
- ? deoptimized_frame_->HasConstructStub()
- : frame_->IsConstructor();
-}
+bool FrameInspector::IsConstructor() { return frame_summary_.is_constructor(); }
Handle<Object> FrameInspector::GetContext() {
return is_optimized_ ? deoptimized_frame_->GetContext()
: handle(frame_->context(), isolate_);
}
-
// To inspect all the provided arguments the frame might need to be
// replaced with the arguments frame.
void FrameInspector::SetArgumentsFrame(StandardFrame* frame) {
@@ -211,15 +204,11 @@ int DebugFrameHelper::FindIndexedNonNativeFrame(StackTraceFrameIterator* it,
int index) {
int count = -1;
for (; !it->done(); it->Advance()) {
- if (it->is_wasm()) {
- if (++count == index) return 0;
- continue;
- }
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it->javascript_frame()->Summarize(&frames);
+ it->frame()->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
// Omit functions from native and extension scripts.
- if (!frames[i].function()->shared()->IsSubjectToDebugging()) continue;
+ if (!frames[i].is_subject_to_debugging()) continue;
if (++count == index) return i;
}
}
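
The FrameInspector changes above replace per-getter branching on
is_optimized_ with a single FrameSummary computed once in the constructor.
A sketch of that refactoring pattern with illustrative types (not the V8
classes):

    // Before: each getter re-derived its answer from the raw frame,
    // branching on whether the frame was optimized. After: one summary,
    // computed once, that every getter delegates to.
    struct FrameSummary {
      int source_position;
      bool is_constructor;
    };

    class FrameInspector {
     public:
      explicit FrameInspector(FrameSummary summary) : summary_(summary) {}
      int GetSourcePosition() const { return summary_.source_position; }
      bool IsConstructor() const { return summary_.is_constructor; }

     private:
      FrameSummary summary_;  // single source of truth for frame facts
    };
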
diff --git a/deps/v8/src/debug/debug-frames.h b/deps/v8/src/debug/debug-frames.h
index e8698e70ae..2793693774 100644
--- a/deps/v8/src/debug/debug-frames.h
+++ b/deps/v8/src/debug/debug-frames.h
@@ -15,11 +15,13 @@ namespace internal {
class FrameInspector {
public:
- FrameInspector(StandardFrame* frame, int inlined_jsframe_index,
+ FrameInspector(StandardFrame* frame, int inlined_frame_index,
Isolate* isolate);
~FrameInspector();
+ FrameSummary& summary() { return frame_summary_; }
+
int GetParametersCount();
Handle<JSFunction> GetFunction();
Handle<Script> GetScript();
@@ -33,7 +35,6 @@ class FrameInspector {
return frame_->is_arguments_adaptor() ? ArgumentsAdaptorFrame::cast(frame_)
: JavaScriptFrame::cast(frame_);
}
- inline WasmFrame* wasm_frame() { return WasmFrame::cast(frame_); }
JavaScriptFrame* GetArgumentsFrame() { return javascript_frame(); }
void SetArgumentsFrame(StandardFrame* frame);
@@ -52,6 +53,7 @@ class FrameInspector {
Handle<String> parameter_name);
StandardFrame* frame_;
+ FrameSummary frame_summary_;
DeoptimizedFrameInfo* deoptimized_frame_;
Isolate* isolate_;
bool is_optimized_;
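
With the new public summary() accessor, callers can read frame facts through
FrameSummary directly instead of going through individual getters. A hedged
usage sketch, assuming an existing StandardFrame* and Isolate*:

    FrameInspector inspector(frame, inlined_frame_index, isolate);
    FrameSummary& summary = inspector.summary();
    int position = summary.SourcePosition();  // what GetSourcePosition() returns
    bool is_ctor = summary.is_constructor();  // what IsConstructor() returns
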
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 443ed4232f..2e8abc6e54 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -5,205 +5,214 @@
#ifndef V8_DEBUG_DEBUG_INTERFACE_H_
#define V8_DEBUG_DEBUG_INTERFACE_H_
+#include <functional>
+
#include "include/v8-debug.h"
#include "include/v8-util.h"
#include "include/v8.h"
+#include "src/debug/interface-types.h"
+
namespace v8 {
+namespace debug {
-class DebugInterface {
+/**
+ * An event details object passed to the debug event listener.
+ */
+class EventDetails : public v8::Debug::EventDetails {
public:
/**
- * An event details object passed to the debug event listener.
+ * Event type.
*/
- class EventDetails : public v8::Debug::EventDetails {
- public:
- /**
- * Event type.
- */
- virtual v8::DebugEvent GetEvent() const = 0;
-
- /**
- * Access to execution state and event data of the debug event. Don't store
- * these cross callbacks as their content becomes invalid.
- */
- virtual Local<Object> GetExecutionState() const = 0;
- virtual Local<Object> GetEventData() const = 0;
-
- /**
- * Get the context active when the debug event happened. Note this is not
- * the current active context as the JavaScript part of the debugger is
- * running in its own context which is entered at this point.
- */
- virtual Local<Context> GetEventContext() const = 0;
-
- /**
- * Client data passed with the corresponding callback when it was
- * registered.
- */
- virtual Local<Value> GetCallbackData() const = 0;
-
- virtual ~EventDetails() {}
- };
+ virtual v8::DebugEvent GetEvent() const = 0;
/**
- * Debug event callback function.
- *
- * \param event_details object providing information about the debug event
- *
- * A EventCallback does not take possession of the event data,
- * and must not rely on the data persisting after the handler returns.
+ * Access to execution state and event data of the debug event. Don't store
+ * these across callbacks as their content becomes invalid.
*/
- typedef void (*EventCallback)(const EventDetails& event_details);
-
- static bool SetDebugEventListener(Isolate* isolate, EventCallback that,
- Local<Value> data = Local<Value>());
+ virtual Local<Object> GetExecutionState() const = 0;
+ virtual Local<Object> GetEventData() const = 0;
/**
- * Debugger is running in its own context which is entered while debugger
- * messages are being dispatched. This is an explicit getter for this
- * debugger context. Note that the content of the debugger context is subject
- * to change. The Context exists only when the debugger is active, i.e. at
- * least one DebugEventListener or MessageHandler is set.
+ * Get the context active when the debug event happened. Note this is not
+ * the current active context as the JavaScript part of the debugger is
+ * running in its own context which is entered at this point.
*/
- static Local<Context> GetDebugContext(Isolate* isolate);
+ virtual Local<Context> GetEventContext() const = 0;
/**
- * Run a JavaScript function in the debugger.
- * \param fun the function to call
- * \param data passed as second argument to the function
- * With this call the debugger is entered and the function specified is called
- * with the execution state as the first argument. This makes it possible to
- * get access to information otherwise not available during normal JavaScript
- * execution e.g. details on stack frames. Receiver of the function call will
- * be the debugger context global object, however this is a subject to change.
- * The following example shows a JavaScript function which when passed to
- * v8::Debug::Call will return the current line of JavaScript execution.
- *
- * \code
- * function frame_source_line(exec_state) {
- * return exec_state.frame(0).sourceLine();
- * }
- * \endcode
+ * Client data passed with the corresponding callback when it was
+ * registered.
*/
- // TODO(dcarney): data arg should be a MaybeLocal
- static MaybeLocal<Value> Call(Local<Context> context,
- v8::Local<v8::Function> fun,
- Local<Value> data = Local<Value>());
+ virtual Local<Value> GetCallbackData() const = 0;
- /**
- * Enable/disable LiveEdit functionality for the given Isolate
- * (default Isolate if not provided). V8 will abort if LiveEdit is
- * unexpectedly used. LiveEdit is enabled by default.
- */
- static void SetLiveEditEnabled(Isolate* isolate, bool enable);
+ virtual ~EventDetails() {}
+};
- // Schedule a debugger break to happen when JavaScript code is run
- // in the given isolate.
- static void DebugBreak(Isolate* isolate);
+/**
+ * Debug event callback function.
+ *
+ * \param event_details object providing information about the debug event
+ *
+ * An EventCallback does not take possession of the event data,
+ * and must not rely on the data persisting after the handler returns.
+ */
+typedef void (*EventCallback)(const EventDetails& event_details);
+
+bool SetDebugEventListener(Isolate* isolate, EventCallback that,
+ Local<Value> data = Local<Value>());
+
+/**
+ * Debugger is running in its own context which is entered while debugger
+ * messages are being dispatched. This is an explicit getter for this
+ * debugger context. Note that the content of the debugger context is subject
+ * to change. The Context exists only when the debugger is active, i.e. at
+ * least one DebugEventListener or MessageHandler is set.
+ */
+Local<Context> GetDebugContext(Isolate* isolate);
+
+/**
+ * Run a JavaScript function in the debugger.
+ * \param fun the function to call
+ * \param data passed as second argument to the function
+ * With this call the debugger is entered and the function specified is called
+ * with the execution state as the first argument. This makes it possible to
+ * get access to information otherwise not available during normal JavaScript
+ * execution e.g. details on stack frames. Receiver of the function call will
+ * be the debugger context global object; however, this is subject to change.
+ * The following example shows a JavaScript function which when passed to
+ * v8::Debug::Call will return the current line of JavaScript execution.
+ *
+ * \code
+ * function frame_source_line(exec_state) {
+ * return exec_state.frame(0).sourceLine();
+ * }
+ * \endcode
+ */
+// TODO(dcarney): data arg should be a MaybeLocal
+MaybeLocal<Value> Call(Local<Context> context, v8::Local<v8::Function> fun,
+ Local<Value> data = Local<Value>());
+
+/**
+ * Enable/disable LiveEdit functionality for the given Isolate
+ * (default Isolate if not provided). V8 will abort if LiveEdit is
+ * unexpectedly used. LiveEdit is enabled by default.
+ */
+void SetLiveEditEnabled(Isolate* isolate, bool enable);
+
+// Schedule a debugger break to happen when JavaScript code is run
+// in the given isolate.
+void DebugBreak(Isolate* isolate);
+
+// Remove scheduled debugger break in given isolate if it has not
+// happened yet.
+void CancelDebugBreak(Isolate* isolate);
+
+/**
+ * Returns array of internal properties specific to the value type. Result has
+ * the following format: [<name>, <value>,...,<name>, <value>]. Result array
+ * will be allocated in the current context.
+ */
+MaybeLocal<Array> GetInternalProperties(Isolate* isolate, Local<Value> value);
+
+enum ExceptionBreakState {
+ NoBreakOnException = 0,
+ BreakOnUncaughtException = 1,
+ BreakOnAnyException = 2
+};
+
+/**
+ * Defines if VM will pause on exceptions or not.
+ * If BreakOnAnyException is set then VM will pause on caught and uncaught
+ * exceptions, if BreakOnUncaughtException is set then VM will pause only on
+ * uncaught exceptions, otherwise VM won't stop on any exception.
+ */
+void ChangeBreakOnException(Isolate* isolate, ExceptionBreakState state);
+
+enum StepAction {
+ StepOut = 0, // Step out of the current function.
+ StepNext = 1, // Step to the next statement in the current function.
+ StepIn = 2, // Step into new functions invoked or the next statement
+ // in the current function.
+ StepFrame = 3 // Step into a new frame or return to previous frame.
+};
- // Remove scheduled debugger break in given isolate if it has not
- // happened yet.
- static void CancelDebugBreak(Isolate* isolate);
+void PrepareStep(Isolate* isolate, StepAction action);
+void ClearStepping(Isolate* isolate);
+
+/**
+ * Out-of-memory callback function.
+ * The function is invoked when the heap size is close to the hard limit.
+ *
+ * \param data the parameter provided during callback installation.
+ */
+typedef void (*OutOfMemoryCallback)(void* data);
+void SetOutOfMemoryCallback(Isolate* isolate, OutOfMemoryCallback callback,
+ void* data);
+
+/**
+ * Native wrapper around v8::internal::Script object.
+ */
+class Script {
+ public:
+ v8::Isolate* GetIsolate() const;
+
+ ScriptOriginOptions OriginOptions() const;
+ bool WasCompiled() const;
+ int Id() const;
+ int LineOffset() const;
+ int ColumnOffset() const;
+ std::vector<int> LineEnds() const;
+ MaybeLocal<String> Name() const;
+ MaybeLocal<String> SourceURL() const;
+ MaybeLocal<String> SourceMappingURL() const;
+ MaybeLocal<Value> ContextData() const;
+ MaybeLocal<String> Source() const;
+ bool IsWasm() const;
+ bool GetPossibleBreakpoints(const debug::Location& start,
+ const debug::Location& end,
+ std::vector<debug::Location>* locations) const;
/**
- * Returns array of internal properties specific to the value type. Result has
- * the following format: [<name>, <value>,...,<name>, <value>]. Result array
- * will be allocated in the current context.
+ * The script parameter is a wrapper v8::internal::JSObject for
+ * v8::internal::Script.
+ * This function extracts the v8::internal::Script from that JSObject and
+ * wraps it as a debug::Script.
+ * Returns an empty local if not called with a valid wrapper of
+ * v8::internal::Script.
*/
- static MaybeLocal<Array> GetInternalProperties(Isolate* isolate,
- Local<Value> value);
+ static MaybeLocal<Script> Wrap(Isolate* isolate,
+ v8::Local<v8::Object> script);
- enum ExceptionBreakState {
- NoBreakOnException = 0,
- BreakOnUncaughtException = 1,
- BreakOnAnyException = 2
- };
+ private:
+ int GetSourcePosition(const debug::Location& location) const;
+};
- /**
- * Defines if VM will pause on exceptions or not.
- * If BreakOnAnyExceptions is set then VM will pause on caught and uncaught
- * exception, if BreakOnUncaughtException is set then VM will pause only on
- * uncaught exception, otherwise VM won't stop on any exception.
- */
- static void ChangeBreakOnException(Isolate* isolate,
- ExceptionBreakState state);
+// Specialization for wasm Scripts.
+class WasmScript : public Script {
+ public:
+ static WasmScript* Cast(Script* script);
- enum StepAction {
- StepOut = 0, // Step out of the current function.
- StepNext = 1, // Step to the next statement in the current function.
- StepIn = 2, // Step into new functions invoked or the next statement
- // in the current function.
- StepFrame = 3 // Step into a new frame or return to previous frame.
- };
+ int NumFunctions() const;
+ int NumImportedFunctions() const;
- static void PrepareStep(Isolate* isolate, StepAction action);
- static void ClearStepping(Isolate* isolate);
+ debug::WasmDisassembly DisassembleFunction(int function_index) const;
+};
- /**
- * Defines location inside script.
- * Lines and columns are 0-based.
- */
- class Location {
- public:
- Location(int lineNumber, int columnNumber);
- /**
- * Create empty location.
- */
- Location();
-
- int GetLineNumber() const;
- int GetColumnNumber() const;
- bool IsEmpty() const;
-
- private:
- int lineNumber_;
- int columnNumber_;
- };
+void GetLoadedScripts(Isolate* isolate, PersistentValueVector<Script>& scripts);
- /**
- * Native wrapper around v8::internal::Script object.
- */
- class Script {
- public:
- v8::Isolate* GetIsolate() const;
-
- ScriptOriginOptions OriginOptions() const;
- bool WasCompiled() const;
- int Id() const;
- int LineOffset() const;
- int ColumnOffset() const;
- std::vector<int> LineEnds() const;
- MaybeLocal<String> Name() const;
- MaybeLocal<String> SourceURL() const;
- MaybeLocal<String> SourceMappingURL() const;
- MaybeLocal<String> ContextData() const;
- MaybeLocal<String> Source() const;
- bool GetPossibleBreakpoints(const Location& start, const Location& end,
- std::vector<Location>* locations) const;
-
- /**
- * script parameter is a wrapper v8::internal::JSObject for
- * v8::internal::Script.
- * This function gets v8::internal::Script from v8::internal::JSObject and
- * wraps it with DebugInterface::Script.
- * Returns empty local if not called with a valid wrapper of
- * v8::internal::Script.
- */
- static MaybeLocal<Script> Wrap(Isolate* isolate,
- v8::Local<v8::Object> script);
-
- private:
- int GetSourcePosition(const Location& location) const;
- };
+MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
+ Local<String> source);
- /**
- * Return array of compiled scripts.
- */
- static void GetLoadedScripts(Isolate* isolate,
- PersistentValueVector<Script>& scripts);
-};
+typedef std::function<void(debug::PromiseDebugActionType type, int id,
+ void* data)>
+ AsyncTaskListener;
+void SetAsyncTaskListener(Isolate* isolate, AsyncTaskListener listener,
+ void* data);
+
+int EstimatedValueSize(Isolate* isolate, v8::Local<v8::Value> value);
+} // namespace debug
} // namespace v8
#endif // V8_DEBUG_DEBUG_INTERFACE_H_
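
Taken together, this header moves the debugger API from static members of
v8::DebugInterface to free functions in the v8::debug namespace. A hedged
embedder-side sketch using only declarations visible above:

    #include "src/debug/debug-interface.h"

    void OnDebugEvent(const v8::debug::EventDetails& details) {
      if (details.GetEvent() == v8::Break) {
        // Inspect details.GetExecutionState() / GetEventData() here; both
        // are only valid for the duration of this callback.
      }
    }

    void AttachDebugger(v8::Isolate* isolate) {
      v8::debug::SetDebugEventListener(isolate, OnDebugEvent);
      v8::debug::ChangeBreakOnException(isolate,
                                        v8::debug::BreakOnUncaughtException);
      v8::debug::DebugBreak(isolate);  // break when JS next runs
    }
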
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index c84d32ae7d..3434a83088 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -6,13 +6,14 @@
#include <memory>
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
namespace v8 {
@@ -23,8 +24,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
: isolate_(isolate),
frame_inspector_(frame_inspector),
nested_scope_chain_(4),
- seen_script_scope_(false),
- failed_(false) {
+ seen_script_scope_(false) {
if (!frame_inspector->GetContext()->IsContext()) {
// Optimized frame, context or function cannot be materialized. Give up.
return;
@@ -61,10 +61,9 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// inspect the function scope.
// This can only happen if we set a break point inside right before the
// return, which requires a debug info to be available.
- Handle<DebugInfo> debug_info(shared_info->GetDebugInfo());
// Find the break point where execution has stopped.
- BreakLocation location = BreakLocation::FromFrame(debug_info, GetFrame());
+ BreakLocation location = BreakLocation::FromFrame(GetFrame());
ignore_nested_scopes = location.IsReturn();
}
@@ -110,7 +109,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
// Inner function.
info.reset(new ParseInfo(&zone, shared_info));
}
- if (Parser::ParseStatic(info.get()) && Rewriter::Rewrite(info.get())) {
+ if (parsing::ParseAny(info.get()) && Rewriter::Rewrite(info.get())) {
DeclarationScope* scope = info->literal()->scope();
if (!ignore_nested_scopes || collect_non_locals) {
CollectNonLocals(info.get(), scope);
@@ -119,26 +118,26 @@ ScopeIterator::ScopeIterator(Isolate* isolate, FrameInspector* frame_inspector,
DeclarationScope::Analyze(info.get(), AnalyzeMode::kDebugger);
RetrieveScopeChain(scope);
}
- } else if (!ignore_nested_scopes) {
+ } else {
// A failed reparse indicates that the preparser has diverged from the
// parser or that the preparse data given to the initial parse has been
// faulty. We fail in debug mode but in release mode we only provide the
// information we get from the context chain but nothing about
// completely stack allocated scopes or stack allocated locals.
// Or it could be due to stack overflow.
- DCHECK(isolate_->has_pending_exception());
- failed_ = true;
+ // Silently fail by presenting an empty context chain.
+ CHECK(isolate_->has_pending_exception());
+ isolate_->clear_pending_exception();
+ context_ = Handle<Context>();
}
UnwrapEvaluationContext();
}
-
ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate),
frame_inspector_(NULL),
context_(function->context()),
- seen_script_scope_(false),
- failed_(false) {
+ seen_script_scope_(false) {
if (!function->shared()->IsSubjectToDebugging()) context_ = Handle<Context>();
UnwrapEvaluationContext();
}
@@ -148,8 +147,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate,
: isolate_(isolate),
frame_inspector_(NULL),
context_(generator->context()),
- seen_script_scope_(false),
- failed_(false) {
+ seen_script_scope_(false) {
if (!generator->function()->shared()->IsSubjectToDebugging()) {
context_ = Handle<Context>();
}
@@ -212,7 +210,7 @@ MUST_USE_RESULT MaybeHandle<JSObject> ScopeIterator::MaterializeScopeDetails() {
void ScopeIterator::Next() {
- DCHECK(!failed_);
+ DCHECK(!Done());
ScopeType scope_type = Type();
if (scope_type == ScopeTypeGlobal) {
// The global scope is always the last in the chain.
@@ -249,7 +247,7 @@ void ScopeIterator::Next() {
// Return the type of the current scope.
ScopeIterator::ScopeType ScopeIterator::Type() {
- DCHECK(!failed_);
+ DCHECK(!Done());
if (!nested_scope_chain_.is_empty()) {
Handle<ScopeInfo> scope_info = nested_scope_chain_.last().scope_info;
switch (scope_info->scope_type()) {
@@ -272,7 +270,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
DCHECK(!scope_info->HasContext() || context_->IsBlockContext());
return ScopeTypeBlock;
case EVAL_SCOPE:
- DCHECK(!scope_info->HasContext() || context_->IsFunctionContext());
+ DCHECK(!scope_info->HasContext() || context_->IsEvalContext());
return ScopeTypeEval;
}
UNREACHABLE();
@@ -283,7 +281,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
// fake it.
return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
}
- if (context_->IsFunctionContext()) {
+ if (context_->IsFunctionContext() || context_->IsEvalContext()) {
return ScopeTypeClosure;
}
if (context_->IsCatchContext()) {
@@ -304,7 +302,7 @@ ScopeIterator::ScopeType ScopeIterator::Type() {
MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
- DCHECK(!failed_);
+ DCHECK(!Done());
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
return Handle<JSObject>(CurrentContext()->global_proxy());
@@ -346,7 +344,7 @@ bool ScopeIterator::HasContext() {
bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- DCHECK(!failed_);
+ DCHECK(!Done());
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
break;
@@ -372,20 +370,19 @@ bool ScopeIterator::SetVariableValue(Handle<String> variable_name,
Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
- DCHECK(!failed_);
+ DCHECK(!Done());
if (!nested_scope_chain_.is_empty()) {
return nested_scope_chain_.last().scope_info;
- } else if (context_->IsBlockContext()) {
+ } else if (context_->IsBlockContext() || context_->IsFunctionContext() ||
+ context_->IsEvalContext()) {
return Handle<ScopeInfo>(context_->scope_info());
- } else if (context_->IsFunctionContext()) {
- return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
}
return Handle<ScopeInfo>::null();
}
Handle<Context> ScopeIterator::CurrentContext() {
- DCHECK(!failed_);
+ DCHECK(!Done());
if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
nested_scope_chain_.is_empty()) {
return context_;
@@ -402,7 +399,7 @@ Handle<StringSet> ScopeIterator::GetNonLocals() { return non_locals_; }
// Debug print of the content of the current scope.
void ScopeIterator::DebugPrint() {
OFStream os(stdout);
- DCHECK(!failed_);
+ DCHECK(!Done());
switch (Type()) {
case ScopeIterator::ScopeTypeGlobal:
os << "Global:\n";
@@ -530,7 +527,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
// context.
Handle<JSObject> ScopeIterator::MaterializeClosure() {
Handle<Context> context = CurrentContext();
- DCHECK(context->IsFunctionContext());
+ DCHECK(context->IsFunctionContext() || context->IsEvalContext());
Handle<SharedFunctionInfo> shared(context->closure()->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -728,18 +725,21 @@ bool ScopeIterator::SetInnerScopeVariableValue(Handle<String> variable_name,
// This method copies the structure of the MaterializeClosure method above.
bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
- DCHECK(CurrentContext()->IsFunctionContext());
+ DCHECK(CurrentContext()->IsFunctionContext() ||
+ CurrentContext()->IsEvalContext());
return SetContextVariableValue(CurrentScopeInfo(), CurrentContext(),
variable_name, new_value);
}
bool ScopeIterator::SetScriptVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
+ Handle<String> internalized_variable_name =
+ isolate_->factory()->InternalizeString(variable_name);
Handle<Context> context = CurrentContext();
Handle<ScriptContextTable> script_contexts(
context->global_object()->native_context()->script_context_table());
ScriptContextTable::LookupResult lookup_result;
- if (ScriptContextTable::Lookup(script_contexts, variable_name,
+ if (ScriptContextTable::Lookup(script_contexts, internalized_variable_name,
&lookup_result)) {
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index);
@@ -838,8 +838,12 @@ void ScopeIterator::GetNestedScopeChain(Isolate* isolate, Scope* scope,
int position) {
if (scope->is_function_scope()) {
// Do not collect scopes of nested inner functions inside the current one.
+ // Nested arrow functions could have the same end positions.
Handle<JSFunction> function = frame_inspector_->GetFunction();
- if (scope->end_position() < function->shared()->end_position()) return;
+ if (scope->start_position() > function->shared()->start_position() &&
+ scope->end_position() <= function->shared()->end_position()) {
+ return;
+ }
}
if (scope->is_hidden()) {
// We need to add this chain element in case the scope has a context
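
The GetNestedScopeChain fix above tightens the "nested inner function" test
from a single end-position comparison to a strict interval check, so nested
arrow functions that share an end position no longer short-circuit scope
collection. The predicate in isolation:

    // A scope belongs to a nested inner function (and is skipped) only if
    // its source range lies strictly inside the current function's range.
    bool IsNestedFunctionScope(int scope_start, int scope_end,
                               int fun_start, int fun_end) {
      return scope_start > fun_start && scope_end <= fun_end;
    }
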
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 87c85b8ba5..d187f3e7bd 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -50,12 +50,7 @@ class ScopeIterator {
MUST_USE_RESULT MaybeHandle<JSObject> MaterializeScopeDetails();
// More scopes?
- bool Done() {
- DCHECK(!failed_);
- return context_.is_null();
- }
-
- bool Failed() { return failed_; }
+ bool Done() { return context_.is_null(); }
// Move to the next scope.
void Next();
@@ -103,7 +98,6 @@ class ScopeIterator {
List<ExtendedScopeInfo> nested_scope_chain_;
Handle<StringSet> non_locals_;
bool seen_script_scope_;
- bool failed_;
inline JavaScriptFrame* GetFrame() {
return frame_inspector_->GetArgumentsFrame();
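
With failed_ removed, a failed reparse now surfaces as an empty context
chain, so Done() is the only termination condition callers need. A hedged
iteration sketch (constructor arguments abbreviated):

    ScopeIterator it(isolate, &frame_inspector);
    for (; !it.Done(); it.Next()) {
      if (it.Type() == ScopeIterator::ScopeTypeGlobal) break;  // outermost
      it.DebugPrint();  // or MaterializeScopeDetails(), SetVariableValue(), ...
    }
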
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 960327b545..314efba870 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -14,6 +14,7 @@
#include "src/compilation-cache.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/compiler.h"
+#include "src/debug/debug-evaluate.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@@ -42,6 +43,7 @@ Debug::Debug(Isolate* isolate)
command_received_(0),
command_queue_(isolate->logger(), kQueueInitialSize),
is_active_(false),
+ hook_on_function_call_(false),
is_suppressed_(false),
live_edit_enabled_(true), // TODO(yangguo): set to false by default.
break_disabled_(false),
@@ -49,18 +51,22 @@ Debug::Debug(Isolate* isolate)
in_debug_event_listener_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
+ side_effect_check_failed_(false),
debug_info_list_(NULL),
feature_tracker_(isolate),
isolate_(isolate) {
ThreadInit();
}
-BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
- JavaScriptFrame* frame) {
- FrameSummary summary = FrameSummary::GetFirst(frame);
+BreakLocation BreakLocation::FromFrame(StandardFrame* frame) {
+ // TODO(clemensh): Handle Wasm frames.
+ DCHECK(!frame->is_wasm());
+
+ auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
if (abstract_code->IsCode()) offset = offset - 1;
+ Handle<DebugInfo> debug_info(summary.function()->shared()->GetDebugInfo());
auto it = BreakIterator::GetIterator(debug_info, abstract_code);
it->SkipTo(BreakIndexFromCodeOffset(debug_info, abstract_code, offset));
return it->GetBreakLocation();
@@ -69,7 +75,7 @@ BreakLocation BreakLocation::FromFrame(Handle<DebugInfo> debug_info,
void BreakLocation::AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
List<BreakLocation>* result_out) {
- FrameSummary summary = FrameSummary::GetFirst(frame);
+ auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
int offset = summary.code_offset();
Handle<AbstractCode> abstract_code = summary.abstract_code();
if (abstract_code->IsCode()) offset = offset - 1;
@@ -401,10 +407,12 @@ void Debug::ThreadInit() {
thread_local_.last_fp_ = 0;
thread_local_.target_fp_ = 0;
thread_local_.return_value_ = Handle<Object>();
+ thread_local_.async_task_count_ = 0;
clear_suspended_generator();
// TODO(isolates): frames_are_dropped_?
base::NoBarrier_Store(&thread_local_.current_debug_scope_,
static_cast<base::AtomicWord>(0));
+ UpdateHookOnFunctionCall();
}
@@ -453,7 +461,7 @@ bool Debug::Load() {
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
- DisableBreak disable(this, true);
+ DisableBreak disable(this);
PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
@@ -465,7 +473,8 @@ bool Debug::Load() {
static const int kFirstContextSnapshotIndex = 0;
Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment(
MaybeHandle<JSGlobalProxy>(), v8::Local<ObjectTemplate>(), &no_extensions,
- kFirstContextSnapshotIndex, DEBUG_CONTEXT);
+ kFirstContextSnapshotIndex, v8::DeserializeInternalFieldsCallback(),
+ DEBUG_CONTEXT);
// Fail if no context could be created.
if (context.is_null()) return false;
@@ -507,25 +516,41 @@ void Debug::Break(JavaScriptFrame* frame) {
// Postpone interrupt during breakpoint processing.
PostponeInterruptsScope postpone(isolate_);
- // Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(frame->function());
- Handle<SharedFunctionInfo> shared(function->shared());
- if (!EnsureDebugInfo(shared, function)) {
- // Return if we failed to retrieve the debug info.
- return;
+ // Return if we fail to retrieve debug info for javascript frames.
+ if (frame->is_java_script()) {
+ JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
+
+ // Get the debug info (create it if it does not exist).
+ Handle<JSFunction> function(js_frame->function());
+ Handle<SharedFunctionInfo> shared(function->shared());
+ if (!EnsureDebugInfo(shared, function)) return;
}
- Handle<DebugInfo> debug_info(shared->GetDebugInfo(), isolate_);
- // Find the break location where execution has stopped.
- BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
+ BreakLocation location = BreakLocation::FromFrame(frame);
// Find actual break points, if any, and trigger debug break event.
- Handle<Object> break_points_hit = CheckBreakPoints(debug_info, &location);
- if (!break_points_hit->IsUndefined(isolate_)) {
+ MaybeHandle<FixedArray> break_points_hit;
+ if (!break_points_active()) {
+ // Don't try to find hit breakpoints.
+ } else if (frame->is_wasm_interpreter_entry()) {
+ // TODO(clemensh): Find hit breakpoints for wasm.
+ UNIMPLEMENTED();
+ } else {
+ // Get the debug info, which must exist if we reach here.
+ Handle<DebugInfo> debug_info(
+ JavaScriptFrame::cast(frame)->function()->shared()->GetDebugInfo(),
+ isolate_);
+
+ break_points_hit = CheckBreakPoints(debug_info, &location);
+ }
+
+ if (!break_points_hit.is_null()) {
// Clear all current stepping setup.
ClearStepping();
// Notify the debug event listeners.
- OnDebugBreak(break_points_hit, false);
+ Handle<JSArray> jsarr = isolate_->factory()->NewJSArrayWithElements(
+ break_points_hit.ToHandleChecked());
+ OnDebugBreak(jsarr, false);
return;
}
@@ -552,11 +577,9 @@ void Debug::Break(JavaScriptFrame* frame) {
// Fall through.
case StepIn: {
FrameSummary summary = FrameSummary::GetFirst(frame);
- int offset = summary.code_offset();
- step_break = step_break || location.IsReturn() ||
- (current_fp != last_fp) ||
- (thread_local_.last_statement_position_ !=
- summary.abstract_code()->SourceStatementPosition(offset));
+ step_break = step_break || location.IsReturn() || current_fp != last_fp ||
+ thread_local_.last_statement_position_ !=
+ summary.SourceStatementPosition();
break;
}
case StepFrame:
@@ -578,42 +601,19 @@ void Debug::Break(JavaScriptFrame* frame) {
// Find break point objects for this location, if any, and evaluate them.
-// Return an array of break point objects that evaluated true.
-Handle<Object> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
- BreakLocation* location,
- bool* has_break_points) {
- Factory* factory = isolate_->factory();
+// Return an array of break point objects that evaluated true, or an empty
+// handle if none evaluated true.
+MaybeHandle<FixedArray> Debug::CheckBreakPoints(Handle<DebugInfo> debug_info,
+ BreakLocation* location,
+ bool* has_break_points) {
bool has_break_points_to_check =
break_points_active_ && location->HasBreakPoint(debug_info);
if (has_break_points) *has_break_points = has_break_points_to_check;
- if (!has_break_points_to_check) return factory->undefined_value();
+ if (!has_break_points_to_check) return {};
Handle<Object> break_point_objects =
debug_info->GetBreakPointObjects(location->position());
- // Count the number of break points hit. If there are multiple break points
- // they are in a FixedArray.
- Handle<FixedArray> break_points_hit;
- int break_points_hit_count = 0;
- DCHECK(!break_point_objects->IsUndefined(isolate_));
- if (break_point_objects->IsFixedArray()) {
- Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
- break_points_hit = factory->NewFixedArray(array->length());
- for (int i = 0; i < array->length(); i++) {
- Handle<Object> break_point_object(array->get(i), isolate_);
- if (CheckBreakPoint(break_point_object)) {
- break_points_hit->set(break_points_hit_count++, *break_point_object);
- }
- }
- } else {
- break_points_hit = factory->NewFixedArray(1);
- if (CheckBreakPoint(break_point_objects)) {
- break_points_hit->set(break_points_hit_count++, *break_point_objects);
- }
- }
- if (break_points_hit_count == 0) return factory->undefined_value();
- Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
- result->set_length(Smi::FromInt(break_points_hit_count));
- return result;
+ return Debug::GetHitBreakPointObjects(break_point_objects);
}
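
CheckBreakPoints now signals "no break point evaluated true" with an empty
MaybeHandle instead of the old undefined_value sentinel, which is what
Debug::Break tests above. The caller-side contract in brief:

    MaybeHandle<FixedArray> hits = CheckBreakPoints(debug_info, &location);
    if (!hits.is_null()) {
      // At least one break point evaluated true; wrap the hits for listeners.
      Handle<JSArray> jsarr =
          isolate_->factory()->NewJSArrayWithElements(hits.ToHandleChecked());
      OnDebugBreak(jsarr, false);
    }
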
@@ -637,10 +637,10 @@ bool Debug::IsMutedAtCurrentLocation(JavaScriptFrame* frame) {
bool has_break_points_at_all = false;
for (int i = 0; i < break_locations.length(); i++) {
bool has_break_points;
- Handle<Object> check_result =
+ MaybeHandle<FixedArray> check_result =
CheckBreakPoints(debug_info, &break_locations[i], &has_break_points);
has_break_points_at_all |= has_break_points;
- if (has_break_points && !check_result->IsUndefined(isolate_)) return false;
+ if (has_break_points && !check_result.is_null()) return false;
}
return has_break_points_at_all;
}
@@ -655,7 +655,10 @@ MaybeHandle<Object> Debug::CallFunction(const char* name, int argc,
Handle<JSFunction> fun = Handle<JSFunction>::cast(
JSReceiver::GetProperty(isolate_, holder, name).ToHandleChecked());
Handle<Object> undefined = isolate_->factory()->undefined_value();
- return Execution::TryCall(isolate_, fun, undefined, argc, args);
+ MaybeHandle<Object> maybe_exception;
+ return Execution::TryCall(isolate_, fun, undefined, argc, args,
+ Execution::MessageHandling::kReport,
+ &maybe_exception);
}
@@ -901,19 +904,47 @@ bool Debug::IsBreakOnException(ExceptionBreakType type) {
}
}
+MaybeHandle<FixedArray> Debug::GetHitBreakPointObjects(
+ Handle<Object> break_point_objects) {
+ DCHECK(!break_point_objects->IsUndefined(isolate_));
+ if (!break_point_objects->IsFixedArray()) {
+ if (!CheckBreakPoint(break_point_objects)) return {};
+ Handle<FixedArray> break_points_hit = isolate_->factory()->NewFixedArray(1);
+ break_points_hit->set(0, *break_point_objects);
+ return break_points_hit;
+ }
+
+ Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
+ int num_objects = array->length();
+ Handle<FixedArray> break_points_hit =
+ isolate_->factory()->NewFixedArray(num_objects);
+ int break_points_hit_count = 0;
+ for (int i = 0; i < num_objects; ++i) {
+ Handle<Object> break_point_object(array->get(i), isolate_);
+ if (CheckBreakPoint(break_point_object)) {
+ break_points_hit->set(break_points_hit_count++, *break_point_object);
+ }
+ }
+ if (break_points_hit_count == 0) return {};
+ break_points_hit->Shrink(break_points_hit_count);
+ return break_points_hit;
+}
void Debug::PrepareStepIn(Handle<JSFunction> function) {
CHECK(last_step_action() >= StepIn);
- if (!is_active()) return;
+ if (ignore_events()) return;
if (in_debug_scope()) return;
+ if (break_disabled()) return;
FloodWithOneShot(function);
}
void Debug::PrepareStepInSuspendedGenerator() {
CHECK(has_suspended_generator());
- if (!is_active()) return;
+ if (ignore_events()) return;
if (in_debug_scope()) return;
+ if (break_disabled()) return;
thread_local_.last_step_action_ = StepIn;
+ UpdateHookOnFunctionCall();
Handle<JSFunction> function(
JSGeneratorObject::cast(thread_local_.suspended_generator_)->function());
FloodWithOneShot(function);
@@ -921,9 +952,10 @@ void Debug::PrepareStepInSuspendedGenerator() {
}
void Debug::PrepareStepOnThrow() {
- if (!is_active()) return;
if (last_step_action() == StepNone) return;
+ if (ignore_events()) return;
if (in_debug_scope()) return;
+ if (break_disabled()) return;
ClearOneShot();
@@ -974,6 +1006,7 @@ void Debug::PrepareStep(StepAction step_action) {
feature_tracker()->Track(DebugFeatureTracker::kStepping);
thread_local_.last_step_action_ = step_action;
+ UpdateHookOnFunctionCall();
// If the function on the top frame is unresolved perform step out. This will
// be the case when calling unknown function and having the debugger stopped
@@ -989,7 +1022,7 @@ void Debug::PrepareStep(StepAction step_action) {
}
// Get the debug info (create it if it does not exist).
- FrameSummary summary = FrameSummary::GetFirst(frame);
+ auto summary = FrameSummary::GetFirst(frame).AsJavaScript();
Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
@@ -997,8 +1030,7 @@ void Debug::PrepareStep(StepAction step_action) {
return;
}
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
+ BreakLocation location = BreakLocation::FromFrame(frame);
// Any step at a return is a step-out.
if (location.IsReturn()) step_action = StepOut;
@@ -1104,6 +1136,7 @@ void Debug::ClearStepping() {
thread_local_.last_statement_position_ = kNoSourcePosition;
thread_local_.last_fp_ = 0;
thread_local_.target_fp_ = 0;
+ UpdateHookOnFunctionCall();
}
@@ -1176,34 +1209,6 @@ static Address ComputeNewPcForRedirect(Code* new_code, Code* old_code,
}
-// Count the number of continuations at which the current pc offset is at.
-static int ComputeContinuationIndexFromPcOffset(Code* code, int pc_offset) {
- DCHECK_EQ(code->kind(), Code::FUNCTION);
- Address pc = code->instruction_start() + pc_offset;
- int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
- int index = 0;
- for (RelocIterator it(code, mask); !it.done(); it.next()) {
- index++;
- RelocInfo* rinfo = it.rinfo();
- Address current_pc = rinfo->pc();
- if (current_pc == pc) break;
- DCHECK(current_pc < pc);
- }
- return index;
-}
-
-
-// Find the pc offset for the given continuation index.
-static int ComputePcOffsetFromContinuationIndex(Code* code, int index) {
- DCHECK_EQ(code->kind(), Code::FUNCTION);
- DCHECK(code->has_debug_break_slots());
- int mask = RelocInfo::ModeMask(RelocInfo::GENERATOR_CONTINUATION);
- RelocIterator it(code, mask);
- for (int i = 1; i < index; i++) it.next();
- return static_cast<int>(it.rinfo()->pc() - code->instruction_start());
-}
-
-
class RedirectActiveFunctions : public ThreadVisitor {
public:
explicit RedirectActiveFunctions(SharedFunctionInfo* shared)
@@ -1268,18 +1273,20 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
}
List<Handle<JSFunction> > functions;
- List<Handle<JSGeneratorObject> > suspended_generators;
// Flush all optimized code maps. Note that the below heap iteration does not
// cover this, because the given function might have been inlined into code
// for which no JSFunction exists.
{
- SharedFunctionInfo::Iterator iterator(isolate_);
+ SharedFunctionInfo::GlobalIterator iterator(isolate_);
while (SharedFunctionInfo* shared = iterator.Next()) {
shared->ClearCodeFromOptimizedCodeMap();
}
}
+ // The native context also has a list of OSR'd optimized code. Clear it.
+ isolate_->ClearOSROptimizedCode();
+
// Make sure we abort incremental marking.
isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
GarbageCollectionReason::kDebugger);
@@ -1293,9 +1300,6 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
// smarter here and avoid the heap walk.
HeapIterator iterator(isolate_->heap());
HeapObject* obj;
- // Continuation from old-style generators need to be recomputed.
- bool find_resumables =
- baseline_exists && IsResumableFunction(shared->kind());
while ((obj = iterator.next())) {
if (obj->IsJSFunction()) {
@@ -1307,25 +1311,12 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
if (baseline_exists && function->shared() == *shared) {
functions.Add(handle(function));
}
- } else if (find_resumables && obj->IsJSGeneratorObject()) {
- // This case handles async functions as well, as they use generator
- // objects for in-progress async function execution.
- JSGeneratorObject* generator_obj = JSGeneratorObject::cast(obj);
- if (!generator_obj->is_suspended()) continue;
- JSFunction* function = generator_obj->function();
- if (!function->Inlines(*shared)) continue;
- int pc_offset = generator_obj->continuation();
- int index =
- ComputeContinuationIndexFromPcOffset(function->code(), pc_offset);
- generator_obj->set_continuation(index);
- suspended_generators.Add(handle(generator_obj));
}
}
}
// We do not need to replace code to debug bytecode.
DCHECK(baseline_exists || functions.is_empty());
- DCHECK(baseline_exists || suspended_generators.is_empty());
// We do not need to recompile to debug bytecode.
if (baseline_exists && !shared->code()->has_debug_break_slots()) {
@@ -1337,12 +1328,6 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
JSFunction::EnsureLiterals(function);
}
- for (Handle<JSGeneratorObject> const generator_obj : suspended_generators) {
- int index = generator_obj->continuation();
- int pc_offset = ComputePcOffsetFromContinuationIndex(shared->code(), index);
- generator_obj->set_continuation(pc_offset);
- }
-
// Update PCs on the stack to point to recompiled code.
RedirectActiveFunctions redirect_visitor(*shared);
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
@@ -1384,24 +1369,18 @@ void FindBreakablePositions(Handle<DebugInfo> debug_info, int start_position,
bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
int end_position, std::set<int>* positions) {
while (true) {
- if (!script->shared_function_infos()->IsWeakFixedArray()) return false;
-
- WeakFixedArray* infos =
- WeakFixedArray::cast(script->shared_function_infos());
HandleScope scope(isolate_);
List<Handle<SharedFunctionInfo>> candidates;
- {
- WeakFixedArray::Iterator iterator(infos);
- SharedFunctionInfo* info;
- while ((info = iterator.Next<SharedFunctionInfo>())) {
- if (info->end_position() < start_position ||
- info->start_position() >= end_position) {
- continue;
- }
- if (!info->IsSubjectToDebugging()) continue;
- if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
- candidates.Add(i::handle(info));
+ SharedFunctionInfo::ScriptIterator iterator(script);
+ for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+ info = iterator.Next()) {
+ if (info->end_position() < start_position ||
+ info->start_position() >= end_position) {
+ continue;
}
+ if (!info->IsSubjectToDebugging()) continue;
+ if (!info->HasDebugCode() && !info->allows_lazy_compilation()) continue;
+ candidates.Add(i::handle(info));
}
bool was_compiled = false;
@@ -1432,9 +1411,16 @@ bool Debug::GetPossibleBreakpoints(Handle<Script> script, int start_position,
return false;
}
-void Debug::RecordAsyncFunction(Handle<JSGeneratorObject> generator_object) {
+void Debug::RecordGenerator(Handle<JSGeneratorObject> generator_object) {
if (last_step_action() <= StepOut) return;
- if (!IsAsyncFunction(generator_object->function()->shared()->kind())) return;
+
+ if (last_step_action() == StepNext) {
+ // Only consider this generator a step-next target if not stepping in.
+ JavaScriptFrameIterator stack_iterator(isolate_);
+ JavaScriptFrame* frame = stack_iterator.frame();
+ if (frame->UnpaddedFP() < thread_local_.target_fp_) return;
+ }
+
DCHECK(!has_suspended_generator());
thread_local_.suspended_generator_ = *generator_object;
ClearStepping();
@@ -1504,15 +1490,14 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
// find the inner most function containing this position.
// If there is no shared function info for this script at all, there is
// no point in looking for it by walking the heap.
- if (!script->shared_function_infos()->IsWeakFixedArray()) break;
SharedFunctionInfo* shared;
{
SharedFunctionInfoFinder finder(position);
- WeakFixedArray::Iterator iterator(script->shared_function_infos());
- SharedFunctionInfo* candidate;
- while ((candidate = iterator.Next<SharedFunctionInfo>())) {
- finder.NewCandidate(candidate);
+ SharedFunctionInfo::ScriptIterator iterator(script);
+ for (SharedFunctionInfo* info = iterator.Next(); info != nullptr;
+ info = iterator.Next()) {
+ finder.NewCandidate(info);
}
shared = finder.Result();
if (shared == NULL) break;
@@ -1613,15 +1598,11 @@ void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
HandleScope scope(isolate_);
- // Get the executing function in which the debug break occurred.
- Handle<SharedFunctionInfo> shared(frame->function()->shared());
-
// With no debug info there are no break points, so we can't be at a return.
- if (!shared->HasDebugInfo()) return false;
+ if (!frame->function()->shared()->HasDebugInfo()) return false;
DCHECK(!frame->is_optimized());
- Handle<DebugInfo> debug_info(shared->GetDebugInfo());
- BreakLocation location = BreakLocation::FromFrame(debug_info, frame);
+ BreakLocation location = BreakLocation::FromFrame(frame);
return location.IsReturn() || location.IsTailCall();
}
@@ -1705,12 +1686,11 @@ MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
return CallFunction("MakeCompileEvent", arraysize(argv), argv);
}
-MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<String> type,
- Handle<Object> id,
- Handle<String> name) {
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<Smi> type,
+ Handle<Smi> id) {
DCHECK(id->IsNumber());
// Create the async task event object.
- Handle<Object> argv[] = {type, id, name};
+ Handle<Object> argv[] = {type, id};
return CallFunction("MakeAsyncTaskEvent", arraysize(argv), argv);
}
@@ -1796,7 +1776,6 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
// Return to continue execution from where the exception was thrown.
}
-
void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
// The caller provided for DebugScope.
AssertDebugContext();
@@ -1814,8 +1793,7 @@ void Debug::OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue) {
if (!MakeBreakEvent(break_points_hit).ToHandle(&event_data)) return;
// Process debug event.
- ProcessDebugEvent(v8::Break,
- Handle<JSObject>::cast(event_data),
+ ProcessDebugEvent(v8::Break, Handle<JSObject>::cast(event_data),
auto_continue);
}
@@ -1825,21 +1803,91 @@ void Debug::OnCompileError(Handle<Script> script) {
}
-void Debug::OnBeforeCompile(Handle<Script> script) {
- ProcessCompileEvent(v8::BeforeCompile, script);
-}
-
-
// Handle debugger actions when a new script is compiled.
void Debug::OnAfterCompile(Handle<Script> script) {
ProcessCompileEvent(v8::AfterCompile, script);
}
-void Debug::OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
- Handle<String> name) {
- DCHECK(id->IsNumber());
+namespace {
+struct CollectedCallbackData {
+ Object** location;
+ int id;
+ Debug* debug;
+ Isolate* isolate;
+
+ CollectedCallbackData(Object** location, int id, Debug* debug,
+ Isolate* isolate)
+ : location(location), id(id), debug(debug), isolate(isolate) {}
+};
+
+void SendAsyncTaskEventCancel(const v8::WeakCallbackInfo<void>& info) {
+ std::unique_ptr<CollectedCallbackData> data(
+ reinterpret_cast<CollectedCallbackData*>(info.GetParameter()));
+ if (!data->debug->is_active()) return;
+ HandleScope scope(data->isolate);
+ data->debug->OnAsyncTaskEvent(debug::kDebugPromiseCollected, data->id);
+}
+
+void ResetPromiseHandle(const v8::WeakCallbackInfo<void>& info) {
+ CollectedCallbackData* data =
+ reinterpret_cast<CollectedCallbackData*>(info.GetParameter());
+ GlobalHandles::Destroy(data->location);
+ info.SetSecondPassCallback(&SendAsyncTaskEventCancel);
+}
+} // namespace
+
+int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
+ LookupIterator it(promise, isolate_->factory()->promise_async_id_symbol());
+ Maybe<bool> maybe = JSReceiver::HasProperty(&it);
+ if (maybe.ToChecked()) {
+ MaybeHandle<Object> result = Object::GetProperty(&it);
+ return Handle<Smi>::cast(result.ToHandleChecked())->value();
+ }
+ Handle<Smi> async_id =
+ handle(Smi::FromInt(++thread_local_.async_task_count_), isolate_);
+ Object::SetProperty(&it, async_id, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED)
+ .ToChecked();
+ Handle<Object> global_handle = isolate_->global_handles()->Create(*promise);
+ // We send an EnqueueRecurring async task event when the promise is
+ // fulfilled or rejected, and WillHandle and DidHandle events for every
+ // microtask scheduled for this promise.
+ // We need to send a cancel event once no further microtasks can be
+ // started for this promise and all current microtasks are finished.
+ // Since the promise is held strongly while at least one microtask is
+ // scheduled (inside PromiseReactionJobInfo), we can send the cancel event
+ // from a weak callback.
+ GlobalHandles::MakeWeak(
+ global_handle.location(),
+ new CollectedCallbackData(global_handle.location(), async_id->value(),
+ this, isolate_),
+ &ResetPromiseHandle, v8::WeakCallbackType::kParameter);
+ return async_id->value();
+}
+
+void Debug::SetAsyncTaskListener(debug::AsyncTaskListener listener,
+ void* data) {
+ async_task_listener_ = listener;
+ async_task_listener_data_ = data;
+ UpdateState();
+}
+
+void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id) {
if (in_debug_scope() || ignore_events()) return;
+ if (async_task_listener_) {
+ async_task_listener_(type, id, async_task_listener_data_);
+ // There are three kinds of event listeners: the C++ message_handler,
+ // a JavaScript event listener, and a C++ event listener.
+ // Currently the inspector still uses the C++ event listener and installs
+ // more specific listeners for a subset of events; calling the C++ event
+ // listener is redundant when a more specific listener is present. Other
+ // clients (e.g. some Node.js modules) can install a JavaScript event
+ // listener.
+ bool non_inspector_listener_exists =
+ message_handler_ != nullptr ||
+ (!event_listener_.is_null() && !event_listener_->IsForeign());
+ if (!non_inspector_listener_exists) return;
+ }
+
HandleScope scope(isolate_);
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
@@ -1847,17 +1895,17 @@ void Debug::OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
// Create the script collected state object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
- if (!MakeAsyncTaskEvent(type, id, name).ToHandle(&event_data)) return;
+ if (!MakeAsyncTaskEvent(handle(Smi::FromInt(type), isolate_),
+ handle(Smi::FromInt(id), isolate_))
+ .ToHandle(&event_data))
+ return;
// Process debug event.
- ProcessDebugEvent(v8::AsyncTaskEvent,
- Handle<JSObject>::cast(event_data),
+ ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data),
true);
}
-
-void Debug::ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
+void Debug::ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
bool auto_continue) {
HandleScope scope(isolate_);
@@ -1868,9 +1916,7 @@ void Debug::ProcessDebugEvent(v8::DebugEvent event,
// First notify the message handler if any.
if (message_handler_ != NULL) {
- NotifyMessageHandler(event,
- Handle<JSObject>::cast(exec_state),
- event_data,
+ NotifyMessageHandler(event, Handle<JSObject>::cast(exec_state), event_data,
auto_continue);
}
// Notify registered debug event listener. This can be either a C or
@@ -1893,9 +1939,8 @@ void Debug::CallEventCallback(v8::DebugEvent event,
in_debug_event_listener_ = true;
if (event_listener_->IsForeign()) {
// Invoke the C debug event listener.
- v8::DebugInterface::EventCallback callback =
- FUNCTION_CAST<v8::DebugInterface::EventCallback>(
- Handle<Foreign>::cast(event_listener_)->foreign_address());
+ debug::EventCallback callback = FUNCTION_CAST<debug::EventCallback>(
+ Handle<Foreign>::cast(event_listener_)->foreign_address());
EventDetailsImpl event_details(event,
Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data),
@@ -1922,6 +1967,10 @@ void Debug::CallEventCallback(v8::DebugEvent event,
void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
if (ignore_events()) return;
+ if (script->type() != i::Script::TYPE_NORMAL &&
+ script->type() != i::Script::TYPE_WASM) {
+ return;
+ }
SuppressDebug while_processing(this);
bool in_nested_debug_scope = in_debug_scope();
@@ -1929,16 +1978,6 @@ void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
- if (event == v8::AfterCompile) {
- // If debugging there might be script break points registered for this
- // script. Make sure that these break points are set.
- Handle<Object> argv[] = {Script::GetWrapper(script)};
- if (CallFunction("UpdateScriptBreakPoints", arraysize(argv), argv)
- .is_null()) {
- return;
- }
- }
-
// Create the compile state object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
@@ -1969,7 +2008,6 @@ Handle<Context> Debug::GetDebugContext() {
return handle(*debug_context(), isolate_);
}
-
void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
@@ -1985,8 +2023,6 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
case v8::Break:
sendEventMessage = !auto_continue;
break;
- case v8::NewFunction:
- case v8::BeforeCompile:
case v8::CompileError:
case v8::AsyncTaskEvent:
break;
@@ -2006,9 +2042,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
// active in which case no event is sent.
if (sendEventMessage) {
MessageImpl message = MessageImpl::NewEvent(
- event,
- auto_continue,
- Handle<JSObject>::cast(exec_state),
+ event, auto_continue, Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data));
InvokeMessageHandler(message);
}
@@ -2025,7 +2059,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> cmd_processor_ctor =
JSReceiver::GetProperty(isolate_, exec_state, "debugCommandProcessor")
.ToHandleChecked();
- Handle<Object> ctor_args[] = { isolate_->factory()->ToBoolean(running) };
+ Handle<Object> ctor_args[] = {isolate_->factory()->ToBoolean(running)};
Handle<JSReceiver> cmd_processor = Handle<JSReceiver>::cast(
Execution::Call(isolate_, cmd_processor_ctor, exec_state, 1, ctor_args)
.ToHandleChecked());
@@ -2054,15 +2088,16 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Vector<const uc16> command_text(
const_cast<const uc16*>(command.text().start()),
command.text().length());
- Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte(
- command_text).ToHandleChecked();
- Handle<Object> request_args[] = { request_text };
+ Handle<String> request_text = isolate_->factory()
+ ->NewStringFromTwoByte(command_text)
+ .ToHandleChecked();
+ Handle<Object> request_args[] = {request_text};
Handle<Object> answer_value;
Handle<String> answer;
MaybeHandle<Object> maybe_exception;
- MaybeHandle<Object> maybe_result =
- Execution::TryCall(isolate_, process_debug_request, cmd_processor, 1,
- request_args, &maybe_exception);
+ MaybeHandle<Object> maybe_result = Execution::TryCall(
+ isolate_, process_debug_request, cmd_processor, 1, request_args,
+ Execution::MessageHandling::kReport, &maybe_exception);
if (maybe_result.ToHandle(&answer_value)) {
if (answer_value->IsUndefined(isolate_)) {
@@ -2077,9 +2112,9 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
PrintF("%s\n", answer->ToCString().get());
}
- Handle<Object> is_running_args[] = { answer };
- maybe_result = Execution::Call(
- isolate_, is_running, cmd_processor, 1, is_running_args);
+ Handle<Object> is_running_args[] = {answer};
+ maybe_result = Execution::Call(isolate_, is_running, cmd_processor, 1,
+ is_running_args);
Handle<Object> result;
if (!maybe_result.ToHandle(&result)) break;
running = result->IsTrue(isolate_);
@@ -2104,7 +2139,6 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
command_queue_.Clear();
}
-
void Debug::SetEventListener(Handle<Object> callback,
Handle<Object> data) {
GlobalHandles* global_handles = isolate_->global_handles();
@@ -2116,7 +2150,7 @@ void Debug::SetEventListener(Handle<Object> callback,
event_listener_data_ = Handle<Object>();
// Set new entry.
- if (!callback->IsUndefined(isolate_) && !callback->IsNull(isolate_)) {
+ if (!callback->IsNullOrUndefined(isolate_)) {
event_listener_ = global_handles->Create(*callback);
if (data.is_null()) data = isolate_->factory()->undefined_value();
event_listener_data_ = global_handles->Create(*data);
@@ -2125,7 +2159,6 @@ void Debug::SetEventListener(Handle<Object> callback,
UpdateState();
}
-
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
message_handler_ = handler;
UpdateState();
@@ -2136,10 +2169,9 @@ void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
}
}
-
-
void Debug::UpdateState() {
- bool is_active = message_handler_ != NULL || !event_listener_.is_null();
+ bool is_active = message_handler_ != nullptr || !event_listener_.is_null() ||
+ async_task_listener_ != nullptr;
if (is_active || in_debug_scope()) {
// Note that the debug context could have already been loaded to
// bootstrap test cases.
@@ -2152,6 +2184,12 @@ void Debug::UpdateState() {
is_active_ = is_active;
}
+void Debug::UpdateHookOnFunctionCall() {
+ STATIC_ASSERT(StepFrame > StepIn);
+ STATIC_ASSERT(LastStepAction == StepFrame);
+ hook_on_function_call_ = thread_local_.last_step_action_ >= StepIn ||
+ isolate_->needs_side_effect_check();
+}
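The two STATIC_ASSERTs pin down the enum ordering that the `>= StepIn` shorthand relies on. For reference, a sketch of the StepAction values assumed here (declared elsewhere in debug.h; reproduced for illustration):

enum StepAction : int8_t {
  StepNone = -1,  // Stepping not prepared.
  StepOut = 0,    // Step out of the current function.
  StepNext = 1,   // Step to the next statement in the current function.
  StepIn = 2,     // Step into new functions invoked in the statement.
  StepFrame = 3,  // Step into a new frame or return to the previous frame.
  LastStepAction = StepFrame
};

With this ordering, last_step_action_ >= StepIn holds exactly for StepIn and StepFrame, the two modes that must observe every function entry.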
// Calls the registered debug message handler. This callback is part of the
// public API.
@@ -2159,7 +2197,6 @@ void Debug::InvokeMessageHandler(MessageImpl message) {
if (message_handler_ != NULL) message_handler_(message);
}
-
// Puts a command coming from the public API on the queue. Creates
// a copy of the command string managed by the debugger. Up to this
// point, the command data was managed by the API client. Called
@@ -2179,7 +2216,6 @@ void Debug::EnqueueCommandMessage(Vector<const uint16_t> command,
if (!in_debug_scope()) isolate_->stack_guard()->RequestDebugCommand();
}
-
MaybeHandle<Object> Debug::Call(Handle<Object> fun, Handle<Object> data) {
DebugScope debug_scope(this);
if (debug_scope.failed()) return isolate_->factory()->undefined_value();
@@ -2238,7 +2274,6 @@ void Debug::HandleDebugBreak() {
ProcessDebugMessages(debug_command_only);
}
-
void Debug::ProcessDebugMessages(bool debug_command_only) {
isolate_->stack_guard()->ClearDebugCommand();
@@ -2262,11 +2297,10 @@ void Debug::PrintBreakLocation() {
if (iterator.done()) return;
JavaScriptFrame* frame = iterator.frame();
FrameSummary summary = FrameSummary::GetFirst(frame);
- int source_position =
- summary.abstract_code()->SourcePosition(summary.code_offset());
- Handle<Object> script_obj(summary.function()->shared()->script(), isolate_);
+ int source_position = summary.SourcePosition();
+ Handle<Object> script_obj = summary.script();
PrintF("[debug] break in function '");
- summary.function()->PrintName();
+ summary.FunctionName()->PrintOn(stdout);
PrintF("'.\n");
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
@@ -2353,32 +2387,69 @@ DebugScope::~DebugScope() {
debug_->UpdateState();
}
+bool Debug::PerformSideEffectCheck(Handle<JSFunction> function) {
+ DCHECK(isolate_->needs_side_effect_check());
+ DisallowJavascriptExecution no_js(isolate_);
+ if (!Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) return false;
+ Deoptimizer::DeoptimizeFunction(*function);
+ if (!function->shared()->HasNoSideEffect()) {
+ if (FLAG_trace_side_effect_free_debug_evaluate) {
+ PrintF("[debug-evaluate] Function %s failed side effect check.\n",
+ function->shared()->DebugName()->ToCString().get());
+ }
+ side_effect_check_failed_ = true;
+ // Throw an uncatchable termination exception.
+ isolate_->TerminateExecution();
+ return false;
+ }
+ return true;
+}
+
+bool Debug::PerformSideEffectCheckForCallback(Address function) {
+ DCHECK(isolate_->needs_side_effect_check());
+ if (DebugEvaluate::CallbackHasNoSideEffect(function)) return true;
+ side_effect_check_failed_ = true;
+ // Throw an uncatchable termination exception.
+ isolate_->TerminateExecution();
+ isolate_->OptionalRescheduleException(false);
+ return false;
+}
+
+NoSideEffectScope::~NoSideEffectScope() {
+ if (isolate_->needs_side_effect_check() &&
+ isolate_->debug()->side_effect_check_failed_) {
+ DCHECK(isolate_->has_pending_exception());
+ DCHECK_EQ(isolate_->heap()->termination_exception(),
+ isolate_->pending_exception());
+ // Convert the termination exception into a regular exception.
+ isolate_->CancelTerminateExecution();
+ isolate_->Throw(*isolate_->factory()->NewEvalError(
+ MessageTemplate::kNoSideEffectDebugEvaluate));
+ }
+ isolate_->set_needs_side_effect_check(old_needs_side_effect_check_);
+ isolate_->debug()->UpdateHookOnFunctionCall();
+ isolate_->debug()->side_effect_check_failed_ = false;
+}
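Taken together: the scope arms the check, PerformSideEffectCheck* throws an uncatchable termination when a side effect is detected, and the destructor downgrades that termination into a catchable EvalError. A simplified sketch of the intended internal call pattern; EvaluateWithoutSideEffects is an illustrative name, not part of this patch:

MaybeHandle<Object> EvaluateWithoutSideEffects(Isolate* isolate,
                                               Handle<JSFunction> fun) {
  NoSideEffectScope scope(isolate, /* disallow_side_effects */ true);
  if (!isolate->debug()->PerformSideEffectCheck(fun)) {
    // Termination is pending; ~NoSideEffectScope converts it into an
    // EvalError (kNoSideEffectDebugEvaluate) when the scope unwinds.
    return MaybeHandle<Object>();
  }
  return Execution::Call(isolate, fun,
                         isolate->factory()->undefined_value(), 0, nullptr);
}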
-MessageImpl MessageImpl::NewEvent(DebugEvent event,
- bool running,
+MessageImpl MessageImpl::NewEvent(DebugEvent event, bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data) {
- MessageImpl message(true, event, running,
- exec_state, event_data, Handle<String>(), NULL);
+ MessageImpl message(true, event, running, exec_state, event_data,
+ Handle<String>(), NULL);
return message;
}
-
-MessageImpl MessageImpl::NewResponse(DebugEvent event,
- bool running,
+MessageImpl MessageImpl::NewResponse(DebugEvent event, bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data) {
- MessageImpl message(false, event, running,
- exec_state, event_data, response_json, client_data);
+ MessageImpl message(false, event, running, exec_state, event_data,
+ response_json, client_data);
return message;
}
-
-MessageImpl::MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
+MessageImpl::MessageImpl(bool is_event, DebugEvent event, bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
@@ -2391,42 +2462,26 @@ MessageImpl::MessageImpl(bool is_event,
response_json_(response_json),
client_data_(client_data) {}
+bool MessageImpl::IsEvent() const { return is_event_; }
-bool MessageImpl::IsEvent() const {
- return is_event_;
-}
+bool MessageImpl::IsResponse() const { return !is_event_; }
+DebugEvent MessageImpl::GetEvent() const { return event_; }
-bool MessageImpl::IsResponse() const {
- return !is_event_;
-}
-
-
-DebugEvent MessageImpl::GetEvent() const {
- return event_;
-}
-
-
-bool MessageImpl::WillStartRunning() const {
- return running_;
-}
-
+bool MessageImpl::WillStartRunning() const { return running_; }
v8::Local<v8::Object> MessageImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
-
v8::Isolate* MessageImpl::GetIsolate() const {
return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
}
-
v8::Local<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
-
v8::Local<v8::String> MessageImpl::GetJSON() const {
Isolate* isolate = event_data_->GetIsolate();
v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate));
@@ -2440,8 +2495,10 @@ v8::Local<v8::String> MessageImpl::GetJSON() const {
return v8::Local<v8::String>();
}
- MaybeHandle<Object> maybe_json =
- Execution::TryCall(isolate, fun, event_data_, 0, NULL);
+ MaybeHandle<Object> maybe_exception;
+ MaybeHandle<Object> maybe_json = Execution::TryCall(
+ isolate, fun, event_data_, 0, nullptr,
+ Execution::MessageHandling::kReport, &maybe_exception);
Handle<Object> json;
if (!maybe_json.ToHandle(&json) || !json->IsString()) {
return v8::Local<v8::String>();
@@ -2471,12 +2528,10 @@ v8::Local<v8::Context> MessageImpl::GetEventContext() const {
return context;
}
-
v8::Debug::ClientData* MessageImpl::GetClientData() const {
return client_data_;
}
-
EventDetailsImpl::EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
@@ -2522,17 +2577,12 @@ v8::Isolate* EventDetailsImpl::GetIsolate() const {
return reinterpret_cast<v8::Isolate*>(exec_state_->GetIsolate());
}
-CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
- client_data_(NULL) {
-}
-
+CommandMessage::CommandMessage()
+ : text_(Vector<uint16_t>::empty()), client_data_(NULL) {}
CommandMessage::CommandMessage(const Vector<uint16_t>& text,
v8::Debug::ClientData* data)
- : text_(text),
- client_data_(data) {
-}
-
+ : text_(text), client_data_(data) {}
void CommandMessage::Dispose() {
text_.Dispose();
@@ -2540,25 +2590,21 @@ void CommandMessage::Dispose() {
client_data_ = NULL;
}
-
CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
v8::Debug::ClientData* data) {
return CommandMessage(command.Clone(), data);
}
-
-CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
- size_(size) {
+CommandMessageQueue::CommandMessageQueue(int size)
+ : start_(0), end_(0), size_(size) {
messages_ = NewArray<CommandMessage>(size);
}
-
CommandMessageQueue::~CommandMessageQueue() {
while (!IsEmpty()) Get().Dispose();
DeleteArray(messages_);
}
-
CommandMessage CommandMessageQueue::Get() {
DCHECK(!IsEmpty());
int result = start_;
@@ -2566,7 +2612,6 @@ CommandMessage CommandMessageQueue::Get() {
return messages_[result];
}
-
void CommandMessageQueue::Put(const CommandMessage& message) {
if ((end_ + 1) % size_ == start_) {
Expand();
@@ -2575,7 +2620,6 @@ void CommandMessageQueue::Put(const CommandMessage& message) {
end_ = (end_ + 1) % size_;
}
-
void CommandMessageQueue::Expand() {
CommandMessageQueue new_queue(size_ * 2);
while (!IsEmpty()) {
@@ -2589,17 +2633,14 @@ void CommandMessageQueue::Expand() {
// Automatic destructor called on new_queue, freeing array_to_free.
}
-
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
: logger_(logger), queue_(size) {}
-
bool LockingCommandMessageQueue::IsEmpty() const {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
-
CommandMessage LockingCommandMessageQueue::Get() {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
@@ -2607,14 +2648,12 @@ CommandMessage LockingCommandMessageQueue::Get() {
return result;
}
-
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
-
void LockingCommandMessageQueue::Clear() {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
queue_.Clear();
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 6e49db6ebd..b3bb3c46a6 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -12,6 +12,7 @@
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
#include "src/debug/debug-interface.h"
+#include "src/debug/interface-types.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/flags.h"
@@ -71,10 +72,12 @@ enum DebugBreakType {
DEBUG_BREAK_SLOT_AT_TAIL_CALL,
};
+const int kDebugPromiseNoID = 0;
+const int kDebugPromiseFirstID = 1;
+
class BreakLocation {
public:
- static BreakLocation FromFrame(Handle<DebugInfo> debug_info,
- JavaScriptFrame* frame);
+ static BreakLocation FromFrame(StandardFrame* frame);
static void AllAtCurrentStatement(Handle<DebugInfo> debug_info,
JavaScriptFrame* frame,
@@ -240,20 +243,17 @@ class DebugInfoListNode {
DebugInfoListNode* next_;
};
-
// Message delivered to the message handler callback. This is either a debugger
// event or the response to a command.
-class MessageImpl: public v8::Debug::Message {
+class MessageImpl : public v8::Debug::Message {
public:
// Create a message object for a debug event.
- static MessageImpl NewEvent(DebugEvent event,
- bool running,
+ static MessageImpl NewEvent(DebugEvent event, bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data);
// Create a message object for the response to a debug command.
- static MessageImpl NewResponse(DebugEvent event,
- bool running,
+ static MessageImpl NewResponse(DebugEvent event, bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
@@ -272,26 +272,21 @@ class MessageImpl: public v8::Debug::Message {
virtual v8::Isolate* GetIsolate() const;
private:
- MessageImpl(bool is_event,
- DebugEvent event,
- bool running,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- Handle<String> response_json,
- v8::Debug::ClientData* client_data);
-
- bool is_event_; // Does this message represent a debug event?
- DebugEvent event_; // Debug event causing the break.
- bool running_; // Will the VM start running after this event?
- Handle<JSObject> exec_state_; // Current execution state.
- Handle<JSObject> event_data_; // Data associated with the event.
+ MessageImpl(bool is_event, DebugEvent event, bool running,
+ Handle<JSObject> exec_state, Handle<JSObject> event_data,
+ Handle<String> response_json, v8::Debug::ClientData* client_data);
+
+ bool is_event_; // Does this message represent a debug event?
+ DebugEvent event_; // Debug event causing the break.
+ bool running_; // Will the VM start running after this event?
+ Handle<JSObject> exec_state_; // Current execution state.
+ Handle<JSObject> event_data_; // Data associated with the event.
Handle<String> response_json_; // Response JSON if message holds a response.
v8::Debug::ClientData* client_data_; // Client data passed with the request.
};
-
// Details of the debug event delivered to the debug event listener.
-class EventDetailsImpl : public v8::DebugInterface::EventDetails {
+class EventDetailsImpl : public debug::EventDetails {
public:
EventDetailsImpl(DebugEvent event,
Handle<JSObject> exec_state,
@@ -315,7 +310,6 @@ class EventDetailsImpl : public v8::DebugInterface::EventDetails {
v8::Debug::ClientData* client_data_; // Data passed to DebugBreakForCommand.
};
-
// Message sent by user to v8 debugger or debugger output message.
// In addition to command text it may contain a pointer to some user data
// which are expected to be passed along with the command response to message
@@ -330,15 +324,14 @@ class CommandMessage {
void Dispose();
Vector<uint16_t> text() const { return text_; }
v8::Debug::ClientData* client_data() const { return client_data_; }
+
private:
- CommandMessage(const Vector<uint16_t>& text,
- v8::Debug::ClientData* data);
+ CommandMessage(const Vector<uint16_t>& text, v8::Debug::ClientData* data);
Vector<uint16_t> text_;
v8::Debug::ClientData* client_data_;
};
-
// A Queue of CommandMessage objects. A thread-safe version is
// LockingCommandMessageQueue, based on this class.
class CommandMessageQueue BASE_EMBEDDED {
@@ -349,6 +342,7 @@ class CommandMessageQueue BASE_EMBEDDED {
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
+
private:
// Doubles the size of the message queue, and copies the messages.
void Expand();
@@ -359,7 +353,6 @@ class CommandMessageQueue BASE_EMBEDDED {
int size_; // The size of the queue buffer. Queue can hold size-1 messages.
};
-
// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
@@ -371,6 +364,7 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear();
+
private:
Logger* logger_;
CommandMessageQueue queue_;
@@ -378,7 +372,6 @@ class LockingCommandMessageQueue BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
-
class DebugFeatureTracker {
public:
enum Feature {
@@ -416,10 +409,8 @@ class Debug {
void OnThrow(Handle<Object> exception);
void OnPromiseReject(Handle<Object> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
- void OnBeforeCompile(Handle<Script> script);
void OnAfterCompile(Handle<Script> script);
- void OnAsyncTaskEvent(Handle<String> type, Handle<Object> id,
- Handle<String> name);
+ void OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id);
// API facing.
void SetEventListener(Handle<Object> callback, Handle<Object> data);
@@ -452,6 +443,13 @@ class Debug {
void ChangeBreakOnException(ExceptionBreakType type, bool enable);
bool IsBreakOnException(ExceptionBreakType type);
+ // The parameter is either a BreakPointInfo object, or a FixedArray of
+ // BreakPointInfo objects.
+ // Returns an empty handle if no breakpoint is hit, or a FixedArray with all
+ // hit breakpoints.
+ MaybeHandle<FixedArray> GetHitBreakPointObjects(
+ Handle<Object> break_point_objects);
+
// Stepping handling.
void PrepareStep(StepAction step_action);
void PrepareStepIn(Handle<JSFunction> function);
@@ -464,7 +462,11 @@ class Debug {
bool GetPossibleBreakpoints(Handle<Script> script, int start_position,
int end_position, std::set<int>* positions);
- void RecordAsyncFunction(Handle<JSGeneratorObject> generator_object);
+ void RecordGenerator(Handle<JSGeneratorObject> generator_object);
+
+ int NextAsyncTaskId(Handle<JSObject> promise);
+
+ void SetAsyncTaskListener(debug::AsyncTaskListener listener, void* data);
// Returns whether the operation succeeded. Compilation can only be triggered
// if a valid closure is passed as the second argument, otherwise the shared
@@ -510,6 +512,9 @@ class Debug {
return is_active() && !debug_context().is_null() && break_id() != 0;
}
+ bool PerformSideEffectCheck(Handle<JSFunction> function);
+ bool PerformSideEffectCheckForCallback(Address function);
+
// Flags and states.
DebugScope* debugger_entry() {
return reinterpret_cast<DebugScope*>(
@@ -543,6 +548,10 @@ class Debug {
return reinterpret_cast<Address>(&is_active_);
}
+ Address hook_on_function_call_address() {
+ return reinterpret_cast<Address>(&hook_on_function_call_);
+ }
+
Address after_break_target_address() {
return reinterpret_cast<Address>(&after_break_target_);
}
@@ -563,6 +572,7 @@ class Debug {
explicit Debug(Isolate* isolate);
void UpdateState();
+ void UpdateHookOnFunctionCall();
void Unload();
void SetNextBreakId() {
thread_local_.break_id_ = ++thread_local_.break_count_;
@@ -570,7 +580,9 @@ class Debug {
// Check whether there are commands in the command queue.
inline bool has_commands() const { return !command_queue_.IsEmpty(); }
- inline bool ignore_events() const { return is_suppressed_ || !is_active_; }
+ inline bool ignore_events() const {
+ return is_suppressed_ || !is_active_ || isolate_->needs_side_effect_check();
+ }
inline bool break_disabled() const {
return break_disabled_ || in_debug_event_listener_;
}
@@ -595,9 +607,8 @@ class Debug {
Handle<Object> promise);
MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
Handle<Script> script, v8::DebugEvent type);
- MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<String> type,
- Handle<Object> id,
- Handle<String> name);
+ MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(Handle<Smi> type,
+ Handle<Smi> id);
// Mirror cache handling.
void ClearMirrorCache();
@@ -607,13 +618,10 @@ class Debug {
Handle<Object> event_data,
v8::Debug::ClientData* client_data);
void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
- void ProcessDebugEvent(v8::DebugEvent event,
- Handle<JSObject> event_data,
+ void ProcessDebugEvent(v8::DebugEvent event, Handle<JSObject> event_data,
bool auto_continue);
- void NotifyMessageHandler(v8::DebugEvent event,
- Handle<JSObject> exec_state,
- Handle<JSObject> event_data,
- bool auto_continue);
+ void NotifyMessageHandler(v8::DebugEvent event, Handle<JSObject> exec_state,
+ Handle<JSObject> event_data, bool auto_continue);
void InvokeMessageHandler(MessageImpl message);
// Find the closest source position for a break point for a given position.
@@ -633,9 +641,9 @@ class Debug {
void ActivateStepOut(StackFrame* frame);
void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
- Handle<Object> CheckBreakPoints(Handle<DebugInfo> debug_info,
- BreakLocation* location,
- bool* has_break_points = nullptr);
+ MaybeHandle<FixedArray> CheckBreakPoints(Handle<DebugInfo> debug_info,
+ BreakLocation* location,
+ bool* has_break_points = nullptr);
bool IsMutedAtCurrentLocation(JavaScriptFrame* frame);
bool CheckBreakPoint(Handle<Object> break_point_object);
MaybeHandle<Object> CallFunction(const char* name, int argc,
@@ -657,20 +665,37 @@ class Debug {
v8::Debug::MessageHandler message_handler_;
+ debug::AsyncTaskListener async_task_listener_ = nullptr;
+ void* async_task_listener_data_ = nullptr;
+
static const int kQueueInitialSize = 4;
base::Semaphore command_received_; // Signaled for each command received.
LockingCommandMessageQueue command_queue_;
+ // Debugger is active, i.e. there is a debug event listener attached.
bool is_active_;
+ // Debugger needs to be notified on every new function call.
+ // Used for stepping and read-only checks.
+ bool hook_on_function_call_;
+ // Suppress debug events.
bool is_suppressed_;
+ // LiveEdit is enabled.
bool live_edit_enabled_;
+ // Do not trigger debug break events.
bool break_disabled_;
+ // Do not break on break points.
bool break_points_active_;
+ // Nested inside a debug event listener.
bool in_debug_event_listener_;
+ // Trigger debug break events for all exceptions.
bool break_on_exception_;
+ // Trigger debug break events for uncaught exceptions.
bool break_on_uncaught_exception_;
+ // Termination exception because side effect check has failed.
+ bool side_effect_check_failed_;
- DebugInfoListNode* debug_info_list_; // List of active debug info objects.
+ // List of active debug info objects.
+ DebugInfoListNode* debug_info_list_;
// Storage location for jump when exiting debug break calls.
// Note that this address is not GC safe. It should be computed immediately
@@ -716,6 +741,8 @@ class Debug {
Handle<Object> return_value_;
Object* suspended_generator_;
+
+ int async_task_count_;
};
// Storage location for registers when handling debug break calls
@@ -728,6 +755,7 @@ class Debug {
friend class DisableBreak;
friend class LiveEdit;
friend class SuppressDebug;
+ friend class NoSideEffectScope;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
friend void CheckDebuggerUnloaded(bool check_functions); // In test-debug.cc
@@ -767,12 +795,12 @@ class DebugScope BASE_EMBEDDED {
// Stack allocated class for disabling break.
class DisableBreak BASE_EMBEDDED {
public:
- explicit DisableBreak(Debug* debug, bool disable_break)
+ explicit DisableBreak(Debug* debug)
: debug_(debug),
previous_break_disabled_(debug->break_disabled_),
previous_in_debug_event_listener_(debug->in_debug_event_listener_) {
- debug_->break_disabled_ = disable_break;
- debug_->in_debug_event_listener_ = disable_break;
+ debug_->break_disabled_ = true;
+ debug_->in_debug_event_listener_ = true;
}
~DisableBreak() {
debug_->break_disabled_ = previous_break_disabled_;
@@ -801,6 +829,23 @@ class SuppressDebug BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(SuppressDebug);
};
+class NoSideEffectScope {
+ public:
+ NoSideEffectScope(Isolate* isolate, bool disallow_side_effects)
+ : isolate_(isolate),
+ old_needs_side_effect_check_(isolate->needs_side_effect_check()) {
+ isolate->set_needs_side_effect_check(old_needs_side_effect_check_ ||
+ disallow_side_effects);
+ isolate->debug()->UpdateHookOnFunctionCall();
+ isolate->debug()->side_effect_check_failed_ = false;
+ }
+ ~NoSideEffectScope();
+
+ private:
+ Isolate* isolate_;
+ bool old_needs_side_effect_check_;
+ DISALLOW_COPY_AND_ASSIGN(NoSideEffectScope);
+};
// Code generator routines.
class DebugCodegen : public AllStatic {
diff --git a/deps/v8/src/debug/debug.js b/deps/v8/src/debug/debug.js
index 8031763a13..512bedb69f 100644
--- a/deps/v8/src/debug/debug.js
+++ b/deps/v8/src/debug/debug.js
@@ -43,11 +43,9 @@ var sourceLineBeginningSkip = /^(?:\s*(?:\/\*.*?\*\/)*)*/;
// from the API include file debug.h.
Debug.DebugEvent = { Break: 1,
Exception: 2,
- NewFunction: 3,
- BeforeCompile: 4,
- AfterCompile: 5,
- CompileError: 6,
- AsyncTaskEvent: 7 };
+ AfterCompile: 3,
+ CompileError: 4,
+ AsyncTaskEvent: 5 };
// Types of exceptions that can be broken upon.
Debug.ExceptionBreak = { Caught : 0,
@@ -256,20 +254,6 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
}
-// Creates a clone of script breakpoint that is linked to another script.
-ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
- var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
- other_script.id, this.line_, this.column_, this.groupId_,
- this.position_alignment_);
- copy.number_ = next_break_point_number++;
- script_break_points.push(copy);
-
- copy.active_ = this.active_;
- copy.condition_ = this.condition_;
- return copy;
-};
-
-
ScriptBreakPoint.prototype.number = function() {
return this.number_;
};
@@ -435,31 +419,6 @@ ScriptBreakPoint.prototype.clear = function () {
};
-// Function called from runtime when a new script is compiled to set any script
-// break points set in this script.
-function UpdateScriptBreakPoints(script) {
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
- if ((break_point.type() == Debug.ScriptBreakPointType.ScriptName ||
- break_point.type() == Debug.ScriptBreakPointType.ScriptRegExp) &&
- break_point.matchesScript(script)) {
- break_point.set(script);
- }
- }
-}
-
-
-function GetScriptBreakPoints(script) {
- var result = [];
- for (var i = 0; i < script_break_points.length; i++) {
- if (script_break_points[i].matchesScript(script)) {
- result.push(script_break_points[i]);
- }
- }
- return result;
-}
-
-
Debug.setListener = function(listener, opt_data) {
if (!IS_FUNCTION(listener) && !IS_UNDEFINED(listener) && !IS_NULL(listener)) {
throw %make_type_error(kDebuggerType);
@@ -476,7 +435,7 @@ Debug.setListener = function(listener, opt_data) {
Debug.findScript = function(func_or_script_name) {
if (IS_FUNCTION(func_or_script_name)) {
return %FunctionGetScript(func_or_script_name);
- } else if (IS_REGEXP(func_or_script_name)) {
+ } else if (%IsRegExp(func_or_script_name)) {
var scripts = this.scripts();
var last_result = null;
var result_count = 0;
@@ -879,11 +838,8 @@ ExecutionState.prototype.prepareStep = function(action) {
throw %make_type_error(kDebuggerType);
};
-ExecutionState.prototype.evaluateGlobal = function(source, disable_break,
- opt_additional_context) {
- return MakeMirror(%DebugEvaluateGlobal(this.break_id, source,
- TO_BOOLEAN(disable_break),
- opt_additional_context));
+ExecutionState.prototype.evaluateGlobal = function(source) {
+ return MakeMirror(%DebugEvaluateGlobal(this.break_id, source));
};
ExecutionState.prototype.frameCount = function() {
@@ -1132,15 +1088,14 @@ function MakeScriptObject_(script, include_source) {
}
-function MakeAsyncTaskEvent(type, id, name) {
- return new AsyncTaskEvent(type, id, name);
+function MakeAsyncTaskEvent(type, id) {
+ return new AsyncTaskEvent(type, id);
}
-function AsyncTaskEvent(type, id, name) {
+function AsyncTaskEvent(type, id) {
this.type_ = type;
this.id_ = id;
- this.name_ = name;
}
@@ -1149,11 +1104,6 @@ AsyncTaskEvent.prototype.type = function() {
}
-AsyncTaskEvent.prototype.name = function() {
- return this.name_;
-}
-
-
AsyncTaskEvent.prototype.id = function() {
return this.id_;
}
@@ -1915,8 +1865,6 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
var expression = request.arguments.expression;
var frame = request.arguments.frame;
var global = request.arguments.global;
- var disable_break = request.arguments.disable_break;
- var additional_context = request.arguments.additional_context;
// The expression argument could be an integer so we convert it to a
// string.
@@ -1931,35 +1879,13 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
return response.failed('Arguments "frame" and "global" are exclusive');
}
- var additional_context_object;
- if (additional_context) {
- additional_context_object = {};
- for (var i = 0; i < additional_context.length; i++) {
- var mapping = additional_context[i];
-
- if (!IS_STRING(mapping.name)) {
- return response.failed("Context element #" + i +
- " doesn't contain name:string property");
- }
-
- var raw_value = DebugCommandProcessor.resolveValue_(mapping);
- additional_context_object[mapping.name] = raw_value;
- }
- }
-
// Global evaluate.
if (global) {
// Evaluate in the native context.
- response.body = this.exec_state_.evaluateGlobal(
- expression, TO_BOOLEAN(disable_break), additional_context_object);
+ response.body = this.exec_state_.evaluateGlobal(expression);
return;
}
- // Default value for disable_break is true.
- if (IS_UNDEFINED(disable_break)) {
- disable_break = true;
- }
-
// Cannot evaluate in a frame when there are no frames.
if (this.exec_state_.frameCount() == 0) {
return response.failed('No frames');
@@ -1972,13 +1898,11 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
return response.failed('Invalid frame "' + frame + '"');
}
// Evaluate in the specified frame.
- response.body = this.exec_state_.frame(frame_number).evaluate(
- expression, TO_BOOLEAN(disable_break), additional_context_object);
+ response.body = this.exec_state_.frame(frame_number).evaluate(expression);
return;
} else {
// Evaluate in the selected frame.
- response.body = this.exec_state_.frame().evaluate(
- expression, TO_BOOLEAN(disable_break), additional_context_object);
+ response.body = this.exec_state_.frame().evaluate(expression);
return;
}
};
@@ -2464,12 +2388,6 @@ utils.InstallFunctions(utils, DONT_ENUM, [
"MakeCompileEvent", MakeCompileEvent,
"MakeAsyncTaskEvent", MakeAsyncTaskEvent,
"IsBreakPointTriggered", IsBreakPointTriggered,
- "UpdateScriptBreakPoints", UpdateScriptBreakPoints,
]);
-// Export to liveedit.js
-utils.Export(function(to) {
- to.GetScriptBreakPoints = GetScriptBreakPoints;
-});
-
})
diff --git a/deps/v8/src/debug/ia32/debug-ia32.cc b/deps/v8/src/debug/ia32/debug-ia32.cc
index 47ec69ec5b..1e0ee750ca 100644
--- a/deps/v8/src/debug/ia32/debug-ia32.cc
+++ b/deps/v8/src/debug/ia32/debug-ia32.cc
@@ -129,7 +129,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ pop(ebp);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+ __ CheckDebugHook(edi, no_reg, dummy, dummy);
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
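This same one-line rename from FloodFunctionIfStepping to CheckDebugHook recurs for every architecture below. It reflects the new hook_on_function_call_ flag: instead of flooding the function only when stepping, the call prologue now tests a single flag that also covers side-effect checks. Conceptually (a sketch with illustrative names, not the generated assembly):

// debug_hook_flag stands for the byte exported via
// Debug::hook_on_function_call_address().
if (debug_hook_flag) {
  NotifyDebuggerOnFunctionCall(function);  // Hypothetical helper: handles
                                           // stepping and side-effect checks.
}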
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
new file mode 100644
index 0000000000..2b7072ce2b
--- /dev/null
+++ b/deps/v8/src/debug/interface-types.h
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_DEBUG_INTERFACE_TYPES_H_
+#define V8_DEBUG_INTERFACE_TYPES_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace v8 {
+namespace debug {
+
+/**
+ * Defines a location inside a script.
+ * Lines and columns are 0-based.
+ */
+class Location {
+ public:
+ Location(int line_number, int column_number);
+ /**
+ * Create empty location.
+ */
+ Location();
+
+ int GetLineNumber() const;
+ int GetColumnNumber() const;
+ bool IsEmpty() const;
+
+ private:
+ int line_number_;
+ int column_number_;
+};
+
+/**
+ * The result of disassembling a wasm function.
+ * Consists of the disassembly string and an offset table mapping wasm byte
+ * offsets to line and column in the disassembly.
+ * The offset table entries are ordered by the byte_offset.
+ * All numbers are 0-based.
+ */
+struct WasmDisassemblyOffsetTableEntry {
+ WasmDisassemblyOffsetTableEntry(uint32_t byte_offset, int line, int column)
+ : byte_offset(byte_offset), line(line), column(column) {}
+
+ uint32_t byte_offset;
+ int line;
+ int column;
+};
+struct WasmDisassembly {
+ using OffsetTable = std::vector<WasmDisassemblyOffsetTableEntry>;
+ WasmDisassembly() {}
+ WasmDisassembly(std::string disassembly, OffsetTable offset_table)
+ : disassembly(std::move(disassembly)),
+ offset_table(std::move(offset_table)) {}
+
+ std::string disassembly;
+ OffsetTable offset_table;
+};
+
+enum PromiseDebugActionType {
+ kDebugEnqueueAsyncFunction,
+ kDebugEnqueuePromiseResolve,
+ kDebugEnqueuePromiseReject,
+ kDebugEnqueuePromiseResolveThenableJob,
+ kDebugPromiseCollected,
+ kDebugWillHandle,
+ kDebugDidHandle,
+};
+
+} // namespace debug
+} // namespace v8
+
+#endif // V8_DEBUG_INTERFACE_TYPES_H_
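Since the offset table is ordered by byte_offset, a disassembly position can be recovered with a binary search. An illustrative helper; FindLocation is not part of the patch and assumes <algorithm> plus this header:

#include <algorithm>

v8::debug::Location FindLocation(const v8::debug::WasmDisassembly& dis,
                                 uint32_t byte_offset) {
  const auto& table = dis.offset_table;
  auto it = std::upper_bound(
      table.begin(), table.end(), byte_offset,
      [](uint32_t off, const v8::debug::WasmDisassemblyOffsetTableEntry& e) {
        return off < e.byte_offset;
      });
  if (it == table.begin()) return v8::debug::Location();  // empty location
  --it;  // Last entry with byte_offset <= the queried offset.
  return v8::debug::Location(it->line, it->column);
}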
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index ace829739f..3ced3cc427 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -604,12 +604,9 @@ static int GetArrayLength(Handle<JSArray> array) {
return Smi::cast(length)->value();
}
-
-void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
- int start_position,
- int end_position, int param_num,
- int literal_count,
- int parent_index) {
+void FunctionInfoWrapper::SetInitialProperties(
+ Handle<String> name, int start_position, int end_position, int param_num,
+ int literal_count, int parent_index, int function_literal_id) {
HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
@@ -617,6 +614,7 @@ void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
this->SetSmiValueField(kParamNumOffset_, param_num);
this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
+ this->SetSmiValueField(kFunctionLiteralIdOffset_, function_literal_id);
}
void FunctionInfoWrapper::SetSharedFunctionInfo(
@@ -1038,15 +1036,36 @@ void LiveEdit::ReplaceFunctionCode(
isolate->compilation_cache()->Remove(shared_info);
}
-
-void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array) {
+void LiveEdit::FunctionSourceUpdated(Handle<JSArray> shared_info_array,
+ int new_function_literal_id) {
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
+ shared_info->set_function_literal_id(new_function_literal_id);
DeoptimizeDependentFunctions(*shared_info);
shared_info_array->GetIsolate()->compilation_cache()->Remove(shared_info);
}
+void LiveEdit::FixupScript(Handle<Script> script, int max_function_literal_id) {
+ Isolate* isolate = script->GetIsolate();
+ Handle<FixedArray> old_infos(script->shared_function_infos(), isolate);
+ Handle<FixedArray> new_infos(
+ isolate->factory()->NewFixedArray(max_function_literal_id + 1));
+ script->set_shared_function_infos(*new_infos);
+ SharedFunctionInfo::ScriptIterator iterator(isolate, old_infos);
+ while (SharedFunctionInfo* shared = iterator.Next()) {
+ // We can't use SharedFunctionInfo::SetScript(info, undefined_value()) here,
+ // as we severed the link from the Script to the SharedFunctionInfo above.
+ Handle<SharedFunctionInfo> info(shared, isolate);
+ info->set_script(isolate->heap()->undefined_value());
+ Handle<Object> new_noscript_list = WeakFixedArray::Add(
+ isolate->factory()->noscript_shared_function_infos(), info);
+ isolate->heap()->SetRootNoScriptSharedFunctionInfos(*new_noscript_list);
+
+ // Put the SharedFunctionInfo at its new, correct location.
+ SharedFunctionInfo::SetScript(info, script);
+ }
+}
void LiveEdit::SetFunctionScript(Handle<JSValue> function_wrapper,
Handle<Object> script_handle) {
@@ -1173,6 +1192,10 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
copy->set_eval_from_shared(original->eval_from_shared());
copy->set_eval_from_position(original->eval_from_position());
+ Handle<FixedArray> infos(isolate->factory()->NewFixedArray(
+ original->shared_function_infos()->length()));
+ copy->set_shared_function_infos(*infos);
+
// Copy all the flags, but clear compilation state.
copy->set_flags(original->flags());
copy->set_compilation_state(Script::COMPILATION_STATE_INITIAL);
@@ -1180,7 +1203,6 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
return copy;
}
-
Handle<Object> LiveEdit::ChangeScriptSource(Handle<Script> original_script,
Handle<String> new_source,
Handle<Object> old_script_name) {
@@ -1856,10 +1878,8 @@ void LiveEditFunctionTracker::VisitFunctionLiteral(FunctionLiteral* node) {
// Recurse using the regular traversal.
AstTraversalVisitor::VisitFunctionLiteral(node);
// FunctionDone are called in post-order.
- // TODO(jgruber): If required, replace the (linear cost)
- // FindSharedFunctionInfo call with a more efficient implementation.
Handle<SharedFunctionInfo> info =
- script_->FindSharedFunctionInfo(node).ToHandleChecked();
+ script_->FindSharedFunctionInfo(isolate_, node).ToHandleChecked();
FunctionDone(info, node->scope());
}
@@ -1869,7 +1889,7 @@ void LiveEditFunctionTracker::FunctionStarted(FunctionLiteral* fun) {
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
fun->materialized_literal_count(),
- current_parent_index_);
+ current_parent_index_, fun->function_literal_id());
current_parent_index_ = len_;
SetElementSloppy(result_, len_, info.GetJSArray());
len_++;
diff --git a/deps/v8/src/debug/liveedit.h b/deps/v8/src/debug/liveedit.h
index 2034dcb026..be70d2e50c 100644
--- a/deps/v8/src/debug/liveedit.h
+++ b/deps/v8/src/debug/liveedit.h
@@ -83,7 +83,10 @@ class LiveEdit : AllStatic {
static void ReplaceFunctionCode(Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array);
- static void FunctionSourceUpdated(Handle<JSArray> shared_info_array);
+ static void FixupScript(Handle<Script> script, int max_function_literal_id);
+
+ static void FunctionSourceUpdated(Handle<JSArray> shared_info_array,
+ int new_function_literal_id);
// Updates script field in FunctionSharedInfo.
static void SetFunctionScript(Handle<JSValue> function_wrapper,
@@ -278,7 +281,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
void SetInitialProperties(Handle<String> name, int start_position,
int end_position, int param_num, int literal_count,
- int parent_index);
+ int parent_index, int function_literal_id);
void SetFunctionScopeInfo(Handle<Object> scope_info_array) {
this->SetField(kFunctionScopeInfoOffset_, scope_info_array);
@@ -311,7 +314,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kParentIndexOffset_ = 5;
static const int kSharedFunctionInfoOffset_ = 6;
static const int kLiteralNumOffset_ = 7;
- static const int kSize_ = 8;
+ static const int kFunctionLiteralIdOffset_ = 8;
+ static const int kSize_ = 9;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
diff --git a/deps/v8/src/debug/liveedit.js b/deps/v8/src/debug/liveedit.js
index e9ee8092a2..0076543f8b 100644
--- a/deps/v8/src/debug/liveedit.js
+++ b/deps/v8/src/debug/liveedit.js
@@ -27,15 +27,11 @@
// Imports
var FindScriptSourcePosition = global.Debug.findScriptSourcePosition;
- var GetScriptBreakPoints;
var GlobalArray = global.Array;
var MathFloor = global.Math.floor;
+ var MathMax = global.Math.max;
var SyntaxError = global.SyntaxError;
- utils.Import(function(from) {
- GetScriptBreakPoints = from.GetScriptBreakPoints;
- });
-
// -------------------------------------------------------------------
// Forward declaration for minifier.
@@ -80,6 +76,10 @@
}
throw failure;
}
+
+ var max_function_literal_id = new_compile_info.reduce(
+ (max, info) => MathMax(max, info.function_literal_id), 0);
+
var root_new_node = BuildCodeInfoTree(new_compile_info);
// Link recompiled script data with other data.
@@ -170,10 +170,6 @@
// command for correct stack state if the stack was modified.
preview_description.stack_modified = dropped_functions_number != 0;
- // Start with breakpoints. Convert their line/column positions and
- // temporary remove.
- var break_points_restorer = TemporaryRemoveBreakPoints(script, change_log);
-
var old_script;
// Create an old script only if there are function that should be linked
@@ -186,8 +182,7 @@
// Update the script text and create a new script representing an old
// version of the script.
- old_script = %LiveEditReplaceScript(script, new_source,
- old_script_name);
+ old_script = %LiveEditReplaceScript(script, new_source, old_script_name);
var link_to_old_script_report = new GlobalArray();
change_log.push( { linked_to_old_script: link_to_old_script_report } );
@@ -201,12 +196,6 @@
preview_description.created_script_name = old_script_name;
}
- // Link to an actual script all the functions that we are going to use.
- for (var i = 0; i < link_to_original_script_list.length; i++) {
- %LiveEditFunctionSetScript(
- link_to_original_script_list[i].info.shared_function_info, script);
- }
-
for (var i = 0; i < replace_code_list.length; i++) {
PatchFunctionCode(replace_code_list[i], change_log);
}
@@ -221,14 +210,24 @@
position_patch_report);
if (update_positions_list[i].live_shared_function_infos) {
- update_positions_list[i].live_shared_function_infos.
- forEach(function (info) {
- %LiveEditFunctionSourceUpdated(info.raw_array);
- });
+ var new_function_literal_id =
+ update_positions_list[i]
+ .corresponding_node.info.function_literal_id;
+ update_positions_list[i].live_shared_function_infos.forEach(function(
+ info) {
+ %LiveEditFunctionSourceUpdated(
+ info.raw_array, new_function_literal_id);
+ });
}
}
- break_points_restorer(pos_translator, old_script);
+ %LiveEditFixupScript(script, max_function_literal_id);
+
+ // Link all the functions we're going to use to an actual script.
+ for (var i = 0; i < link_to_original_script_list.length; i++) {
+ %LiveEditFunctionSetScript(
+ link_to_original_script_list[i].info.shared_function_info, script);
+ }
preview_description.updated = true;
return preview_description;
@@ -368,79 +367,6 @@
}
}
-
- // Returns function that restores breakpoints.
- function TemporaryRemoveBreakPoints(original_script, change_log) {
- var script_break_points = GetScriptBreakPoints(original_script);
-
- var break_points_update_report = [];
- change_log.push( { break_points_update: break_points_update_report } );
-
- var break_point_old_positions = [];
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
-
- break_point.clear();
-
- // TODO(LiveEdit): be careful with resource offset here.
- var break_point_position = FindScriptSourcePosition(original_script,
- break_point.line(), break_point.column());
-
- var old_position_description = {
- position: break_point_position,
- line: break_point.line(),
- column: break_point.column()
- };
- break_point_old_positions.push(old_position_description);
- }
-
-
- // Restores breakpoints and creates their copies in the "old" copy of
- // the script.
- return function (pos_translator, old_script_copy_opt) {
- // Update breakpoints (change positions and restore them in old version
- // of script.
- for (var i = 0; i < script_break_points.length; i++) {
- var break_point = script_break_points[i];
- if (old_script_copy_opt) {
- var clone = break_point.cloneForOtherScript(old_script_copy_opt);
- clone.set(old_script_copy_opt);
-
- break_points_update_report.push( {
- type: "copied_to_old",
- id: break_point.number(),
- new_id: clone.number(),
- positions: break_point_old_positions[i]
- } );
- }
-
- var updated_position = pos_translator.Translate(
- break_point_old_positions[i].position,
- PosTranslator.ShiftWithTopInsideChunkHandler);
-
- var new_location =
- original_script.locationFromPosition(updated_position, false);
-
- break_point.update_positions(new_location.line, new_location.column);
-
- var new_position_description = {
- position: updated_position,
- line: new_location.line,
- column: new_location.column
- };
-
- break_point.set(original_script);
-
- break_points_update_report.push( { type: "position_changed",
- id: break_point.number(),
- old_positions: break_point_old_positions[i],
- new_positions: new_position_description
- } );
- }
- };
- }
-
-
function Assert(condition, message) {
if (!condition) {
if (message) {
@@ -742,6 +668,8 @@
old_children[old_index].corresponding_node = UNDEFINED;
old_node.status = FunctionStatus.CHANGED;
}
+ } else {
+ ProcessNode(old_children[old_index], new_children[new_index]);
}
} else {
old_children[old_index].status = FunctionStatus.DAMAGED;
@@ -845,6 +773,7 @@
this.scope_info = raw_array[4];
this.outer_index = raw_array[5];
this.shared_function_info = raw_array[6];
+ this.function_literal_id = raw_array[8];
this.next_sibling_index = null;
this.raw_array = raw_array;
}
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 4d8b54f4b6..c6daf2d226 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -130,7 +130,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+ __ CheckDebugHook(a1, no_reg, dummy, dummy);
// Load context from the function.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index 2a6ce7b5cd..0230f13145 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -132,7 +132,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(a1, no_reg, dummy, dummy);
+ __ CheckDebugHook(a1, no_reg, dummy, dummy);
// Load context from the function.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index 4bc86da7f4..2713be36b7 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -13,8 +13,6 @@ var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
var MapEntries;
var MapIteratorNext;
-var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
-var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
var SetIteratorNext;
var SetValues;
@@ -106,12 +104,6 @@ function ClearMirrorCache(value) {
}
-function ObjectIsPromise(value) {
- return IS_RECEIVER(value) &&
- !IS_UNDEFINED(%DebugGetProperty(value, promiseStateSymbol));
-}
-
-
/**
* Returns the mirror for a specified value or object.
*
@@ -156,7 +148,7 @@ function MakeMirror(value, opt_transient) {
mirror = new DateMirror(value);
} else if (IS_FUNCTION(value)) {
mirror = new FunctionMirror(value);
- } else if (IS_REGEXP(value)) {
+ } else if (%IsRegExp(value)) {
mirror = new RegExpMirror(value);
} else if (IS_ERROR(value)) {
mirror = new ErrorMirror(value);
@@ -168,7 +160,7 @@ function MakeMirror(value, opt_transient) {
mirror = new SetMirror(value);
} else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
mirror = new IteratorMirror(value);
- } else if (ObjectIsPromise(value)) {
+ } else if (%is_promise(value)) {
mirror = new PromiseMirror(value);
} else if (IS_GENERATOR(value)) {
mirror = new GeneratorMirror(value);
@@ -231,11 +223,10 @@ function inherits(ctor, superCtor) {
var kMaxProtocolStringLength = 80;
-// A copy of the PropertyType enum from property-details.h
+// A copy of the PropertyKind enum from property-details.h
var PropertyType = {};
-PropertyType.Data = 0;
-PropertyType.DataConstant = 2;
-PropertyType.AccessorConstant = 3;
+PropertyType.Data = 0;
+PropertyType.Accessor = 1;
// Different attributes for a property.
@@ -807,7 +798,7 @@ ObjectMirror.prototype.lookupProperty = function(value) {
// Skip properties which are defined through accessors.
var property = properties[i];
- if (property.propertyType() != PropertyType.AccessorConstant) {
+ if (property.propertyType() == PropertyType.Data) {
if (property.value_ === value.value_) {
return property;
}
@@ -1273,7 +1264,7 @@ inherits(PromiseMirror, ObjectMirror);
function PromiseGetStatus_(value) {
- var status = %DebugGetProperty(value, promiseStateSymbol);
+ var status = %PromiseStatus(value);
if (status == 0) return "pending";
if (status == 1) return "resolved";
return "rejected";
@@ -1281,7 +1272,7 @@ function PromiseGetStatus_(value) {
function PromiseGetValue_(value) {
- return %DebugGetProperty(value, promiseResultSymbol);
+ return %PromiseResult(value);
}
@@ -1553,7 +1544,7 @@ PropertyMirror.prototype.attributes = function() {
PropertyMirror.prototype.propertyType = function() {
- return %DebugPropertyTypeFromDetails(this.details_);
+ return %DebugPropertyKindFromDetails(this.details_);
};
@@ -1611,7 +1602,7 @@ PropertyMirror.prototype.setter = function() {
*/
PropertyMirror.prototype.isNative = function() {
return this.is_interceptor_ ||
- ((this.propertyType() == PropertyType.AccessorConstant) &&
+ ((this.propertyType() == PropertyType.Accessor) &&
!this.hasGetter() && !this.hasSetter());
};
@@ -2019,14 +2010,11 @@ FrameMirror.prototype.allScopes = function(opt_ignore_nested_scopes) {
};
-FrameMirror.prototype.evaluate = function(source, disable_break,
- opt_context_object) {
+FrameMirror.prototype.evaluate = function(source) {
return MakeMirror(%DebugEvaluate(this.break_id_,
this.details_.frameId(),
this.details_.inlinedFrameIndex(),
- source,
- TO_BOOLEAN(disable_break),
- opt_context_object));
+ source));
};
diff --git a/deps/v8/src/debug/ppc/debug-ppc.cc b/deps/v8/src/debug/ppc/debug-ppc.cc
index e57aa3caa2..acca19a75e 100644
--- a/deps/v8/src/debug/ppc/debug-ppc.cc
+++ b/deps/v8/src/debug/ppc/debug-ppc.cc
@@ -137,7 +137,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(r4, no_reg, dummy, dummy);
+ __ CheckDebugHook(r4, no_reg, dummy, dummy);
// Load context from the function.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
diff --git a/deps/v8/src/debug/s390/debug-s390.cc b/deps/v8/src/debug/s390/debug-s390.cc
index b745d5b966..a225c72f13 100644
--- a/deps/v8/src/debug/s390/debug-s390.cc
+++ b/deps/v8/src/debug/s390/debug-s390.cc
@@ -141,7 +141,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LeaveFrame(StackFrame::INTERNAL);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(r3, no_reg, dummy, dummy);
+ __ CheckDebugHook(r3, no_reg, dummy, dummy);
// Load context from the function.
__ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
diff --git a/deps/v8/src/debug/x64/debug-x64.cc b/deps/v8/src/debug/x64/debug-x64.cc
index 4f80e18c85..afdc3303a2 100644
--- a/deps/v8/src/debug/x64/debug-x64.cc
+++ b/deps/v8/src/debug/x64/debug-x64.cc
@@ -129,7 +129,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ popq(rbp);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(rdi, no_reg, dummy, dummy);
+ __ CheckDebugHook(rdi, no_reg, dummy, dummy);
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
diff --git a/deps/v8/src/debug/x87/OWNERS b/deps/v8/src/debug/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/debug/x87/OWNERS
+++ b/deps/v8/src/debug/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/debug/x87/debug-x87.cc b/deps/v8/src/debug/x87/debug-x87.cc
index c29eac19c8..8810f01f42 100644
--- a/deps/v8/src/debug/x87/debug-x87.cc
+++ b/deps/v8/src/debug/x87/debug-x87.cc
@@ -129,7 +129,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ pop(ebp);
ParameterCount dummy(0);
- __ FloodFunctionIfStepping(edi, no_reg, dummy, dummy);
+ __ CheckDebugHook(edi, no_reg, dummy, dummy);
// Load context from the function.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
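Each port replaces FloodFunctionIfStepping with CheckDebugHook in the frame-dropper stub: instead of flooding the function's code for stepping, the new hook tests a single isolate-wide flag whose address is registered as an external reference later in this same diff (Debug::hook_on_function_call_address()). A loose x64-flavoured sketch of the check -- the function name and instruction sequence are illustrative, only the external reference is taken from this diff:

    void MacroAssembler::CheckDebugHookSketch(Register fun) {  // illustrative
      Label skip_hook;
      ExternalReference debug_hook_active =
          ExternalReference::debug_hook_on_function_call_address(isolate());
      Move(kScratchRegister, debug_hook_active);
      cmpb(Operand(kScratchRegister, 0), Immediate(0));
      j(equal, &skip_hook);
      // ... set up arguments and call the debug-on-function-call runtime ...
      bind(&skip_hook);
    }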
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index dddf62e1cc..97f82cb3e1 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -257,9 +257,8 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
- bool turbofanned = code->is_turbofanned() &&
- function->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization;
+ bool turbofanned =
+ code->is_turbofanned() && function->shared()->asm_function();
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
bool builtin = code->kind() == Code::BUILTIN;
@@ -391,14 +390,13 @@ void Deoptimizer::MarkAllCodeForContext(Context* context) {
}
}
-
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
Isolate* isolate = function->GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
- Code* code = function->code();
+ if (code == nullptr) code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
// refer to that code. The code cannot be shared across native contexts,
@@ -627,19 +625,15 @@ namespace {
int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kFunction: {
- BailoutId node_id = translated_frame->node_id();
+#ifdef DEBUG
JSFunction* function =
JSFunction::cast(translated_frame->begin()->GetRawValue());
Code* non_optimized_code = function->shared()->code();
- FixedArray* raw_data = non_optimized_code->deoptimization_data();
- DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
- unsigned pc_and_state =
- Deoptimizer::GetOutputInfo(data, node_id, function->shared());
- unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
HandlerTable* table =
HandlerTable::cast(non_optimized_code->handler_table());
- HandlerTable::CatchPrediction prediction;
- return table->LookupRange(pc_offset, data_out, &prediction);
+ DCHECK_EQ(0, table->NumberOfRangeEntries());
+#endif
+ break;
}
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
@@ -647,8 +641,7 @@ int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
JSFunction::cast(translated_frame->begin()->GetRawValue());
BytecodeArray* bytecode = function->shared()->bytecode_array();
HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
- HandlerTable::CatchPrediction prediction;
- return table->LookupRange(bytecode_offset, data_out, &prediction);
+ return table->LookupRange(bytecode_offset, data_out, nullptr);
}
default:
break;
@@ -2721,6 +2714,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
+ CHECK(code->instruction_start() <= pc && pc <= code->instruction_end());
SourcePosition last_position = SourcePosition::Unknown();
DeoptimizeReason last_reason = DeoptimizeReason::kNoReason;
int last_deopt_id = kNoDeoptimizationId;
@@ -2730,9 +2724,7 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
RelocInfo::ModeMask(RelocInfo::DEOPT_INLINING_ID);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
- if (info->pc() >= pc) {
- return DeoptInfo(last_position, last_reason, last_deopt_id);
- }
+ if (info->pc() >= pc) break;
if (info->rmode() == RelocInfo::DEOPT_SCRIPT_OFFSET) {
int script_offset = static_cast<int>(info->data());
it.next();
@@ -2745,7 +2737,7 @@ Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
last_reason = static_cast<DeoptimizeReason>(info->data());
}
}
- return DeoptInfo(SourcePosition::Unknown(), DeoptimizeReason::kNoReason, -1);
+ return DeoptInfo(last_position, last_reason, last_deopt_id);
}
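The scan previously returned the accumulated info only when it encountered a reloc entry at or past pc, and fell through to Unknown/kNoReason/-1 when pc lay beyond the last entry; the fix unifies both paths so the last info seen before pc always wins. Distilled control flow (UpdateLastSeen is a hypothetical stand-in for the three rmode branches):

    for (RelocIterator it(code, mask); !it.done(); it.next()) {
      if (it.rinfo()->pc() >= pc) break;  // stop scanning, keep what was seen
      UpdateLastSeen(it.rinfo());         // hypothetical helper
    }
    return DeoptInfo(last_position, last_reason, last_deopt_id);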
@@ -3622,11 +3614,335 @@ void TranslatedState::Prepare(bool has_adapted_arguments,
UpdateFromPreviouslyMaterializedObjects();
}
+class TranslatedState::CapturedObjectMaterializer {
+ public:
+ CapturedObjectMaterializer(TranslatedState* state, int frame_index,
+ int field_count)
+ : state_(state), frame_index_(frame_index), field_count_(field_count) {}
+
+ Handle<Object> FieldAt(int* value_index) {
+ CHECK(field_count_ > 0);
+ --field_count_;
+ return state_->MaterializeAt(frame_index_, value_index);
+ }
+
+ ~CapturedObjectMaterializer() { CHECK_EQ(0, field_count_); }
+
+ private:
+ TranslatedState* state_;
+ int frame_index_;
+ int field_count_;
+};
+
+Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
+ TranslatedValue* slot, int frame_index, int* value_index) {
+ int length = slot->GetChildrenCount();
+
+ CapturedObjectMaterializer materializer(this, frame_index, length);
+
+ Handle<Object> result;
+ if (slot->value_.ToHandle(&result)) {
+ // This has been previously materialized; return the previous value.

+ // We still need to skip all the nested objects.
+ for (int i = 0; i < length; i++) {
+ materializer.FieldAt(value_index);
+ }
+
+ return result;
+ }
+
+ Handle<Object> map_object = materializer.FieldAt(value_index);
+ Handle<Map> map = Map::GeneralizeAllFields(Handle<Map>::cast(map_object));
+ switch (map->instance_type()) {
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case HEAP_NUMBER_TYPE: {
+ // Reuse the HeapNumber value directly as it is already properly
+ // tagged and skip materializing the HeapNumber explicitly.
+ Handle<Object> object = materializer.FieldAt(value_index);
+ slot->value_ = object;
+ // On 32-bit architectures, there is an extra slot there because
+ // the escape analysis calculates the number of slots as
+ // object-size/pointer-size. To account for this, we read out
+ // any extra slots.
+ for (int i = 0; i < length - 2; i++) {
+ materializer.FieldAt(value_index);
+ }
+ return object;
+ }
+ case JS_OBJECT_TYPE:
+ case JS_ERROR_TYPE:
+ case JS_ARGUMENTS_TYPE: {
+ Handle<JSObject> object =
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ for (int i = 0; i < length - 3; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
+ object->FastPropertyAtPut(index, *value);
+ }
+ return object;
+ }
+ case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT8_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT16_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_INT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE: {
+ Handle<JSArrayIterator> object = Handle<JSArrayIterator>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> iterated_object = materializer.FieldAt(value_index);
+ Handle<Object> next_index = materializer.FieldAt(value_index);
+ Handle<Object> iterated_object_map = materializer.FieldAt(value_index);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_object(*iterated_object);
+ object->set_index(*next_index);
+ object->set_object_map(*iterated_object_map);
+ return object;
+ }
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object = Handle<JSArray>::cast(
+ isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> length = materializer.FieldAt(value_index);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ return object;
+ }
+ case JS_FUNCTION_TYPE: {
+ Handle<SharedFunctionInfo> temporary_shared =
+ isolate_->factory()->NewSharedFunctionInfo(
+ isolate_->factory()->empty_string(), MaybeHandle<Code>(), false);
+ Handle<JSFunction> object =
+ isolate_->factory()->NewFunctionFromSharedFunctionInfo(
+ map, temporary_shared, isolate_->factory()->undefined_value(),
+ NOT_TENURED);
+ slot->value_ = object;
+ Handle<Object> properties = materializer.FieldAt(value_index);
+ Handle<Object> elements = materializer.FieldAt(value_index);
+ Handle<Object> prototype = materializer.FieldAt(value_index);
+ Handle<Object> shared = materializer.FieldAt(value_index);
+ Handle<Object> context = materializer.FieldAt(value_index);
+ Handle<Object> literals = materializer.FieldAt(value_index);
+ Handle<Object> entry = materializer.FieldAt(value_index);
+ Handle<Object> next_link = materializer.FieldAt(value_index);
+ object->ReplaceCode(*isolate_->builtins()->CompileLazy());
+ object->set_map(*map);
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_prototype_or_initial_map(*prototype);
+ object->set_shared(SharedFunctionInfo::cast(*shared));
+ object->set_context(Context::cast(*context));
+ object->set_literals(LiteralsArray::cast(*literals));
+ CHECK(entry->IsNumber()); // Entry to compile lazy stub.
+ CHECK(next_link->IsUndefined(isolate_));
+ return object;
+ }
+ case CONS_STRING_TYPE: {
+ Handle<ConsString> object = Handle<ConsString>::cast(
+ isolate_->factory()
+ ->NewConsString(isolate_->factory()->undefined_string(),
+ isolate_->factory()->undefined_string())
+ .ToHandleChecked());
+ slot->value_ = object;
+ Handle<Object> hash = materializer.FieldAt(value_index);
+ Handle<Object> length = materializer.FieldAt(value_index);
+ Handle<Object> first = materializer.FieldAt(value_index);
+ Handle<Object> second = materializer.FieldAt(value_index);
+ object->set_map(*map);
+ object->set_length(Smi::cast(*length)->value());
+ object->set_first(String::cast(*first));
+ object->set_second(String::cast(*second));
+ CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
+ return object;
+ }
+ case CONTEXT_EXTENSION_TYPE: {
+ Handle<ContextExtension> object =
+ isolate_->factory()->NewContextExtension(
+ isolate_->factory()->NewScopeInfo(1),
+ isolate_->factory()->undefined_value());
+ slot->value_ = object;
+ Handle<Object> scope_info = materializer.FieldAt(value_index);
+ Handle<Object> extension = materializer.FieldAt(value_index);
+ object->set_scope_info(ScopeInfo::cast(*scope_info));
+ object->set_extension(*extension);
+ return object;
+ }
+ case FIXED_ARRAY_TYPE: {
+ Handle<Object> lengthObject = materializer.FieldAt(value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArray> object = isolate_->factory()->NewFixedArray(length);
+ // We need to set the map, because the fixed array we are
+ // materializing could be a context or an arguments object,
+ // in which case we must retain that information.
+ object->set_map(*map);
+ slot->value_ = object;
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ object->set(i, *value);
+ }
+ return object;
+ }
+ case FIXED_DOUBLE_ARRAY_TYPE: {
+ DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
+ Handle<Object> lengthObject = materializer.FieldAt(value_index);
+ int32_t length = 0;
+ CHECK(lengthObject->ToInt32(&length));
+ Handle<FixedArrayBase> object =
+ isolate_->factory()->NewFixedDoubleArray(length);
+ slot->value_ = object;
+ if (length > 0) {
+ Handle<FixedDoubleArray> double_array =
+ Handle<FixedDoubleArray>::cast(object);
+ for (int i = 0; i < length; ++i) {
+ Handle<Object> value = materializer.FieldAt(value_index);
+ CHECK(value->IsNumber());
+ double_array->set(i, value->Number());
+ }
+ }
+ return object;
+ }
+ case STRING_TYPE:
+ case ONE_BYTE_STRING_TYPE:
+ case CONS_ONE_BYTE_STRING_TYPE:
+ case SLICED_STRING_TYPE:
+ case SLICED_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+ case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case INTERNALIZED_STRING_TYPE:
+ case ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
+ case SYMBOL_TYPE:
+ case ODDBALL_TYPE:
+ case SIMD128_VALUE_TYPE:
+ case JS_GLOBAL_OBJECT_TYPE:
+ case JS_GLOBAL_PROXY_TYPE:
+ case JS_API_OBJECT_TYPE:
+ case JS_SPECIAL_API_OBJECT_TYPE:
+ case JS_VALUE_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
+ case JS_DATE_TYPE:
+ case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
+ case JS_MODULE_NAMESPACE_TYPE:
+ case JS_ARRAY_BUFFER_TYPE:
+ case JS_REGEXP_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
+ case JS_DATA_VIEW_TYPE:
+ case JS_SET_TYPE:
+ case JS_MAP_TYPE:
+ case JS_SET_ITERATOR_TYPE:
+ case JS_MAP_ITERATOR_TYPE:
+ case JS_STRING_ITERATOR_TYPE:
+ case JS_WEAK_MAP_TYPE:
+ case JS_WEAK_SET_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
+ case JS_PROMISE_TYPE:
+ case JS_BOUND_FUNCTION_TYPE:
+ case JS_PROXY_TYPE:
+ case MAP_TYPE:
+ case ALLOCATION_SITE_TYPE:
+ case ACCESSOR_INFO_TYPE:
+ case SHARED_FUNCTION_INFO_TYPE:
+ case FUNCTION_TEMPLATE_INFO_TYPE:
+ case ACCESSOR_PAIR_TYPE:
+ case BYTE_ARRAY_TYPE:
+ case BYTECODE_ARRAY_TYPE:
+ case TRANSITION_ARRAY_TYPE:
+ case FOREIGN_TYPE:
+ case SCRIPT_TYPE:
+ case CODE_TYPE:
+ case PROPERTY_CELL_TYPE:
+ case MODULE_TYPE:
+ case MODULE_INFO_ENTRY_TYPE:
+ case FREE_SPACE_TYPE:
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE:
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ case FILLER_TYPE:
+ case ACCESS_CHECK_INFO_TYPE:
+ case INTERCEPTOR_INFO_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
+ case OBJECT_TEMPLATE_INFO_TYPE:
+ case ALLOCATION_MEMENTO_TYPE:
+ case TYPE_FEEDBACK_INFO_TYPE:
+ case ALIASED_ARGUMENTS_ENTRY_TYPE:
+ case BOX_TYPE:
+ case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
+ case PROMISE_REACTION_JOB_INFO_TYPE:
+ case DEBUG_INFO_TYPE:
+ case BREAK_POINT_INFO_TYPE:
+ case CELL_TYPE:
+ case WEAK_CELL_TYPE:
+ case PROTOTYPE_INFO_TYPE:
+ case TUPLE2_TYPE:
+ case TUPLE3_TYPE:
+ case CONSTANT_ELEMENTS_PAIR_TYPE:
+ OFStream os(stderr);
+ os << "[couldn't handle instance type " << map->instance_type() << "]"
+ << std::endl;
+ UNREACHABLE();
+ break;
+ }
+ UNREACHABLE();
+ return Handle<Object>::null();
+}
Handle<Object> TranslatedState::MaterializeAt(int frame_index,
int* value_index) {
+ CHECK_LT(static_cast<size_t>(frame_index), frames().size());
TranslatedFrame* frame = &(frames_[frame_index]);
- CHECK(static_cast<size_t>(*value_index) < frame->values_.size());
+ CHECK_LT(static_cast<size_t>(*value_index), frame->values_.size());
TranslatedValue* slot = &(frame->values_[*value_index]);
(*value_index)++;
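CapturedObjectMaterializer, introduced above, is a small RAII guard around the recursive field walk: each FieldAt() spends one unit of a per-object budget, and the destructor CHECKs that the budget reached exactly zero, so an instance-type case that reads too few or too many slots fails loudly instead of silently desynchronizing value_index for every later object. A self-contained restatement of the invariant (not V8 code):

    #include <cassert>

    class FieldBudget {
     public:
      explicit FieldBudget(int count) : count_(count) {}
      ~FieldBudget() { assert(count_ == 0); }  // every field consumed exactly once
      void Consume() {
        assert(count_ > 0);  // never read past the recorded field count
        --count_;
      }
     private:
      int count_;
    };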
@@ -3670,176 +3986,11 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
return arguments;
}
case TranslatedValue::kCapturedObject: {
- int length = slot->GetChildrenCount();
-
// The map must be a tagged object.
CHECK(frame->values_[*value_index].kind() == TranslatedValue::kTagged);
-
- Handle<Object> result;
- if (slot->value_.ToHandle(&result)) {
- // This has been previously materialized, return the previous value.
- // We still need to skip all the nested objects.
- for (int i = 0; i < length; i++) {
- MaterializeAt(frame_index, value_index);
- }
-
- return result;
- }
-
- Handle<Object> map_object = MaterializeAt(frame_index, value_index);
- Handle<Map> map =
- Map::GeneralizeAllFieldRepresentations(Handle<Map>::cast(map_object));
- switch (map->instance_type()) {
- case MUTABLE_HEAP_NUMBER_TYPE:
- case HEAP_NUMBER_TYPE: {
- // Reuse the HeapNumber value directly as it is already properly
- // tagged and skip materializing the HeapNumber explicitly.
- Handle<Object> object = MaterializeAt(frame_index, value_index);
- slot->value_ = object;
- // On 32-bit architectures, there is an extra slot there because
- // the escape analysis calculates the number of slots as
- // object-size/pointer-size. To account for this, we read out
- // any extra slots.
- for (int i = 0; i < length - 2; i++) {
- MaterializeAt(frame_index, value_index);
- }
- return object;
- }
- case JS_OBJECT_TYPE:
- case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE: {
- Handle<JSObject> object =
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED);
- slot->value_ = object;
- Handle<Object> properties = MaterializeAt(frame_index, value_index);
- Handle<Object> elements = MaterializeAt(frame_index, value_index);
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- for (int i = 0; i < length - 3; ++i) {
- Handle<Object> value = MaterializeAt(frame_index, value_index);
- FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
- object->FastPropertyAtPut(index, *value);
- }
- return object;
- }
- case JS_ARRAY_TYPE: {
- Handle<JSArray> object = Handle<JSArray>::cast(
- isolate_->factory()->NewJSObjectFromMap(map, NOT_TENURED));
- slot->value_ = object;
- Handle<Object> properties = MaterializeAt(frame_index, value_index);
- Handle<Object> elements = MaterializeAt(frame_index, value_index);
- Handle<Object> length = MaterializeAt(frame_index, value_index);
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_length(*length);
- return object;
- }
- case JS_FUNCTION_TYPE: {
- Handle<SharedFunctionInfo> temporary_shared =
- isolate_->factory()->NewSharedFunctionInfo(
- isolate_->factory()->empty_string(), MaybeHandle<Code>(),
- false);
- Handle<JSFunction> object =
- isolate_->factory()->NewFunctionFromSharedFunctionInfo(
- map, temporary_shared, isolate_->factory()->undefined_value(),
- NOT_TENURED);
- slot->value_ = object;
- Handle<Object> properties = MaterializeAt(frame_index, value_index);
- Handle<Object> elements = MaterializeAt(frame_index, value_index);
- Handle<Object> prototype = MaterializeAt(frame_index, value_index);
- Handle<Object> shared = MaterializeAt(frame_index, value_index);
- Handle<Object> context = MaterializeAt(frame_index, value_index);
- Handle<Object> literals = MaterializeAt(frame_index, value_index);
- Handle<Object> entry = MaterializeAt(frame_index, value_index);
- Handle<Object> next_link = MaterializeAt(frame_index, value_index);
- object->ReplaceCode(*isolate_->builtins()->CompileLazy());
- object->set_map(*map);
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_prototype_or_initial_map(*prototype);
- object->set_shared(SharedFunctionInfo::cast(*shared));
- object->set_context(Context::cast(*context));
- object->set_literals(LiteralsArray::cast(*literals));
- CHECK(entry->IsNumber()); // Entry to compile lazy stub.
- CHECK(next_link->IsUndefined(isolate_));
- return object;
- }
- case CONS_STRING_TYPE: {
- Handle<ConsString> object = Handle<ConsString>::cast(
- isolate_->factory()
- ->NewConsString(isolate_->factory()->undefined_string(),
- isolate_->factory()->undefined_string())
- .ToHandleChecked());
- slot->value_ = object;
- Handle<Object> hash = MaterializeAt(frame_index, value_index);
- Handle<Object> length = MaterializeAt(frame_index, value_index);
- Handle<Object> first = MaterializeAt(frame_index, value_index);
- Handle<Object> second = MaterializeAt(frame_index, value_index);
- object->set_map(*map);
- object->set_length(Smi::cast(*length)->value());
- object->set_first(String::cast(*first));
- object->set_second(String::cast(*second));
- CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
- return object;
- }
- case CONTEXT_EXTENSION_TYPE: {
- Handle<ContextExtension> object =
- isolate_->factory()->NewContextExtension(
- isolate_->factory()->NewScopeInfo(1),
- isolate_->factory()->undefined_value());
- slot->value_ = object;
- Handle<Object> scope_info = MaterializeAt(frame_index, value_index);
- Handle<Object> extension = MaterializeAt(frame_index, value_index);
- object->set_scope_info(ScopeInfo::cast(*scope_info));
- object->set_extension(*extension);
- return object;
- }
- case FIXED_ARRAY_TYPE: {
- Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
- int32_t length = 0;
- CHECK(lengthObject->ToInt32(&length));
- Handle<FixedArray> object =
- isolate_->factory()->NewFixedArray(length);
- // We need to set the map, because the fixed array we are
- // materializing could be a context or an arguments object,
- // in which case we must retain that information.
- object->set_map(*map);
- slot->value_ = object;
- for (int i = 0; i < length; ++i) {
- Handle<Object> value = MaterializeAt(frame_index, value_index);
- object->set(i, *value);
- }
- return object;
- }
- case FIXED_DOUBLE_ARRAY_TYPE: {
- DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
- Handle<Object> lengthObject = MaterializeAt(frame_index, value_index);
- int32_t length = 0;
- CHECK(lengthObject->ToInt32(&length));
- Handle<FixedArrayBase> object =
- isolate_->factory()->NewFixedDoubleArray(length);
- slot->value_ = object;
- if (length > 0) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(object);
- for (int i = 0; i < length; ++i) {
- Handle<Object> value = MaterializeAt(frame_index, value_index);
- CHECK(value->IsNumber());
- double_array->set(i, value->Number());
- }
- }
- return object;
- }
- default:
- PrintF(stderr, "[couldn't handle instance type %d]\n",
- map->instance_type());
- FATAL("unreachable");
- return Handle<Object>::null();
- }
- UNREACHABLE();
- break;
+ CHECK(frame->values_[*value_index].GetValue()->IsMap());
+ return MaterializeCapturedObjectAt(slot, frame_index, value_index);
}
-
case TranslatedValue::kDuplicatedObject: {
int object_index = slot->object_index();
TranslatedState::ObjectPosition pos = object_positions_[object_index];
@@ -3869,13 +4020,12 @@ Handle<Object> TranslatedState::MaterializeAt(int frame_index,
return Handle<Object>::null();
}
-
Handle<Object> TranslatedState::MaterializeObjectAt(int object_index) {
+ CHECK_LT(static_cast<size_t>(object_index), object_positions_.size());
TranslatedState::ObjectPosition pos = object_positions_[object_index];
return MaterializeAt(pos.frame_index_, &(pos.value_index_));
}
-
bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
int frame_index) {
if (frame_index == 0) {
@@ -3915,7 +4065,6 @@ bool TranslatedState::GetAdaptedArguments(Handle<JSObject>* result,
}
}
-
TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
int jsframe_index, int* args_count) {
for (size_t i = 0; i < frames_.size(); i++) {
@@ -3924,7 +4073,8 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
if (jsframe_index > 0) {
jsframe_index--;
} else {
- // We have the JS function frame, now check if it has arguments adaptor.
+ // We have the JS function frame; now check if it has an arguments
+ // adaptor.
if (i > 0 &&
frames_[i - 1].kind() == TranslatedFrame::kArgumentsAdaptor) {
*args_count = frames_[i - 1].height();
@@ -3939,8 +4089,7 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
return nullptr;
}
-
-void TranslatedState::StoreMaterializedValuesAndDeopt() {
+void TranslatedState::StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame) {
MaterializedObjectStore* materialized_store =
isolate_->materialized_object_store();
Handle<FixedArray> previously_materialized_objects =
@@ -3986,12 +4135,11 @@ void TranslatedState::StoreMaterializedValuesAndDeopt() {
CHECK(frames_[0].kind() == TranslatedFrame::kFunction ||
frames_[0].kind() == TranslatedFrame::kInterpretedFunction ||
frames_[0].kind() == TranslatedFrame::kTailCallerFunction);
- Object* const function = frames_[0].front().GetRawValue();
- Deoptimizer::DeoptimizeFunction(JSFunction::cast(function));
+ CHECK_EQ(frame->function(), frames_[0].front().GetRawValue());
+ Deoptimizer::DeoptimizeFunction(frame->function(), frame->LookupCode());
}
}
-
void TranslatedState::UpdateFromPreviouslyMaterializedObjects() {
MaterializedObjectStore* materialized_store =
isolate_->materialized_object_store();
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 4d84fb76e8..7d74af9958 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -254,7 +254,7 @@ class TranslatedState {
void Prepare(bool has_adapted_arguments, Address stack_frame_pointer);
// Store newly materialized values into the isolate.
- void StoreMaterializedValuesAndDeopt();
+ void StoreMaterializedValuesAndDeopt(JavaScriptFrame* frame);
typedef std::vector<TranslatedFrame>::iterator iterator;
iterator begin() { return frames_.begin(); }
@@ -292,6 +292,9 @@ class TranslatedState {
void UpdateFromPreviouslyMaterializedObjects();
Handle<Object> MaterializeAt(int frame_index, int* value_index);
Handle<Object> MaterializeObjectAt(int object_index);
+ class CapturedObjectMaterializer;
+ Handle<Object> MaterializeCapturedObjectAt(TranslatedValue* slot,
+ int frame_index, int* value_index);
bool GetAdaptedArguments(Handle<JSObject>* result, int frame_index);
static uint32_t GetUInt32Slot(Address fp, int slot_index);
@@ -419,8 +422,9 @@ class Deoptimizer : public Malloced {
// Deoptimize the function now. Its current optimized code will never be run
// again and any activations of the optimized code will get deoptimized when
- // execution returns.
- static void DeoptimizeFunction(JSFunction* function);
+ // execution returns. If {code} is specified then the given code is targeted
+ // instead of the function code (e.g. OSR code not installed on function).
+ static void DeoptimizeFunction(JSFunction* function, Code* code = nullptr);
// Deoptimize all code in the given isolate.
static void DeoptimizeAll(Isolate* isolate);
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 7bb75c4c9c..8651b7681d 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/base/lazy-instance.h"
#include "src/elements.h"
+#include "src/objects-inl.h"
#include "src/objects.h"
namespace v8 {
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index ccbdb40706..121f6fc2d9 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -185,14 +185,15 @@ static void CopyDictionaryToObjectElements(
WriteBarrierMode write_barrier_mode = IsFastObjectElementsKind(to_kind)
? UPDATE_WRITE_BARRIER
: SKIP_WRITE_BARRIER;
+ Isolate* isolate = from->GetIsolate();
for (int i = 0; i < copy_size; i++) {
int entry = from->FindEntry(i + from_start);
if (entry != SeededNumberDictionary::kNotFound) {
Object* value = from->ValueAt(entry);
- DCHECK(!value->IsTheHole(from->GetIsolate()));
+ DCHECK(!value->IsTheHole(isolate));
to->set(i + to_start, value, write_barrier_mode);
} else {
- to->set_the_hole(i + to_start);
+ to->set_the_hole(isolate, i + to_start);
}
}
}
@@ -603,7 +604,7 @@ class ElementsAccessorBase : public ElementsAccessor {
static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
uint32_t index,
Handle<FixedArrayBase> backing_store,
- PropertyFilter filter) {
+ PropertyFilter filter = ALL_PROPERTIES) {
return Subclass::GetEntryForIndexImpl(isolate, *holder, *backing_store,
index, filter) != kMaxUInt32;
}
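The defaulted filter lets new call sites omit it, as the CreateListFromArrayImpl added further down does; the two forms are equivalent (illustrative call shape only):

    bool present = Subclass::HasElementImpl(isolate, holder, index, backing_store);
    bool same = Subclass::HasElementImpl(isolate, holder, index, backing_store,
                                         ALL_PROPERTIES);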
@@ -618,15 +619,16 @@ class ElementsAccessorBase : public ElementsAccessor {
}
Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
- return Subclass::GetImpl(holder, entry);
+ return Subclass::GetInternalImpl(holder, entry);
}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return Subclass::GetImpl(holder->elements(), entry);
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
}
- static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
- Isolate* isolate = backing_store->GetIsolate();
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
uint32_t index = GetIndexForEntryImpl(backing_store, entry);
return handle(BackingStore::cast(backing_store)->get(index), isolate);
}
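This is the pattern repeated through the rest of elements.cc: GetImpl now takes the Isolate* explicitly, so call sites that loop over elements resolve it once instead of re-deriving it from the backing store on every load. The call-shape change, for illustration:

    // Old shape: the isolate was re-derived inside GetImpl on every load.
    //   Handle<Object> v = Subclass::GetImpl(holder->elements(), entry);
    // New shape: the caller resolves it once and threads it through.
    Isolate* isolate = holder->GetIsolate();
    Handle<Object> v = Subclass::GetImpl(isolate, holder->elements(), entry);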
@@ -758,13 +760,10 @@ class ElementsAccessorBase : public ElementsAccessor {
}
if (2 * length <= capacity) {
// If more than half the elements won't be used, trim the array.
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *backing_store, capacity - length);
+ isolate->heap()->RightTrimFixedArray(*backing_store, capacity - length);
} else {
// Otherwise, fill the unused tail with holes.
- for (uint32_t i = length; i < old_length; i++) {
- BackingStore::cast(*backing_store)->set_the_hole(i);
- }
+ BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
}
} else {
// Check whether the backing store should be expanded.
@@ -1034,7 +1033,7 @@ class ElementsAccessorBase : public ElementsAccessor {
PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);
if (details.kind() == kData) {
- value = Subclass::GetImpl(object, entry);
+ value = Subclass::GetImpl(isolate, object->elements(), entry);
} else {
LookupIterator it(isolate, object, index, LookupIterator::OWN);
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -1248,17 +1247,28 @@ class ElementsAccessorBase : public ElementsAccessor {
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
- return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
- return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
}
PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
return Subclass::GetDetailsImpl(holder, entry);
}
+ Handle<FixedArray> CreateListFromArray(Isolate* isolate,
+ Handle<JSArray> array) final {
+ return Subclass::CreateListFromArrayImpl(isolate, array);
+ }
+
+ static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
+ Handle<JSArray> array) {
+ UNREACHABLE();
+ return Handle<FixedArray>();
+ }
+
private:
DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase);
};
@@ -1374,7 +1384,7 @@ class DictionaryElementsAccessor
if (!dict->IsKey(isolate, key)) continue;
DCHECK(!dict->IsDeleted(i));
PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == ACCESSOR_CONSTANT) return true;
+ if (details.kind() == kAccessor) return true;
}
return false;
}
@@ -1384,12 +1394,9 @@ class DictionaryElementsAccessor
return backing_store->ValueAt(entry);
}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return GetImpl(holder->elements(), entry);
- }
-
- static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
- return handle(GetRaw(backing_store, entry), backing_store->GetIsolate());
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
+ return handle(GetRaw(backing_store, entry), isolate);
}
static inline void SetImpl(Handle<JSObject> holder, uint32_t entry,
@@ -1410,7 +1417,7 @@ class DictionaryElementsAccessor
if (attributes != NONE) object->RequireSlowElements(dictionary);
dictionary->ValueAtPut(entry, *value);
PropertyDetails details = dictionary->DetailsAt(entry);
- details = PropertyDetails(attributes, DATA, details.dictionary_index(),
+ details = PropertyDetails(kData, attributes, details.dictionary_index(),
PropertyCellType::kNoCell);
dictionary->DetailsAtPut(entry, details);
}
@@ -1418,15 +1425,14 @@ class DictionaryElementsAccessor
static void AddImpl(Handle<JSObject> object, uint32_t index,
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) {
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> dictionary =
object->HasFastElements() || object->HasFastStringWrapperElements()
? JSObject::NormalizeElements(object)
: handle(SeededNumberDictionary::cast(object->elements()));
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(
- dictionary, index, value, details,
- object->map()->is_prototype_map());
+ SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+ details, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (dictionary.is_identical_to(new_dictionary)) return;
object->set_elements(*new_dictionary);
@@ -1588,7 +1594,7 @@ class DictionaryElementsAccessor
continue;
}
- if (dictionary->DetailsAt(i).type() == ACCESSOR_CONSTANT) {
+ if (dictionary->DetailsAt(i).kind() == kAccessor) {
// Restart from beginning in slow path, otherwise we may observably
// access getters out of order
return false;
@@ -1766,15 +1772,14 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
SeededNumberDictionary::New(isolate, capacity);
PropertyDetails details = PropertyDetails::Empty();
- bool used_as_prototype = object->map()->is_prototype_map();
int j = 0;
for (int i = 0; j < capacity; i++) {
if (IsHoleyElementsKind(kind)) {
if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
}
- Handle<Object> value = Subclass::GetImpl(*store, i);
- dictionary = SeededNumberDictionary::AddNumberEntry(
- dictionary, i, value, details, used_as_prototype);
+ Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
+ dictionary = SeededNumberDictionary::AddNumberEntry(dictionary, i, value,
+ details, object);
j++;
}
return dictionary;
@@ -1799,8 +1804,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
return;
}
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *backing_store, length - entry);
+ isolate->heap()->RightTrimFixedArray(*backing_store, length - entry);
}
static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
@@ -1816,7 +1820,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
Isolate* isolate = obj->GetIsolate();
- backing_store->set_the_hole(entry);
+ backing_store->set_the_hole(isolate, entry);
// TODO(verwaest): Move this out of elements.cc.
// If an old space backing store is larger than a certain size and
@@ -1934,7 +1938,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
for (uint32_t i = 0; i < length; i++) {
if (IsFastPackedElementsKind(KindTraits::Kind) ||
HasEntryImpl(isolate, *elements, i)) {
- accumulator->AddKey(Subclass::GetImpl(*elements, i), convert);
+ accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert);
}
}
}
@@ -2075,7 +2079,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
uint32_t length = elements->length();
for (uint32_t index = 0; index < length; ++index) {
if (!HasEntryImpl(isolate, *elements, index)) continue;
- Handle<Object> value = Subclass::GetImpl(*elements, index);
+ Handle<Object> value = Subclass::GetImpl(isolate, *elements, index);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -2265,6 +2269,24 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
}
}
+ static Handle<FixedArray> CreateListFromArrayImpl(Isolate* isolate,
+ Handle<JSArray> array) {
+ uint32_t length = 0;
+ array->length()->ToArrayLength(&length);
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
+ Handle<FixedArrayBase> elements(array->elements(), isolate);
+ for (uint32_t i = 0; i < length; i++) {
+ if (!Subclass::HasElementImpl(isolate, array, i, elements)) continue;
+ Handle<Object> value;
+ value = Subclass::GetImpl(isolate, *elements, i);
+ if (value->IsName()) {
+ value = isolate->factory()->InternalizeName(Handle<Name>::cast(value));
+ }
+ result->set(i, *value);
+ }
+ return result;
+ }
+
private:
// SpliceShrinkStep might modify the backing_store.
static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver,
@@ -2323,7 +2345,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
DCHECK(length > 0);
int new_length = length - 1;
int remove_index = remove_position == AT_START ? 0 : new_length;
- Handle<Object> result = Subclass::GetImpl(*backing_store, remove_index);
+ Handle<Object> result =
+ Subclass::GetImpl(isolate, *backing_store, remove_index);
if (remove_position == AT_START) {
Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
0, 0);
@@ -2544,12 +2567,8 @@ class FastDoubleElementsAccessor
explicit FastDoubleElementsAccessor(const char* name)
: FastElementsAccessor<Subclass, KindTraits>(name) {}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return GetImpl(holder->elements(), entry);
- }
-
- static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
- Isolate* isolate = backing_store->GetIsolate();
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
return FixedDoubleArray::get(FixedDoubleArray::cast(backing_store), entry,
isolate);
}
@@ -2696,21 +2715,18 @@ class TypedElementsAccessor
BackingStore::cast(backing_store)->SetValue(entry, value);
}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return GetImpl(holder->elements(), entry);
- }
-
- static Handle<Object> GetImpl(FixedArrayBase* backing_store, uint32_t entry) {
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
+ uint32_t entry) {
return BackingStore::get(BackingStore::cast(backing_store), entry);
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
- return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
}
static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
uint32_t entry) {
- return PropertyDetails(DONT_DELETE, DATA, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
}
static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
@@ -2749,10 +2765,14 @@ class TypedElementsAccessor
: kMaxUInt32;
}
+ static bool WasNeutered(JSObject* holder) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(holder);
+ return view->WasNeutered();
+ }
+
static uint32_t GetCapacityImpl(JSObject* holder,
FixedArrayBase* backing_store) {
- JSArrayBufferView* view = JSArrayBufferView::cast(holder);
- if (view->WasNeutered()) return 0;
+ if (WasNeutered(holder)) return 0;
return backing_store->length();
}
@@ -2764,10 +2784,11 @@ class TypedElementsAccessor
static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver,
KeyAccumulator* accumulator,
AddKeyConversion convert) {
+ Isolate* isolate = receiver->GetIsolate();
Handle<FixedArrayBase> elements(receiver->elements());
uint32_t length = AccessorClass::GetCapacityImpl(*receiver, *elements);
for (uint32_t i = 0; i < length; i++) {
- Handle<Object> value = AccessorClass::GetImpl(*elements, i);
+ Handle<Object> value = AccessorClass::GetImpl(isolate, *elements, i);
accumulator->AddKey(value, convert);
}
}
@@ -2781,7 +2802,8 @@ class TypedElementsAccessor
Handle<FixedArrayBase> elements(object->elements());
uint32_t length = AccessorClass::GetCapacityImpl(*object, *elements);
for (uint32_t index = 0; index < length; ++index) {
- Handle<Object> value = AccessorClass::GetImpl(*elements, index);
+ Handle<Object> value =
+ AccessorClass::GetImpl(isolate, *elements, index);
if (get_entries) {
value = MakeEntryPair(isolate, index, value);
}
@@ -2799,6 +2821,12 @@ class TypedElementsAccessor
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
+ // TODO(caitp): return Just(false) here when implementing strict throwing on
+ // neutered views.
+ if (WasNeutered(*receiver)) {
+ return Just(value->IsUndefined(isolate) && length > start_from);
+ }
+
BackingStore* elements = BackingStore::cast(receiver->elements());
if (value->IsUndefined(isolate) &&
length > static_cast<uint32_t>(elements->length())) {
@@ -2848,6 +2876,8 @@ class TypedElementsAccessor
DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
DisallowHeapAllocation no_gc;
+ if (WasNeutered(*receiver)) return Just<int64_t>(-1);
+
BackingStore* elements = BackingStore::cast(receiver->elements());
if (!value->IsNumber()) return Just<int64_t>(-1);
@@ -2904,12 +2934,8 @@ class SloppyArgumentsElementsAccessor
USE(KindTraits::Kind);
}
- static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
- return GetImpl(holder->elements(), entry);
- }
-
- static Handle<Object> GetImpl(FixedArrayBase* parameters, uint32_t entry) {
- Isolate* isolate = parameters->GetIsolate();
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* parameters,
+ uint32_t entry) {
Handle<FixedArray> parameter_map(FixedArray::cast(parameters), isolate);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
@@ -2922,7 +2948,7 @@ class SloppyArgumentsElementsAccessor
} else {
// Object is not mapped, defer to the arguments.
Handle<Object> result = ArgumentsAccessor::GetImpl(
- FixedArray::cast(parameter_map->get(1)), entry - length);
+ isolate, FixedArray::cast(parameter_map->get(1)), entry - length);
// Elements of the arguments object in slow mode might be slow aliases.
if (result->IsAliasedArgumentsEntry()) {
DisallowHeapAllocation no_gc;
@@ -3020,7 +3046,7 @@ class SloppyArgumentsElementsAccessor
uint32_t length = GetCapacityImpl(*receiver, *elements);
for (uint32_t entry = 0; entry < length; entry++) {
if (!HasEntryImpl(isolate, *elements, entry)) continue;
- Handle<Object> value = GetImpl(*elements, entry);
+ Handle<Object> value = GetImpl(isolate, *elements, entry);
accumulator->AddKey(value, convert);
}
}
@@ -3071,7 +3097,7 @@ class SloppyArgumentsElementsAccessor
FixedArray* parameter_map = FixedArray::cast(holder->elements());
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
- return PropertyDetails(NONE, DATA, 0, PropertyCellType::kNoCell);
+ return PropertyDetails(kData, NONE, 0, PropertyCellType::kNoCell);
}
FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
return ArgumentsAccessor::GetDetailsImpl(arguments, entry - length);
@@ -3156,7 +3182,8 @@ class SloppyArgumentsElementsAccessor
continue;
}
- Handle<Object> element_k = GetImpl(*parameter_map, entry);
+ Handle<Object> element_k =
+ Subclass::GetImpl(isolate, *parameter_map, entry);
if (element_k->IsAccessorPair()) {
LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3195,7 +3222,8 @@ class SloppyArgumentsElementsAccessor
continue;
}
- Handle<Object> element_k = GetImpl(*parameter_map, entry);
+ Handle<Object> element_k =
+ Subclass::GetImpl(isolate, *parameter_map, entry);
if (element_k->IsAccessorPair()) {
LookupIterator it(isolate, object, k, LookupIterator::OWN);
@@ -3256,11 +3284,10 @@ class SlowSloppyArgumentsElementsAccessor
old_elements->IsSeededNumberDictionary()
? Handle<SeededNumberDictionary>::cast(old_elements)
: JSObject::NormalizeElements(object);
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> new_dictionary =
- SeededNumberDictionary::AddNumberEntry(
- dictionary, index, value, details,
- object->map()->is_prototype_map());
+ SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
+ details, object);
if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
if (*dictionary != *new_dictionary) {
FixedArray::cast(object->elements())->set(1, *new_dictionary);
@@ -3283,17 +3310,17 @@ class SlowSloppyArgumentsElementsAccessor
context->set(context_entry, *value);
// Redefining attributes of an aliased element destroys fast aliasing.
- parameter_map->set_the_hole(entry + 2);
+ parameter_map->set_the_hole(isolate, entry + 2);
// For elements that are still writable we re-establish slow aliasing.
if ((attributes & READ_ONLY) == 0) {
value = isolate->factory()->NewAliasedArgumentsEntry(context_entry);
}
- PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
Handle<SeededNumberDictionary> arguments(
SeededNumberDictionary::cast(parameter_map->get(1)), isolate);
arguments = SeededNumberDictionary::AddNumberEntry(
- arguments, entry, value, details, object->map()->is_prototype_map());
+ arguments, entry, value, details, object);
// If the attributes were NONE, we would have called set rather than
// reconfigure.
DCHECK_NE(NONE, attributes);
@@ -3340,9 +3367,9 @@ class FastSloppyArgumentsElementsAccessor
uint32_t entry = GetEntryForIndexImpl(isolate, *receiver, parameters, i,
ALL_PROPERTIES);
if (entry != kMaxUInt32 && HasEntryImpl(isolate, parameters, entry)) {
- elements->set(insertion_index, *GetImpl(parameters, entry));
+ elements->set(insertion_index, *GetImpl(isolate, parameters, entry));
} else {
- elements->set_the_hole(insertion_index);
+ elements->set_the_hole(isolate, insertion_index);
}
insertion_index++;
}
@@ -3440,6 +3467,11 @@ class StringWrapperElementsAccessor
USE(KindTraits::Kind);
}
+ static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
+ uint32_t entry) {
+ return GetImpl(holder, entry);
+ }
+
static Handle<Object> GetImpl(Handle<JSObject> holder, uint32_t entry) {
Isolate* isolate = holder->GetIsolate();
Handle<String> string(GetString(*holder), isolate);
@@ -3448,7 +3480,14 @@ class StringWrapperElementsAccessor
return isolate->factory()->LookupSingleCharacterStringFromCode(
String::Flatten(string)->Get(entry));
}
- return BackingStoreAccessor::GetImpl(holder, entry - length);
+ return BackingStoreAccessor::GetImpl(isolate, holder->elements(),
+ entry - length);
+ }
+
+ static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* elements,
+ uint32_t entry) {
+ UNREACHABLE();
+ return Handle<Object>();
}
static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
@@ -3456,8 +3495,7 @@ class StringWrapperElementsAccessor
if (entry < length) {
PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- return PropertyDetails(attributes, v8::internal::DATA, 0,
- PropertyCellType::kNoCell);
+ return PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
}
return BackingStoreAccessor::GetDetailsImpl(holder, entry - length);
}
@@ -3672,9 +3710,9 @@ MaybeHandle<Object> ArrayConstructInitializeElements(Handle<JSArray> array,
JSArray::Initialize(array, JSArray::kPreallocatedArrayElements);
return array;
- } else if (args->length() == 1 && args->at<Object>(0)->IsNumber()) {
+ } else if (args->length() == 1 && args->at(0)->IsNumber()) {
uint32_t length;
- if (!args->at<Object>(0)->ToArrayLength(&length)) {
+ if (!args->at(0)->ToArrayLength(&length)) {
return ThrowArrayLengthRangeError(array->GetIsolate());
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index fc2e6a4fdb..28635d5bea 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -175,6 +175,9 @@ class ElementsAccessor {
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
+ virtual Handle<FixedArray> CreateListFromArray(Isolate* isolate,
+ Handle<JSArray> array) = 0;
+
protected:
friend class LookupIterator;
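The new virtual hook pairs with the fast-path CreateListFromArrayImpl added in elements.cc above, which walks the array once, skips holes, and internalizes any Name values so the resulting FixedArray is directly usable for key handling. A hypothetical call site, assuming the accessor is obtained from the array's elements kind:

    ElementsAccessor* accessor = array->GetElementsAccessor();
    Handle<FixedArray> list = accessor->CreateListFromArray(isolate, array);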
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 59421c70be..e5d4ad9d49 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -54,11 +54,10 @@ static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
namespace {
-MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
- Handle<Object> target,
- Handle<Object> receiver, int argc,
- Handle<Object> args[],
- Handle<Object> new_target) {
+MUST_USE_RESULT MaybeHandle<Object> Invoke(
+ Isolate* isolate, bool is_construct, Handle<Object> target,
+ Handle<Object> receiver, int argc, Handle<Object> args[],
+ Handle<Object> new_target, Execution::MessageHandling message_handling) {
DCHECK(!receiver->IsJSGlobalObject());
#ifdef USE_SIMULATOR
@@ -69,7 +68,9 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
StackLimitCheck check(isolate);
if (check.HasOverflowed()) {
isolate->StackOverflow();
- isolate->ReportPendingMessages();
+ if (message_handling == Execution::MessageHandling::kReport) {
+ isolate->ReportPendingMessages();
+ }
return MaybeHandle<Object>();
}
#endif
@@ -89,7 +90,9 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
bool has_exception = value.is_null();
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
- isolate->ReportPendingMessages();
+ if (message_handling == Execution::MessageHandling::kReport) {
+ isolate->ReportPendingMessages();
+ }
return MaybeHandle<Object>();
} else {
isolate->clear_pending_message();
@@ -103,7 +106,9 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
CHECK(AllowJavascriptExecution::IsAllowed(isolate));
if (!ThrowOnJavascriptExecution::IsAllowed(isolate)) {
isolate->ThrowIllegalOperation();
- isolate->ReportPendingMessages();
+ if (message_handling == Execution::MessageHandling::kReport) {
+ isolate->ReportPendingMessages();
+ }
return MaybeHandle<Object>();
}
@@ -150,7 +155,9 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
bool has_exception = value->IsException(isolate);
DCHECK(has_exception == isolate->has_pending_exception());
if (has_exception) {
- isolate->ReportPendingMessages();
+ if (message_handling == Execution::MessageHandling::kReport) {
+ isolate->ReportPendingMessages();
+ }
return MaybeHandle<Object>();
} else {
isolate->clear_pending_message();
@@ -159,13 +166,10 @@ MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
return Handle<Object>(value, isolate);
}
-} // namespace
-
-
-// static
-MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
- Handle<Object> receiver, int argc,
- Handle<Object> argv[]) {
+MaybeHandle<Object> CallInternal(Isolate* isolate, Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[],
+ Execution::MessageHandling message_handling) {
// Convert calls on global objects to be calls on the global
// receiver instead to avoid having a 'this' pointer which refers
// directly to a global object.
@@ -174,7 +178,17 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
handle(Handle<JSGlobalObject>::cast(receiver)->global_proxy(), isolate);
}
return Invoke(isolate, false, callable, receiver, argc, argv,
- isolate->factory()->undefined_value());
+ isolate->factory()->undefined_value(), message_handling);
+}
+
+} // namespace
+
+// static
+MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[]) {
+ return CallInternal(isolate, callable, receiver, argc, argv,
+ MessageHandling::kReport);
}
@@ -190,18 +204,21 @@ MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
Handle<Object> new_target, int argc,
Handle<Object> argv[]) {
return Invoke(isolate, true, constructor,
- isolate->factory()->undefined_value(), argc, argv, new_target);
+ isolate->factory()->undefined_value(), argc, argv, new_target,
+ MessageHandling::kReport);
}
-
MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> args[],
+ MessageHandling message_handling,
MaybeHandle<Object>* exception_out) {
bool is_termination = false;
MaybeHandle<Object> maybe_result;
if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
+ DCHECK_IMPLIES(message_handling == MessageHandling::kKeepPending,
+ exception_out == nullptr);
// Enter a try-block while executing the JavaScript code. To avoid
// duplicate error printing it must be non-verbose. Also, to avoid
// creating message objects during stack overflow we shouldn't
@@ -211,24 +228,25 @@ MaybeHandle<Object> Execution::TryCall(Isolate* isolate,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result = Call(isolate, callable, receiver, argc, args);
+ maybe_result =
+ CallInternal(isolate, callable, receiver, argc, args, message_handling);
if (maybe_result.is_null()) {
- DCHECK(catcher.HasCaught());
DCHECK(isolate->has_pending_exception());
- DCHECK(isolate->external_caught_exception());
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
is_termination = true;
} else {
- if (exception_out != NULL) {
+ if (exception_out != nullptr) {
+ DCHECK(catcher.HasCaught());
+ DCHECK(isolate->external_caught_exception());
*exception_out = v8::Utils::OpenHandle(*catcher.Exception());
}
}
- isolate->OptionalRescheduleException(true);
+ if (message_handling == MessageHandling::kReport) {
+ isolate->OptionalRescheduleException(true);
+ }
}
-
- DCHECK(!isolate->has_pending_exception());
}
// Re-request terminate execution interrupt to trigger later.
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 6f4bb331a3..ee5d3a6f73 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -15,6 +15,9 @@ namespace internal {
class Execution final : public AllStatic {
public:
+ // Whether to report pending messages, or keep them pending on the isolate.
+ enum class MessageHandling { kReport, kKeepPending };
+
// Call a function, the caller supplies a receiver and an array
// of arguments.
//
@@ -36,16 +39,18 @@ class Execution final : public AllStatic {
int argc,
Handle<Object> argv[]);
- // Call a function, just like Call(), but make sure to silently catch
- // any thrown exceptions. The return value is either the result of
- // calling the function (if caught exception is false) or the exception
- // that occurred (if caught exception is true).
- // In the exception case, exception_out holds the caught exceptions, unless
- // it is a termination exception.
+  // Call a function, just like Call(), but handle thrown exceptions
+  // instead of reporting them externally.
+  // The return value is either the result of calling the function (if no
+  // exception occurred), or an empty handle.
+  // If message_handling is MessageHandling::kReport, exceptions (except
+  // for termination exceptions) will be stored in exception_out (if it is
+  // not nullptr).
static MaybeHandle<Object> TryCall(Isolate* isolate, Handle<Object> callable,
Handle<Object> receiver, int argc,
Handle<Object> argv[],
- MaybeHandle<Object>* exception_out = NULL);
+ MessageHandling message_handling,
+ MaybeHandle<Object>* exception_out);
};
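Note: a hedged usage sketch of the new TryCall contract (the callable/receiver
handles and argument counts are illustrative; per the DCHECK added in
execution.cc, exception_out must be nullptr in kKeepPending mode):

    // Report the message, but capture the thrown value for inspection.
    MaybeHandle<Object> exception;
    MaybeHandle<Object> result =
        Execution::TryCall(isolate, callable, receiver, argc, argv,
                           Execution::MessageHandling::kReport, &exception);

    // Alternatively, leave both the exception and its message pending on
    // the isolate so a caller further up the stack can deal with them.
    MaybeHandle<Object> result2 =
        Execution::TryCall(isolate, callable, receiver, argc, argv,
                           Execution::MessageHandling::kKeepPending, nullptr);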
@@ -79,13 +84,13 @@ class StackGuard final {
// it has been set up.
void ClearThread(const ExecutionAccess& lock);
-#define INTERRUPT_LIST(V) \
- V(DEBUGBREAK, DebugBreak, 0) \
- V(DEBUGCOMMAND, DebugCommand, 1) \
- V(TERMINATE_EXECUTION, TerminateExecution, 2) \
- V(GC_REQUEST, GC, 3) \
- V(INSTALL_CODE, InstallCode, 4) \
- V(API_INTERRUPT, ApiInterrupt, 5) \
+#define INTERRUPT_LIST(V) \
+ V(DEBUGBREAK, DebugBreak, 0) \
+ V(DEBUGCOMMAND, DebugCommand, 1) \
+ V(TERMINATE_EXECUTION, TerminateExecution, 2) \
+ V(GC_REQUEST, GC, 3) \
+ V(INSTALL_CODE, InstallCode, 4) \
+ V(API_INTERRUPT, ApiInterrupt, 5) \
V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 6)
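Note: INTERRUPT_LIST is an X-macro; each consumer defines V to expand the
(NAME, Name, id) tuples. A self-contained sketch of the pattern (abridged
list, expansion mirroring StackGuard's bit flags):

    #include <cstdio>

    #define DEMO_INTERRUPT_LIST(V) \
      V(GC_REQUEST, GC, 3)         \
      V(INSTALL_CODE, InstallCode, 4)

    // Expand each (NAME, Name, id) tuple into a bit-flag enumerator.
    #define V(NAME, Name, id) NAME = 1 << id,
    enum DemoInterruptFlag { DEMO_INTERRUPT_LIST(V) };
    #undef V

    int main() { std::printf("%d %d\n", GC_REQUEST, INSTALL_CODE); }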
#define V(NAME, Name, id) \
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 2ed3ad27e5..b81b7826e1 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/handles.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 2e9fc46590..57dc09ce3c 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -228,6 +228,10 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"f64_asin_wrapper");
Add(ExternalReference::f64_mod_wrapper_function(isolate).address(),
"f64_mod_wrapper");
+ Add(ExternalReference::wasm_call_trap_callback_for_testing(isolate).address(),
+ "wasm::call_trap_callback_for_testing");
+ Add(ExternalReference::libc_memchr_function(isolate).address(),
+ "libc_memchr");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -249,12 +253,16 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"double_absolute_constant");
Add(ExternalReference::address_of_double_neg_constant().address(),
"double_negate_constant");
+ Add(ExternalReference::promise_hook_address(isolate).address(),
+ "Isolate::promise_hook_address()");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
"Debug::after_break_target_address()");
Add(ExternalReference::debug_is_active_address(isolate).address(),
"Debug::is_active_address()");
+ Add(ExternalReference::debug_hook_on_function_call_address(isolate).address(),
+ "Debug::hook_on_function_call_address()");
Add(ExternalReference::debug_last_step_action_address(isolate).address(),
"Debug::step_in_enabled_address()");
Add(ExternalReference::debug_suspended_generator_address(isolate).address(),
@@ -363,9 +371,8 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
};
static const AccessorRefTable getters[] = {
-#define ACCESSOR_INFO_DECLARATION(name) \
- { FUNCTION_ADDR(&Accessors::name##Getter), \
- "Redirect to Accessors::" #name "Getter"},
+#define ACCESSOR_INFO_DECLARATION(name) \
+ {FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter"},
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
};
@@ -377,10 +384,7 @@ void ExternalReferenceTable::AddAccessors(Isolate* isolate) {
};
for (unsigned i = 0; i < arraysize(getters); ++i) {
- const char* name = getters[i].name + 12; // Skip "Redirect to " prefix.
- Add(getters[i].address, name);
- Add(AccessorInfo::redirect(isolate, getters[i].address, ACCESSOR_GETTER),
- getters[i].name);
+ Add(getters[i].address, getters[i].name);
}
for (unsigned i = 0; i < arraysize(setters); ++i) {
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 3e812d56e3..a0030526ee 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -6,12 +6,15 @@
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
+#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/compiler.h"
#include "src/conversions.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
+#include "src/objects/module-info.h"
+#include "src/objects/scope-info.h"
namespace v8 {
namespace internal {
@@ -102,6 +105,14 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
return result;
}
+Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1,
+ Handle<Object> value2) {
+ Handle<Tuple2> result = Handle<Tuple2>::cast(NewStruct(TUPLE2_TYPE));
+ result->set_value1(*value1);
+ result->set_value2(*value2);
+ return result;
+}
+
Handle<Tuple3> Factory::NewTuple3(Handle<Object> value1, Handle<Object> value2,
Handle<Object> value3) {
Handle<Tuple3> result = Handle<Tuple3>::cast(NewStruct(TUPLE3_TYPE));
@@ -120,6 +131,15 @@ Handle<ContextExtension> Factory::NewContextExtension(
return result;
}
+Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
+ Handle<ConstantElementsPair> result = Handle<ConstantElementsPair>::cast(
+ NewStruct(CONSTANT_ELEMENTS_PAIR_TYPE));
+ result->set_elements_kind(elements_kind);
+ result->set_constant_values(*constant_values);
+ return result;
+}
+
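Note: both new helpers follow the Factory struct pattern — allocate via
NewStruct with the matching instance type, then set the fields. A hedged
usage sketch (handles assumed valid; FAST_ELEMENTS stands in for whatever
kind the literal actually has):

    Handle<Tuple2> entry = factory->NewTuple2(key, value);
    Handle<ConstantElementsPair> pair =
        factory->NewConstantElementsPair(FAST_ELEMENTS, constant_values);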
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -158,7 +178,6 @@ Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
FixedArray);
}
-
Handle<FixedArray> Factory::NewUninitializedFixedArray(int size) {
CALL_HEAP_FUNCTION(
isolate(),
@@ -183,11 +202,7 @@ Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
DCHECK(0 <= size);
Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
if (size > 0) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(array);
- for (int i = 0; i < size; ++i) {
- double_array->set_the_hole(i);
- }
+ Handle<FixedDoubleArray>::cast(array)->FillWithHoles(0, size);
}
return array;
}
@@ -440,7 +455,6 @@ Handle<String> Factory::NewInternalizedStringImpl(
String);
}
-
MaybeHandle<Map> Factory::InternalizedStringMapForString(
Handle<String> string) {
// If the string is in new space it cannot be used as internalized.
@@ -448,10 +462,12 @@ MaybeHandle<Map> Factory::InternalizedStringMapForString(
// Find the corresponding internalized string map for strings.
switch (string->map()->instance_type()) {
- case STRING_TYPE: return internalized_string_map();
+ case STRING_TYPE:
+ return internalized_string_map();
case ONE_BYTE_STRING_TYPE:
return one_byte_internalized_string_map();
- case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
+ case EXTERNAL_STRING_TYPE:
+ return external_internalized_string_map();
case EXTERNAL_ONE_BYTE_STRING_TYPE:
return external_one_byte_internalized_string_map();
case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
@@ -885,13 +901,24 @@ Handle<Context> Factory::NewModuleContext(Handle<Module> module,
return context;
}
-
Handle<Context> Factory::NewFunctionContext(int length,
- Handle<JSFunction> function) {
- DCHECK(function->shared()->scope_info()->scope_type() == FUNCTION_SCOPE);
+ Handle<JSFunction> function,
+ ScopeType scope_type) {
+ DCHECK(function->shared()->scope_info()->scope_type() == scope_type);
DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
Handle<FixedArray> array = NewFixedArray(length);
- array->set_map_no_write_barrier(*function_context_map());
+ Handle<Map> map;
+ switch (scope_type) {
+ case EVAL_SCOPE:
+ map = eval_context_map();
+ break;
+ case FUNCTION_SCOPE:
+ map = function_context_map();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ array->set_map_no_write_barrier(*map);
Handle<Context> context = Handle<Context>::cast(array);
context->set_closure(*function);
context->set_previous(function->context());
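Note: callers now choose the context map via the scope type, so eval scopes
get the new eval_context_map. A hedged sketch of the eval path (assumes a
closure whose ScopeInfo records EVAL_SCOPE):

    Handle<Context> context = factory->NewFunctionContext(
        Context::MIN_CONTEXT_SLOTS, closure, EVAL_SCOPE);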
@@ -971,15 +998,6 @@ Handle<Context> Factory::NewBlockContext(Handle<JSFunction> function,
return context;
}
-Handle<Context> Factory::NewPromiseResolvingFunctionContext(int length) {
- DCHECK_GE(length, Context::MIN_CONTEXT_SLOTS);
- Handle<FixedArray> array = NewFixedArray(length);
- array->set_map_no_write_barrier(*function_context_map());
- Handle<Context> context = Handle<Context>::cast(array);
- context->set_extension(*the_hole_value());
- return context;
-}
-
Handle<Struct> Factory::NewStruct(InstanceType type) {
CALL_HEAP_FUNCTION(
isolate(),
@@ -987,39 +1005,6 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
Struct);
}
-Handle<PromiseResolveThenableJobInfo> Factory::NewPromiseResolveThenableJobInfo(
- Handle<JSReceiver> thenable, Handle<JSReceiver> then,
- Handle<JSFunction> resolve, Handle<JSFunction> reject,
- Handle<Object> debug_id, Handle<Object> debug_name,
- Handle<Context> context) {
- Handle<PromiseResolveThenableJobInfo> result =
- Handle<PromiseResolveThenableJobInfo>::cast(
- NewStruct(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE));
- result->set_thenable(*thenable);
- result->set_then(*then);
- result->set_resolve(*resolve);
- result->set_reject(*reject);
- result->set_debug_id(*debug_id);
- result->set_debug_name(*debug_name);
- result->set_context(*context);
- return result;
-}
-
-Handle<PromiseReactionJobInfo> Factory::NewPromiseReactionJobInfo(
- Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
- Handle<Object> debug_id, Handle<Object> debug_name,
- Handle<Context> context) {
- Handle<PromiseReactionJobInfo> result = Handle<PromiseReactionJobInfo>::cast(
- NewStruct(PROMISE_REACTION_JOB_INFO_TYPE));
- result->set_value(*value);
- result->set_tasks(*tasks);
- result->set_deferred(*deferred);
- result->set_debug_id(*debug_id);
- result->set_debug_name(*debug_name);
- result->set_context(*context);
- return result;
-}
-
Handle<AliasedArgumentsEntry> Factory::NewAliasedArgumentsEntry(
int aliased_context_slot) {
Handle<AliasedArgumentsEntry> entry = Handle<AliasedArgumentsEntry>::cast(
@@ -1053,7 +1038,7 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
script->set_eval_from_position(0);
- script->set_shared_function_infos(Smi::kZero);
+ script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
script->set_flags(0);
heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
@@ -1356,6 +1341,7 @@ DEFINE_ERROR(ReferenceError, reference_error)
DEFINE_ERROR(SyntaxError, syntax_error)
DEFINE_ERROR(TypeError, type_error)
DEFINE_ERROR(WasmCompileError, wasm_compile_error)
+DEFINE_ERROR(WasmLinkError, wasm_link_error)
DEFINE_ERROR(WasmRuntimeError, wasm_runtime_error)
#undef DEFINE_ERROR
@@ -1505,6 +1491,17 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> info, Handle<Context> context,
+ Handle<LiteralsArray> literals, PretenureFlag pretenure) {
+ int map_index =
+ Context::FunctionMapIndex(info->language_mode(), info->kind());
+ Handle<Map> initial_map(Map::cast(context->native_context()->get(map_index)));
+
+ return NewFunctionFromSharedFunctionInfo(initial_map, info, context, literals,
+ pretenure);
+}
+
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
Handle<Object> context_or_undefined, PretenureFlag pretenure) {
DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
@@ -1523,6 +1520,26 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
+Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> info,
+ Handle<Object> context_or_undefined, Handle<LiteralsArray> literals,
+ PretenureFlag pretenure) {
+ DCHECK_EQ(JS_FUNCTION_TYPE, initial_map->instance_type());
+ Handle<JSFunction> result =
+ NewFunction(initial_map, info, context_or_undefined, pretenure);
+
+ result->set_literals(*literals);
+ if (info->ic_age() != isolate()->heap()->global_ic_age()) {
+ info->ResetForNewContext(isolate()->heap()->global_ic_age());
+ }
+
+ if (context_or_undefined->IsContext()) {
+ // Give compiler a chance to pre-initialize.
+ Compiler::PostInstantiation(result, pretenure);
+ }
+
+ return result;
+}
Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
Handle<FixedArray> array = NewFixedArray(length, TENURED);
@@ -1600,6 +1617,7 @@ Handle<Code> Factory::NewCode(const CodeDesc& desc,
code->set_prologue_offset(prologue_offset);
code->set_constant_pool_offset(desc.instr_size - desc.constant_pool_size);
code->set_builtin_index(-1);
+ code->set_protected_instructions(*empty_fixed_array());
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
@@ -1692,12 +1710,12 @@ Handle<JSGlobalObject> Factory::NewJSGlobalObject(
for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
- DCHECK_EQ(ACCESSOR_CONSTANT, details.type());
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ DCHECK_EQ(kAccessor, details.kind());
+ PropertyDetails d(kAccessor, details.attributes(), i + 1,
PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i));
Handle<PropertyCell> cell = NewPropertyCell();
- cell->set_value(descs->GetCallbacksObject(i));
+ cell->set_value(descs->GetValue(i));
// |dictionary| already contains enough space for all properties.
USE(GlobalDictionary::Add(dictionary, name, cell, d));
}
@@ -1806,7 +1824,13 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
Handle<JSModuleNamespace> Factory::NewJSModuleNamespace() {
Handle<Map> map = isolate()->js_module_namespace_map();
- return Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map));
+ Handle<JSModuleNamespace> module_namespace(
+ Handle<JSModuleNamespace>::cast(NewJSObjectFromMap(map)));
+ FieldIndex index = FieldIndex::ForDescriptor(
+ *map, JSModuleNamespace::kToStringTagFieldIndex);
+ module_namespace->FastPropertyAtPut(index,
+ isolate()->heap()->Module_string());
+ return module_namespace;
}
Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
@@ -2224,6 +2248,7 @@ void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
map->set_is_prototype_map(true);
}
JSObject::NotifyMapChange(old_map, map, isolate());
+ old_map->NotifyLeafMapLayoutChange();
// Check that the already allocated object has the same size and type as
// objects allocated using the constructor.
@@ -2264,6 +2289,17 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
return shared;
}
+Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script) {
+ Handle<Code> code = isolate()->builtins()->CompileLazy();
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
+ Handle<SharedFunctionInfo> result = NewSharedFunctionInfo(
+ literal->name(), literal->materialized_literal_count(), literal->kind(),
+ code, scope_info);
+ SharedFunctionInfo::InitFromFunctionLiteral(result, literal);
+ SharedFunctionInfo::SetScript(result, script);
+ return result;
+}
Handle<JSMessageObject> Factory::NewJSMessageObject(
MessageTemplate::Template message, Handle<Object> argument,
@@ -2280,6 +2316,7 @@ Handle<JSMessageObject> Factory::NewJSMessageObject(
message_obj->set_end_position(end_position);
message_obj->set_script(*script);
message_obj->set_stack_frames(*stack_frames);
+ message_obj->set_error_level(v8::Isolate::kMessageError);
return message_obj;
}
@@ -2295,6 +2332,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
// Set pointer fields.
share->set_name(*name);
+ share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
Handle<Code> code;
if (!maybe_code.ToHandle(&code)) {
code = isolate()->builtins()->Illegal();
@@ -2308,7 +2346,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
: isolate()->builtins()->ConstructedNonConstructable();
share->SetConstructStub(*construct_stub);
share->set_instance_class_name(*Object_string());
- share->set_function_data(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(DebugInfo::uninitialized(), SKIP_WRITE_BARRIER);
share->set_function_identifier(*undefined_value(), SKIP_WRITE_BARRIER);
@@ -2316,6 +2353,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<TypeFeedbackMetadata> feedback_metadata =
TypeFeedbackMetadata::New(isolate(), &empty_spec);
share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
+ share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
#if TRACE_MAPS
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
@@ -2616,31 +2654,31 @@ void Factory::SetFunctionInstanceDescriptor(Handle<Map> map,
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
{ // Add length.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(length->name())), length, roc_attribs);
map->AppendDescriptor(&d);
}
STATIC_ASSERT(JSFunction::kNameDescriptorIndex == 1);
Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), ro_attribs);
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
{ // Add name.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(name->name())), name, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> args =
Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
{ // Add arguments.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
- ro_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(args->name())), args, ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> caller =
Accessors::FunctionCallerInfo(isolate(), ro_attribs);
{ // Add caller.
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
- caller, ro_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(caller->name())), caller, ro_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -2649,8 +2687,8 @@ void Factory::SetFunctionInstanceDescriptor(Handle<Map> map,
}
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, ro_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(prototype->name())), prototype, ro_attribs);
map->AppendDescriptor(&d);
}
}
@@ -2684,8 +2722,8 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
{ // Add length.
Handle<AccessorInfo> length =
Accessors::FunctionLengthInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(handle(Name::cast(length->name())), length,
- roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ handle(Name::cast(length->name())), length, roc_attribs);
map->AppendDescriptor(&d);
}
@@ -2693,8 +2731,8 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
{ // Add name.
Handle<AccessorInfo> name =
Accessors::FunctionNameInfo(isolate(), roc_attribs);
- AccessorConstantDescriptor d(handle(Name::cast(name->name())), name,
- roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ handle(Name::cast(name->name())), name, roc_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
@@ -2704,31 +2742,46 @@ void Factory::SetStrictFunctionInstanceDescriptor(Handle<Map> map,
: ro_attribs;
Handle<AccessorInfo> prototype =
Accessors::FunctionPrototypeInfo(isolate(), attribs);
- AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(prototype->name())), prototype, attribs);
map->AppendDescriptor(&d);
}
}
-Handle<JSFixedArrayIterator> Factory::NewJSFixedArrayIterator(
- Handle<FixedArray> array) {
- // Create the "next" function (must be unique per iterator object).
- Handle<Code> code(
- isolate()->builtins()->builtin(Builtins::kFixedArrayIteratorNext));
- // TODO(neis): Don't create a new SharedFunctionInfo each time.
- Handle<JSFunction> next = isolate()->factory()->NewFunctionWithoutPrototype(
- isolate()->factory()->next_string(), code, false);
- next->shared()->set_native(true);
-
- // Create the iterator.
- Handle<Map> map(isolate()->native_context()->fixed_array_iterator_map());
- Handle<JSFixedArrayIterator> iterator =
- Handle<JSFixedArrayIterator>::cast(NewJSObjectFromMap(map));
- iterator->set_initial_next(*next);
- iterator->set_array(*array);
- iterator->set_index(0);
- iterator->InObjectPropertyAtPut(JSFixedArrayIterator::kNextIndex, *next);
- return iterator;
+Handle<Map> Factory::CreateClassFunctionMap(Handle<JSFunction> empty_function) {
+ Handle<Map> map = NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ SetClassFunctionInstanceDescriptor(map);
+ map->set_is_constructor(true);
+ map->set_is_callable();
+ Map::SetPrototype(map, empty_function);
+ return map;
+}
+
+void Factory::SetClassFunctionInstanceDescriptor(Handle<Map> map) {
+ Map::EnsureDescriptorSlack(map, 2);
+
+ PropertyAttributes rw_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+
+ STATIC_ASSERT(JSFunction::kLengthDescriptorIndex == 0);
+ { // Add length.
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ handle(Name::cast(length->name())), length, roc_attribs);
+ map->AppendDescriptor(&d);
+ }
+
+ {
+ // Add prototype.
+ Handle<AccessorInfo> prototype =
+ Accessors::FunctionPrototypeInfo(isolate(), rw_attribs);
+ Descriptor d = Descriptor::AccessorConstant(
+ Handle<Name>(Name::cast(prototype->name())), prototype, rw_attribs);
+ map->AppendDescriptor(&d);
+ }
}
} // namespace internal
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index d059b10888..c2aa069810 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -69,22 +69,12 @@ class V8_EXPORT_PRIVATE Factory final {
// Create a new boxed value.
Handle<Box> NewBox(Handle<Object> value);
- // Create a new PromiseReactionJobInfo struct.
- Handle<PromiseReactionJobInfo> NewPromiseReactionJobInfo(
- Handle<Object> value, Handle<Object> tasks, Handle<Object> deferred,
- Handle<Object> debug_id, Handle<Object> debug_name,
- Handle<Context> context);
-
- // Create a new PromiseResolveThenableJobInfo struct.
- Handle<PromiseResolveThenableJobInfo> NewPromiseResolveThenableJobInfo(
- Handle<JSReceiver> thenable, Handle<JSReceiver> then,
- Handle<JSFunction> resolve, Handle<JSFunction> reject,
- Handle<Object> debug_id, Handle<Object> debug_name,
- Handle<Context> context);
-
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
+ // Create a new Tuple2 struct.
+ Handle<Tuple2> NewTuple2(Handle<Object> value1, Handle<Object> value2);
+
// Create a new Tuple3 struct.
Handle<Tuple3> NewTuple3(Handle<Object> value1, Handle<Object> value2,
Handle<Object> value3);
@@ -93,6 +83,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<ContextExtension> NewContextExtension(Handle<ScopeInfo> scope_info,
Handle<Object> extension);
+ // Create a new ConstantElementsPair struct.
+ Handle<ConstantElementsPair> NewConstantElementsPair(
+ ElementsKind elements_kind, Handle<FixedArrayBase> constant_values);
+
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -293,8 +287,9 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> function,
Handle<ScopeInfo> scope_info);
- // Create a function context.
- Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function);
+ // Create a function or eval context.
+ Handle<Context> NewFunctionContext(int length, Handle<JSFunction> function,
+ ScopeType scope_type);
// Create a catch context.
Handle<Context> NewCatchContext(Handle<JSFunction> function,
@@ -319,8 +314,6 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Context> NewBlockContext(Handle<JSFunction> function,
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Create a promise context.
- Handle<Context> NewPromiseResolvingFunctionContext(int length);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
@@ -435,6 +428,12 @@ class V8_EXPORT_PRIVATE Factory final {
MutableMode mode = IMMUTABLE,
PretenureFlag pretenure = NOT_TENURED);
+ Handle<HeapNumber> NewMutableHeapNumber(
+ PretenureFlag pretenure = NOT_TENURED) {
+ double hole_nan = bit_cast<double>(kHoleNanInt64);
+ return NewHeapNumber(hole_nan, MUTABLE, pretenure);
+ }
+
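Note: pre-filling with the hole NaN bit pattern makes an uninitialized
mutable box distinguishable from any real double value. A hedged
sanity-check sketch:

    Handle<HeapNumber> box = factory->NewMutableHeapNumber();
    DCHECK_EQ(kHoleNanInt64, bit_cast<int64_t>(box->value()));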
#define SIMD128_NEW_DECL(TYPE, Type, type, lane_count, lane_type) \
Handle<Type> New##Type(lane_type lanes[lane_count], \
PretenureFlag pretenure = NOT_TENURED);
@@ -540,9 +539,6 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSMapIterator> NewJSMapIterator();
Handle<JSSetIterator> NewJSSetIterator();
- Handle<JSFixedArrayIterator> NewJSFixedArrayIterator(
- Handle<FixedArray> array);
-
// Allocates a bound function.
MaybeHandle<JSBoundFunction> NewJSBoundFunction(
Handle<JSReceiver> target_function, Handle<Object> bound_this,
@@ -575,6 +571,15 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
+ Handle<Object> context_or_undefined, Handle<LiteralsArray> literals,
+ PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<SharedFunctionInfo> function_info, Handle<Context> context,
+ Handle<LiteralsArray> literals, PretenureFlag pretenure = TENURED);
+
+ Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
+ Handle<Map> initial_map, Handle<SharedFunctionInfo> function_info,
Handle<Object> context_or_undefined, PretenureFlag pretenure = TENURED);
Handle<JSFunction> NewFunctionFromSharedFunctionInfo(
@@ -645,6 +650,7 @@ class V8_EXPORT_PRIVATE Factory final {
DECLARE_ERROR(SyntaxError)
DECLARE_ERROR(TypeError)
DECLARE_ERROR(WasmCompileError)
+ DECLARE_ERROR(WasmLinkError)
DECLARE_ERROR(WasmRuntimeError)
#undef DECLARE_ERROR
@@ -706,6 +712,9 @@ class V8_EXPORT_PRIVATE Factory final {
MaybeHandle<Code> code,
bool is_constructor);
+ Handle<SharedFunctionInfo> NewSharedFunctionInfoForLiteral(
+ FunctionLiteral* literal, Handle<Script> script);
+
static bool IsFunctionModeWithPrototype(FunctionMode function_mode) {
return (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
@@ -716,6 +725,8 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
Handle<JSFunction> empty_function);
+ Handle<Map> CreateClassFunctionMap(Handle<JSFunction> empty_function);
+
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(MessageTemplate::Template message,
Handle<Object> argument,
@@ -797,6 +808,8 @@ class V8_EXPORT_PRIVATE Factory final {
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
+
+ void SetClassFunctionInstanceDescriptor(Handle<Map> map);
};
} // namespace internal
diff --git a/deps/v8/src/fast-accessor-assembler.cc b/deps/v8/src/fast-accessor-assembler.cc
index ee9b241186..e09db74666 100644
--- a/deps/v8/src/fast-accessor-assembler.cc
+++ b/deps/v8/src/fast-accessor-assembler.cc
@@ -10,18 +10,20 @@
#include "src/handles-inl.h"
#include "src/objects.h" // For FAA::LoadInternalField impl.
-using v8::internal::CodeStubAssembler;
-using v8::internal::compiler::Node;
-
namespace v8 {
namespace internal {
+using compiler::Node;
+using compiler::CodeAssemblerLabel;
+using compiler::CodeAssemblerVariable;
+
FastAccessorAssembler::FastAccessorAssembler(Isolate* isolate)
: zone_(isolate->allocator(), ZONE_NAME),
isolate_(isolate),
- assembler_(new CodeStubAssembler(isolate, zone(), 1,
- Code::ComputeFlags(Code::STUB),
- "FastAccessorAssembler")),
+ assembler_state_(new compiler::CodeAssemblerState(
+ isolate, zone(), 1, Code::ComputeFlags(Code::STUB),
+ "FastAccessorAssembler")),
+ assembler_(new CodeStubAssembler(assembler_state_.get())),
state_(kBuilding) {}
FastAccessorAssembler::~FastAccessorAssembler() { Clear(); }
@@ -40,19 +42,18 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::GetReceiver() {
}
FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
- ValueId value, int field_no) {
+ ValueId value_id, int field_no) {
CHECK_EQ(kBuilding, state_);
- CodeStubAssembler::Variable result(assembler_.get(),
- MachineRepresentation::kTagged);
+ CodeAssemblerVariable result(assembler_.get(),
+ MachineRepresentation::kTagged);
LabelId is_not_jsobject = MakeLabel();
- CodeStubAssembler::Label merge(assembler_.get(), &result);
+ CodeAssemblerLabel merge(assembler_.get(), &result);
- CheckIsJSObjectOrJump(value, is_not_jsobject);
+ CheckIsJSObjectOrJump(value_id, is_not_jsobject);
Node* internal_field = assembler_->LoadObjectField(
- FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
- MachineType::Pointer());
+ FromId(value_id), JSObject::kHeaderSize + kPointerSize * field_no);
result.Bind(internal_field);
assembler_->Goto(&merge);
@@ -68,14 +69,15 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::LoadInternalField(
}
FastAccessorAssembler::ValueId
-FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value, int field_no) {
+FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value_id,
+ int field_no) {
CHECK_EQ(kBuilding, state_);
// Defensive debug checks.
if (FLAG_debug_code) {
LabelId is_jsobject = MakeLabel();
LabelId is_not_jsobject = MakeLabel();
- CheckIsJSObjectOrJump(value, is_not_jsobject);
+ CheckIsJSObjectOrJump(value_id, is_not_jsobject);
assembler_->Goto(FromId(is_jsobject));
SetLabel(is_not_jsobject);
@@ -86,58 +88,56 @@ FastAccessorAssembler::LoadInternalFieldUnchecked(ValueId value, int field_no) {
}
Node* result = assembler_->LoadObjectField(
- FromId(value), JSObject::kHeaderSize + kPointerSize * field_no,
- MachineType::Pointer());
+ FromId(value_id), JSObject::kHeaderSize + kPointerSize * field_no);
return FromRaw(result);
}
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(ValueId value,
- int offset) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadValue(
+ ValueId value_id, int offset) {
CHECK_EQ(kBuilding, state_);
- return FromRaw(assembler_->LoadBufferObject(FromId(value), offset,
+ return FromRaw(assembler_->LoadBufferObject(FromId(value_id), offset,
MachineType::IntPtr()));
}
-FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(ValueId value,
- int offset) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::LoadObject(
+ ValueId value_id, int offset) {
CHECK_EQ(kBuilding, state_);
return FromRaw(assembler_->LoadBufferObject(
- assembler_->LoadBufferObject(FromId(value), offset,
- MachineType::Pointer()),
- 0, MachineType::AnyTagged()));
+ assembler_->LoadBufferObject(FromId(value_id), offset), 0,
+ MachineType::AnyTagged()));
}
-FastAccessorAssembler::ValueId FastAccessorAssembler::ToSmi(ValueId value) {
+FastAccessorAssembler::ValueId FastAccessorAssembler::ToSmi(ValueId value_id) {
CHECK_EQ(kBuilding, state_);
- return FromRaw(assembler_->SmiTag(FromId(value)));
+ return FromRaw(assembler_->SmiTag(FromId(value_id)));
}
-void FastAccessorAssembler::ReturnValue(ValueId value) {
+void FastAccessorAssembler::ReturnValue(ValueId value_id) {
CHECK_EQ(kBuilding, state_);
- assembler_->Return(FromId(value));
+ assembler_->Return(FromId(value_id));
}
-void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value, int mask) {
+void FastAccessorAssembler::CheckFlagSetOrReturnNull(ValueId value_id,
+ int mask) {
CHECK_EQ(kBuilding, state_);
- CodeStubAssembler::Label pass(assembler_.get());
- CodeStubAssembler::Label fail(assembler_.get());
+ CodeAssemblerLabel pass(assembler_.get());
+ CodeAssemblerLabel fail(assembler_.get());
+ Node* value = FromId(value_id);
assembler_->Branch(
- assembler_->Word32Equal(
- assembler_->Word32And(FromId(value), assembler_->Int32Constant(mask)),
- assembler_->Int32Constant(0)),
- &fail, &pass);
+ assembler_->IsSetWord(assembler_->BitcastTaggedToWord(value), mask),
+ &pass, &fail);
assembler_->Bind(&fail);
assembler_->Return(assembler_->NullConstant());
assembler_->Bind(&pass);
}
-void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
+void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value_id) {
CHECK_EQ(kBuilding, state_);
- CodeStubAssembler::Label is_null(assembler_.get());
- CodeStubAssembler::Label not_null(assembler_.get());
+ CodeAssemblerLabel is_null(assembler_.get());
+ CodeAssemblerLabel not_null(assembler_.get());
assembler_->Branch(
- assembler_->WordEqual(FromId(value), assembler_->IntPtrConstant(0)),
+ assembler_->WordEqual(FromId(value_id), assembler_->SmiConstant(0)),
&is_null, &not_null);
assembler_->Bind(&is_null);
assembler_->Return(assembler_->NullConstant());
@@ -146,7 +146,7 @@ void FastAccessorAssembler::CheckNotZeroOrReturnNull(ValueId value) {
FastAccessorAssembler::LabelId FastAccessorAssembler::MakeLabel() {
CHECK_EQ(kBuilding, state_);
- return FromRaw(new CodeStubAssembler::Label(assembler_.get()));
+ return FromRaw(new CodeAssemblerLabel(assembler_.get()));
}
void FastAccessorAssembler::SetLabel(LabelId label_id) {
@@ -162,9 +162,9 @@ void FastAccessorAssembler::Goto(LabelId label_id) {
void FastAccessorAssembler::CheckNotZeroOrJump(ValueId value_id,
LabelId label_id) {
CHECK_EQ(kBuilding, state_);
- CodeStubAssembler::Label pass(assembler_.get());
+ CodeAssemblerLabel pass(assembler_.get());
assembler_->Branch(
- assembler_->WordEqual(FromId(value_id), assembler_->IntPtrConstant(0)),
+ assembler_->WordEqual(FromId(value_id), assembler_->SmiConstant(0)),
FromId(label_id), &pass);
assembler_->Bind(&pass);
}
@@ -192,23 +192,13 @@ FastAccessorAssembler::ValueId FastAccessorAssembler::Call(
Node* context = assembler_->Parameter(kContextParameter);
Node* target = assembler_->HeapConstant(stub.GetCode());
- int param_count = descriptor.GetParameterCount();
- Node** args = zone()->NewArray<Node*>(param_count + 1 + kJSParameterCount);
- // Stub/register parameters:
- args[0] = assembler_->UndefinedConstant(); // callee (there's no JSFunction)
- args[1] = assembler_->UndefinedConstant(); // call_data (undefined)
- args[2] = assembler_->Parameter(0); // receiver (same as holder in this case)
- args[3] = assembler_->ExternalConstant(callback); // API callback function
-
- // JS arguments, on stack:
- args[4] = FromId(arg);
-
- // Context.
- args[5] = context;
-
- Node* call =
- assembler_->CallStubN(descriptor, kJSParameterCount, target, args);
-
+ Node* call = assembler_->CallStub(
+ descriptor, target, context,
+ assembler_->UndefinedConstant(), // callee (there's no JSFunction)
+ assembler_->UndefinedConstant(), // call_data (undefined)
+ assembler_->Parameter(0), // receiver (same as holder in this case)
+ assembler_->ExternalConstant(callback), // API callback function
+ FromId(arg)); // JS argument, on stack
return FromRaw(call);
}
@@ -217,28 +207,20 @@ void FastAccessorAssembler::CheckIsJSObjectOrJump(ValueId value_id,
CHECK_EQ(kBuilding, state_);
// Determine the 'value' object's instance type.
- Node* object_map = assembler_->LoadObjectField(
- FromId(value_id), Internals::kHeapObjectMapOffset,
- MachineType::Pointer());
+ Node* instance_type = assembler_->LoadInstanceType(FromId(value_id));
- Node* instance_type = assembler_->WordAnd(
- assembler_->LoadObjectField(object_map,
- Internals::kMapInstanceTypeAndBitFieldOffset,
- MachineType::Uint16()),
- assembler_->IntPtrConstant(0xff));
-
- CodeStubAssembler::Label is_jsobject(assembler_.get());
+ CodeAssemblerLabel is_jsobject(assembler_.get());
// Check whether we have a proper JSObject.
assembler_->GotoIf(
- assembler_->WordEqual(
- instance_type, assembler_->IntPtrConstant(Internals::kJSObjectType)),
+ assembler_->Word32Equal(
+ instance_type, assembler_->Int32Constant(Internals::kJSObjectType)),
&is_jsobject);
// JSApiObject?.
assembler_->GotoUnless(
- assembler_->WordEqual(instance_type, assembler_->IntPtrConstant(
- Internals::kJSApiObjectType)),
+ assembler_->Word32Equal(instance_type, assembler_->Int32Constant(
+ Internals::kJSApiObjectType)),
FromId(label_id));
// Continue.
@@ -248,7 +230,8 @@ void FastAccessorAssembler::CheckIsJSObjectOrJump(ValueId value_id,
MaybeHandle<Code> FastAccessorAssembler::Build() {
CHECK_EQ(kBuilding, state_);
- Handle<Code> code = assembler_->GenerateCode();
+ Handle<Code> code =
+ compiler::CodeAssembler::GenerateCode(assembler_state_.get());
state_ = !code.is_null() ? kBuilt : kError;
Clear();
return code;
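Note: the refactoring separates graph construction from code generation —
compiler::CodeAssemblerState owns the graph, assembler objects are transient
views over it, and GenerateCode consumes the state. A hedged lifecycle sketch
mirroring the constructor and Build() above:

    compiler::CodeAssemblerState state(isolate, zone, 1,
                                       Code::ComputeFlags(Code::STUB), "Demo");
    CodeStubAssembler assembler(&state);
    assembler.Return(assembler.UndefinedConstant());
    Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);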
@@ -256,12 +239,12 @@ MaybeHandle<Code> FastAccessorAssembler::Build() {
FastAccessorAssembler::ValueId FastAccessorAssembler::FromRaw(Node* node) {
nodes_.push_back(node);
- ValueId value = {nodes_.size() - 1};
- return value;
+ ValueId value_id = {nodes_.size() - 1};
+ return value_id;
}
FastAccessorAssembler::LabelId FastAccessorAssembler::FromRaw(
- CodeStubAssembler::Label* label) {
+ CodeAssemblerLabel* label) {
labels_.push_back(label);
LabelId label_id = {labels_.size() - 1};
return label_id;
@@ -273,7 +256,7 @@ Node* FastAccessorAssembler::FromId(ValueId value) const {
return nodes_.at(value.value_id);
}
-CodeStubAssembler::Label* FastAccessorAssembler::FromId(LabelId label) const {
+CodeAssemblerLabel* FastAccessorAssembler::FromId(LabelId label) const {
CHECK_LT(label.label_id, labels_.size());
CHECK_NOT_NULL(labels_.at(label.label_id));
return labels_.at(label.label_id);
diff --git a/deps/v8/src/fast-accessor-assembler.h b/deps/v8/src/fast-accessor-assembler.h
index 9468d8603a..c1380c9025 100644
--- a/deps/v8/src/fast-accessor-assembler.h
+++ b/deps/v8/src/fast-accessor-assembler.h
@@ -13,18 +13,19 @@
#include "src/base/macros.h"
#include "src/handles.h"
-// For CodeStubAssembler::Label. (We cannot forward-declare inner classes.)
-#include "src/code-stub-assembler.h"
-
namespace v8 {
namespace internal {
class Code;
+class CodeStubAssembler;
class Isolate;
class Zone;
namespace compiler {
class Node;
+class CodeAssemblerLabel;
+class CodeAssemblerState;
+class CodeAssemblerVariable;
}
// This interface "exports" an aggregated subset of RawMachineAssembler, for
@@ -86,9 +87,9 @@ class FastAccessorAssembler {
private:
ValueId FromRaw(compiler::Node* node);
- LabelId FromRaw(CodeStubAssembler::Label* label);
+ LabelId FromRaw(compiler::CodeAssemblerLabel* label);
compiler::Node* FromId(ValueId value) const;
- CodeStubAssembler::Label* FromId(LabelId value) const;
+ compiler::CodeAssemblerLabel* FromId(LabelId value) const;
void CheckIsJSObjectOrJump(ValueId value, LabelId label_id);
@@ -98,13 +99,14 @@ class FastAccessorAssembler {
Zone zone_;
Isolate* isolate_;
+ std::unique_ptr<compiler::CodeAssemblerState> assembler_state_;
std::unique_ptr<CodeStubAssembler> assembler_;
// To prevent exposing the RMA internals to the outside world, we'll map
// Node + Label pointers to integers wrapped in ValueId and LabelId instances.
// These vectors maintain this mapping.
std::vector<compiler::Node*> nodes_;
- std::vector<CodeStubAssembler::Label*> labels_;
+ std::vector<compiler::CodeAssemblerLabel*> labels_;
// Remember the current state for easy error checking. (We prefer to be
// strict as this class will be exposed at the API.)
diff --git a/deps/v8/src/field-type.cc b/deps/v8/src/field-type.cc
index 16bccf294b..0097a35bc0 100644
--- a/deps/v8/src/field-type.cc
+++ b/deps/v8/src/field-type.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast-types.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index a7efe1163a..39534f9301 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -201,7 +201,8 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
V(harmony_do_expressions, "harmony do-expressions") \
V(harmony_regexp_named_captures, "harmony regexp named captures") \
V(harmony_regexp_property, "harmony unicode regexp property classes") \
- V(harmony_class_fields, "harmony public fields in class literals")
+ V(harmony_class_fields, "harmony public fields in class literals") \
+ V(harmony_object_spread, "harmony object spread")
// Features that are complete (but still behind --harmony/es-staging flag).
#define HARMONY_STAGED_BASE(V) \
@@ -210,20 +211,26 @@ DEFINE_IMPLICATION(es_staging, move_object_start)
"harmony restrictions on generator declarations") \
V(harmony_tailcalls, "harmony tail calls") \
V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists") \
- V(harmony_string_padding, "harmony String-padding methods")
+ "harmony trailing commas in function parameter lists")
#ifdef V8_I18N_SUPPORT
#define HARMONY_STAGED(V) \
HARMONY_STAGED_BASE(V) \
- V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) V(harmony_async_await, "harmony async-await")
+#define HARMONY_SHIPPING_BASE(V)
+
+#ifdef V8_I18N_SUPPORT
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts")
+#else
+#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
+#endif
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -274,9 +281,12 @@ DEFINE_BOOL(track_field_types, true, "track field types")
DEFINE_IMPLICATION(track_field_types, track_fields)
DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
-DEFINE_BOOL(mark_shared_functions_for_tier_up, true,
+DEFINE_BOOL(mark_shared_functions_for_tier_up, false,
"mark shared functions for tier up")
+// Flags for strongly rooting literal arrays in the feedback vector.
+DEFINE_BOOL(trace_strong_rooted_literals, false, "trace literal rooting")
+
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
"Enables optimizations which favor memory size over execution "
@@ -292,6 +302,7 @@ DEFINE_BOOL(string_slices, true, "use string slices")
DEFINE_BOOL(ignition, false, "use ignition interpreter")
DEFINE_BOOL(ignition_staging, false, "use ignition with all staged features")
DEFINE_IMPLICATION(ignition_staging, ignition)
+DEFINE_IMPLICATION(ignition_staging, compiler_dispatcher)
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(ignition_deadcode, true,
"use ignition dead code elimination optimizer")
@@ -399,7 +410,7 @@ DEFINE_BOOL(inline_construct, true, "inline constructor calls")
DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
DEFINE_BOOL(inline_into_try, true, "inline into try blocks")
-DEFINE_INT(escape_analysis_iterations, 2,
+DEFINE_INT(escape_analysis_iterations, 1,
"maximum number of escape analysis fix-point iterations")
DEFINE_BOOL(concurrent_recompilation, true,
@@ -419,9 +430,6 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
// Flags for TurboFan.
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
-DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
-DEFINE_IMPLICATION(turbo, turbo_loop_peeling)
-DEFINE_IMPLICATION(turbo, turbo_escape)
DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_preprocess_ranges, true,
@@ -441,17 +449,19 @@ DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
DEFINE_BOOL(trace_turbo_ceq, false, "trace TurboFan's control equivalence")
DEFINE_BOOL(trace_turbo_loop, false, "trace TurboFan's loop optimizations")
DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
-DEFINE_BOOL(turbo_asm_deoptimization, false,
- "enable deoptimization in TurboFan for asm.js code")
DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_STRING(turbo_verify_machine_graph, nullptr,
"verify TurboFan machine graph before instruction selection")
+DEFINE_BOOL(csa_verify, DEBUG_BOOL,
+ "verify TurboFan machine graph of code stubs")
+DEFINE_BOOL(trace_csa_verify, false, "trace code stubs verification")
+DEFINE_STRING(csa_trap_on_node, nullptr,
+              "trigger a breakpoint when a node with the given id is "
+              "created in the given stub. The format is: StubName,NodeId")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_stats_nvp, false,
"print TurboFan statistics in machine-readable format")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
-DEFINE_BOOL(turbo_type_feedback, true,
- "use typed feedback for representation inference in Turbofan")
DEFINE_BOOL(function_context_specialization, false,
"enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, true, "enable inlining in TurboFan")
@@ -465,19 +475,19 @@ DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
"verify register allocation in TurboFan")
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
-DEFINE_BOOL(turbo_stress_loop_peeling, false,
- "stress loop peeling optimization")
-DEFINE_BOOL(turbo_loop_peeling, false, "Turbofan loop peeling")
+DEFINE_BOOL(turbo_loop_peeling, true, "Turbofan loop peeling")
DEFINE_BOOL(turbo_loop_variable, true, "Turbofan loop variable optimization")
DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_BOOL(turbo_frame_elision, true, "elide frames in TurboFan")
-DEFINE_BOOL(turbo_escape, false, "enable escape analysis")
+DEFINE_BOOL(turbo_escape, true, "enable escape analysis")
DEFINE_BOOL(turbo_instruction_scheduling, false,
"enable instruction scheduling in TurboFan")
DEFINE_BOOL(turbo_stress_instruction_scheduling, false,
"randomly schedule instructions to stress dependency tracking")
DEFINE_BOOL(turbo_store_elimination, true,
"enable store-store elimination in TurboFan")
+DEFINE_BOOL(turbo_lower_create_closure, false,
+ "enable inline allocation for closure instantiation")
// Flags to help platform porters
DEFINE_BOOL(minimal, false,
@@ -488,7 +498,9 @@ DEFINE_NEG_IMPLICATION(minimal, crankshaft)
DEFINE_NEG_IMPLICATION(minimal, use_ic)
// Flags for native WebAssembly.
-DEFINE_BOOL(expose_wasm, false, "expose WASM interface to JavaScript")
+DEFINE_BOOL(expose_wasm, true, "expose WASM interface to JavaScript")
+DEFINE_BOOL(wasm_disable_structured_cloning, false,
+ "disable WASM structured cloning")
DEFINE_INT(wasm_num_compilation_tasks, 10,
"number of parallel compilation tasks for wasm")
DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
@@ -499,6 +511,10 @@ DEFINE_BOOL(trace_wasm_interpreter, false, "trace interpretation of wasm code")
DEFINE_INT(trace_wasm_ast_start, 0,
"start function for WASM AST trace (inclusive)")
DEFINE_INT(trace_wasm_ast_end, 0, "end function for WASM AST trace (exclusive)")
+DEFINE_INT(trace_wasm_text_start, 0,
+ "start function for WASM text generation (inclusive)")
+DEFINE_INT(trace_wasm_text_end, 0,
+ "end function for WASM text generation (exclusive)")
DEFINE_INT(skip_compiling_wasm_funcs, 0, "start compiling at function N")
DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
@@ -506,6 +522,10 @@ DEFINE_BOOL(wasm_loop_assignment_analysis, true,
"perform loop assignment analysis for WASM")
DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
+DEFINE_IMPLICATION(ignition_staging, validate_asm)
+DEFINE_BOOL(suppress_asm_messages, false,
+ "don't emit asm.js related messages (for golden file testing)")
+DEFINE_BOOL(trace_asm_time, false, "log asm.js timing info to the console")
DEFINE_BOOL(dump_wasm_module, false, "dump WASM module bytes")
DEFINE_STRING(dump_wasm_module_path, NULL, "directory to dump wasm modules to")
@@ -522,10 +542,23 @@ DEFINE_BOOL(wasm_mv_prototype, false,
DEFINE_BOOL(wasm_atomics_prototype, false,
"enable prototype atomic opcodes for wasm")
+DEFINE_BOOL(wasm_opt, true, "enable wasm optimization")
+DEFINE_BOOL(wasm_no_bounds_checks, false,
+ "disable bounds checks (performance testing only)")
+DEFINE_BOOL(wasm_no_stack_checks, false,
+ "disable stack checks (performance testing only)")
+
DEFINE_BOOL(wasm_trap_handler, false,
"use signal handlers to catch out of bounds memory access in wasm"
- " (currently Linux x86_64 only)")
-
+ " (experimental, currently Linux x86_64 only)")
+DEFINE_BOOL(wasm_guard_pages, false,
+ "add guard pages to the end of WebWassembly memory"
+ " (experimental, no effect on 32-bit)")
+DEFINE_IMPLICATION(wasm_trap_handler, wasm_guard_pages)
+DEFINE_BOOL(wasm_trap_if, false,
+ "enable the use of the trap_if operator for traps")
+DEFINE_BOOL(wasm_code_fuzzer_gen_test, false,
+ "Generate a test case when running the wasm-code fuzzer")
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
// 0x1800 fits in the immediate field of an ARM instruction.
@@ -541,7 +574,8 @@ DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
// assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
-DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging")
+DEFINE_BOOL(debug_code, DEBUG_BOOL,
+ "generate extra code (assertions) for debugging")
DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
DEFINE_BOOL(enable_ssse3, true, "enable use of SSSE3 instructions if available")
@@ -633,8 +667,6 @@ DEFINE_BOOL(external_reference_stats, false,
#endif // DEBUG
// compiler.cc
-DEFINE_INT(min_preparse_length, 1024,
- "minimum length for automatic enable preparsing")
DEFINE_INT(max_opt_count, 10,
"maximum number of optimization attempts before giving up.")
@@ -643,6 +675,16 @@ DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
+// compiler-dispatcher.cc
+DEFINE_BOOL(compiler_dispatcher, false, "enable compiler dispatcher")
+DEFINE_BOOL(trace_compiler_dispatcher, false,
+ "trace compiler dispatcher activity")
+
+// compiler-dispatcher-job.cc
+DEFINE_BOOL(
+ trace_compiler_dispatcher_jobs, false,
+ "trace progress of individual jobs managed by the compiler dispatcher")
+
// cpu-profiler.cc
DEFINE_INT(cpu_profiler_sampling_interval, 1000,
"CPU profiler sampling interval in microseconds")
@@ -660,6 +702,11 @@ DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
// debugger
DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_BOOL(side_effect_free_debug_evaluate, false,
+ "use side-effect-free debug-evaluate for testing")
+DEFINE_BOOL(
+ trace_side_effect_free_debug_evaluate, false,
+ "print debug messages for side-effect-free debug-evaluate for testing")
DEFINE_BOOL(hard_abort, true, "abort by crashing")
// execution.cc
@@ -731,6 +778,8 @@ DEFINE_BOOL(age_code, true,
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
+DEFINE_BOOL(object_grouping_in_incremental_finalization, true,
+ "enable object grouping in incremental finalization")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
@@ -738,7 +787,7 @@ DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
DEFINE_NEG_IMPLICATION(minor_mc, incremental_marking)
-DEFINE_BOOL(black_allocation, false, "use black allocation")
+DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(parallel_compaction, true, "use parallel compaction")
DEFINE_BOOL(parallel_pointer_update, true,
@@ -798,8 +847,8 @@ DEFINE_BOOL(use_idle_notification, true,
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
-DEFINE_BOOL_READONLY(tf_load_ic_stub, true, "use TF LoadIC stub")
-DEFINE_BOOL(tf_store_ic_stub, true, "use TF StoreIC stub")
+DEFINE_INT(ic_stats, 0, "inline cache state transitions statistics")
+DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -837,6 +886,9 @@ DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
DEFINE_BOOL(trace_preparse, false, "trace preparsing decisions")
DEFINE_BOOL(lazy_inner_functions, false, "enable lazy parsing inner functions")
+DEFINE_BOOL(aggressive_lazy_inner_functions, false,
+ "even lazier inner function parsing")
+DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -940,6 +992,8 @@ DEFINE_BOOL(disable_old_api_accessors, false,
DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
+DEFINE_BOOL(dump_counters_nvp, false,
+ "Dump counters as name-value pairs on exit")
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_ARGS(js_arguments,
@@ -1074,10 +1128,9 @@ DEFINE_IMPLICATION(perf_basic_prof_only_functions, perf_basic_prof)
DEFINE_BOOL(perf_prof, false,
"Enable perf linux profiler (experimental annotate support).")
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
-DEFINE_BOOL(perf_prof_debug_info, false,
- "Enable debug info for perf linux profiler (experimental).")
DEFINE_BOOL(perf_prof_unwinding_info, false,
"Enable unwinding info for perf linux profiler (experimental).")
+DEFINE_IMPLICATION(perf_prof, perf_prof_unwinding_info)
DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
@@ -1100,6 +1153,10 @@ DEFINE_STRING(redirect_code_traces_to, NULL,
DEFINE_BOOL(hydrogen_track_positions, false,
"track source code positions when building IR")
+DEFINE_BOOL(print_opt_source, false,
+ "print source code of optimized and inlined functions")
+DEFINE_IMPLICATION(hydrogen_track_positions, print_opt_source)
+
//
// Disassembler only flags
//
@@ -1124,6 +1181,9 @@ DEFINE_BOOL(test_secondary_stub_cache, false,
DEFINE_BOOL(test_primary_stub_cache, false,
"test primary stub cache by disabling the secondary one")
+DEFINE_BOOL(test_small_max_function_context_stub_size, false,
+ "enable testing the function context size overflow path "
+ "by making the maximum size smaller")
// codegen-ia32.cc / codegen-arm.cc
DEFINE_BOOL(print_code, false, "print generated code")
@@ -1169,6 +1229,7 @@ DEFINE_IMPLICATION(print_all_code, trace_codegen)
DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_IMPLICATION(predictable, single_threaded)
DEFINE_NEG_IMPLICATION(predictable, memory_reducer)
+DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
//
// Threading related flags.
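Note: DEFINE_VALUE_IMPLICATION pins a dependent flag to a fixed value when
the source flag is set; the implication added above routes wasm compilation
onto the main thread in single-threaded runs. Roughly (a sketch of the
expanded semantics, not the literal macro output):

    if (FLAG_single_threaded) FLAG_wasm_num_compilation_tasks = 0;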
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 61d0dcd663..c18938c18e 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -252,7 +252,11 @@ inline ArgumentsAdaptorFrame::ArgumentsAdaptorFrame(
inline BuiltinFrame::BuiltinFrame(StackFrameIteratorBase* iterator)
: JavaScriptFrame(iterator) {}
-inline WasmFrame::WasmFrame(StackFrameIteratorBase* iterator)
+inline WasmCompiledFrame::WasmCompiledFrame(StackFrameIteratorBase* iterator)
+ : StandardFrame(iterator) {}
+
+inline WasmInterpreterEntryFrame::WasmInterpreterEntryFrame(
+ StackFrameIteratorBase* iterator)
: StandardFrame(iterator) {}
inline WasmToJsFrame::WasmToJsFrame(StackFrameIteratorBase* iterator)
@@ -311,13 +315,7 @@ bool StackTraceFrameIterator::is_javascript() const {
bool StackTraceFrameIterator::is_wasm() const { return frame()->is_wasm(); }
JavaScriptFrame* StackTraceFrameIterator::javascript_frame() const {
- DCHECK(is_javascript());
- return static_cast<JavaScriptFrame*>(frame());
-}
-
-WasmFrame* StackTraceFrameIterator::wasm_frame() const {
- DCHECK(is_wasm());
- return static_cast<WasmFrame*>(frame());
+ return JavaScriptFrame::cast(frame());
}
inline StackFrame* SafeStackFrameIterator::frame() const {
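The javascript_frame() accessor above now routes through JavaScriptFrame::cast, which pairs a debug-only type assertion with a static_cast; the new Wasm frame classes later in this diff follow the same pattern. A standalone sketch of that checked-cast idiom (types here are illustrative):

#include <cassert>

// Standalone sketch of the DCHECK-plus-static_cast idiom used by
// JavaScriptFrame::cast and WasmCompiledFrame::cast in this diff.
class Frame {
 public:
  enum Type { JAVA_SCRIPT, WASM_COMPILED };
  explicit Frame(Type type) : type_(type) {}
  Type type() const { return type_; }

 private:
  Type type_;
};

class JavaScriptFrame : public Frame {
 public:
  JavaScriptFrame() : Frame(JAVA_SCRIPT) {}
  static JavaScriptFrame* cast(Frame* frame) {
    assert(frame->type() == JAVA_SCRIPT);  // stand-in for DCHECK
    return static_cast<JavaScriptFrame*>(frame);
  }
};

int main() {
  JavaScriptFrame js_frame;
  Frame* frame = &js_frame;
  JavaScriptFrame* casted = JavaScriptFrame::cast(frame);
  (void)casted;
}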
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 3b730278db..45d26a161a 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -11,6 +11,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/ic/ic-stats.h"
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/string-stream.h"
@@ -177,10 +178,7 @@ bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
if (frame->is_java_script()) {
JavaScriptFrame* jsFrame = static_cast<JavaScriptFrame*>(frame);
if (!jsFrame->function()->IsJSFunction()) return false;
- Object* script = jsFrame->function()->shared()->script();
- // Don't show functions from native scripts to user.
- return (script->IsScript() &&
- Script::TYPE_NATIVE != Script::cast(script)->type());
+ return jsFrame->function()->shared()->IsSubjectToDebugging();
}
// apart from javascript, only wasm is valid
return frame->is_wasm();
@@ -470,11 +468,13 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
case Code::WASM_FUNCTION:
- return WASM;
+ return WASM_COMPILED;
case Code::WASM_TO_JS_FUNCTION:
return WASM_TO_JS;
case Code::JS_TO_WASM_FUNCTION:
return JS_TO_WASM;
+ case Code::WASM_INTERPRETER_ENTRY:
+ return WASM_INTERPRETER_ENTRY;
default:
// All other types should have an explicit marker
break;
@@ -498,7 +498,7 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
case CONSTRUCT:
case ARGUMENTS_ADAPTOR:
case WASM_TO_JS:
- case WASM:
+ case WASM_COMPILED:
return candidate;
case JS_TO_WASM:
case JAVA_SCRIPT:
@@ -576,6 +576,7 @@ void ExitFrame::ComputeCallerState(State* state) const {
state->fp = Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset);
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(fp() + ExitFrameConstants::kCallerPCOffset));
+ state->callee_pc_address = nullptr;
if (FLAG_enable_embedded_constant_pool) {
state->constant_pool_address = reinterpret_cast<Address*>(
fp() + ExitFrameConstants::kConstantPoolOffset);
@@ -605,7 +606,7 @@ StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
Address sp = ComputeStackPointer(fp);
FillState(fp, sp, state);
- DCHECK(*state->pc_address != NULL);
+ DCHECK_NOT_NULL(*state->pc_address);
return ComputeFrameType(fp);
}
@@ -639,11 +640,12 @@ void ExitFrame::FillState(Address fp, Address sp, State* state) {
state->fp = fp;
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(sp - 1 * kPCOnStackSize));
+ state->callee_pc_address = nullptr;
// The constant pool recorded in the exit frame is not associated
// with the pc in this state (the return address into a C entry
// stub). ComputeCallerState will retrieve the constant pool
// together with the associated caller pc.
- state->constant_pool_address = NULL;
+ state->constant_pool_address = nullptr;
}
JSFunction* BuiltinExitFrame::function() const {
@@ -747,6 +749,7 @@ void StandardFrame::ComputeCallerState(State* state) const {
state->fp = caller_fp();
state->pc_address = ResolveReturnAddressLocation(
reinterpret_cast<Address*>(ComputePCAddress(fp())));
+ state->callee_pc_address = pc_address();
state->constant_pool_address =
reinterpret_cast<Address*>(ComputeConstantPoolAddress(fp()));
}
@@ -759,6 +762,12 @@ void StandardFrame::SetCallerFp(Address caller_fp) {
bool StandardFrame::IsConstructor() const { return false; }
+void StandardFrame::Summarize(List<FrameSummary>* functions,
+ FrameSummary::Mode mode) const {
+ // This should only be called on frames which override this method.
+ UNREACHABLE();
+}
+
void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
@@ -790,7 +799,8 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
case CONSTRUCT:
case JS_TO_WASM:
case WASM_TO_JS:
- case WASM:
+ case WASM_COMPILED:
+ case WASM_INTERPRETER_ENTRY:
frame_header_size = TypedFrameConstants::kFixedFrameSizeFromFp;
break;
case JAVA_SCRIPT:
@@ -881,7 +891,7 @@ void StubFrame::Iterate(ObjectVisitor* v) const {
Code* StubFrame::unchecked_code() const {
- return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+ return isolate()->FindCodeObject(pc());
}
@@ -916,7 +926,7 @@ bool JavaScriptFrame::IsConstructor() const {
bool JavaScriptFrame::HasInlinedFrames() const {
- List<JSFunction*> functions(1);
+ List<SharedFunctionInfo*> functions(1);
GetFunctions(&functions);
return functions.length() > 1;
}
@@ -949,10 +959,9 @@ Address JavaScriptFrame::GetCallerStackPointer() const {
return fp() + StandardFrameConstants::kCallerSPOffset;
}
-
-void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) const {
+void JavaScriptFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
DCHECK(functions->length() == 0);
- functions->Add(function());
+ functions->Add(function()->shared());
}
void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
@@ -961,8 +970,9 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions,
Code* code = LookupCode();
int offset = static_cast<int>(pc() - code->instruction_start());
AbstractCode* abstract_code = AbstractCode::cast(code);
- FrameSummary summary(receiver(), function(), abstract_code, offset,
- IsConstructor(), mode);
+ FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver(),
+ function(), abstract_code,
+ offset, IsConstructor(), mode);
functions->Add(summary);
}
@@ -972,10 +982,6 @@ JSFunction* JavaScriptFrame::function() const {
Object* JavaScriptFrame::receiver() const { return GetParameter(-1); }
-Script* JavaScriptFrame::script() const {
- return Script::cast(function()->shared()->script());
-}
-
Object* JavaScriptFrame::context() const {
const int offset = StandardFrameConstants::kContextOffset;
Object* maybe_result = Memory::Object_at(fp() + offset);
@@ -983,12 +989,15 @@ Object* JavaScriptFrame::context() const {
return maybe_result;
}
+Script* JavaScriptFrame::script() const {
+ return Script::cast(function()->shared()->script());
+}
+
int JavaScriptFrame::LookupExceptionHandlerInTable(
int* stack_depth, HandlerTable::CatchPrediction* prediction) {
- Code* code = LookupCode();
- DCHECK(!code->is_optimized_code());
- int pc_offset = static_cast<int>(pc() - code->entry());
- return code->LookupRangeInHandlerTable(pc_offset, stack_depth, prediction);
+ DCHECK_EQ(0, LookupCode()->handler_table()->length());
+ DCHECK(!LookupCode()->is_optimized_code());
+ return -1;
}
void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
@@ -1020,7 +1029,6 @@ void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function,
}
}
-
void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
bool print_line_number) {
// constructor calls
@@ -1060,12 +1068,48 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
}
}
+void JavaScriptFrame::CollectFunctionAndOffsetForICStats(JSFunction* function,
+ AbstractCode* code,
+ int code_offset) {
+ auto ic_stats = ICStats::instance();
+ ICInfo& ic_info = ic_stats->Current();
+ SharedFunctionInfo* shared = function->shared();
+
+ ic_info.function_name = ic_stats->GetOrCacheFunctionName(function);
+ ic_info.script_offset = code_offset;
-void JavaScriptFrame::SaveOperandStack(FixedArray* store) const {
- int operands_count = store->length();
- DCHECK_LE(operands_count, ComputeOperandsCount());
- for (int i = 0; i < operands_count; i++) {
- store->set(i, GetOperand(i));
+ int source_pos = code->SourcePosition(code_offset);
+ Object* maybe_script = shared->script();
+ if (maybe_script->IsScript()) {
+ Script* script = Script::cast(maybe_script);
+ ic_info.line_num = script->GetLineNumber(source_pos) + 1;
+ ic_info.script_name = ic_stats->GetOrCacheScriptName(script);
+ }
+}
+
+void JavaScriptFrame::CollectTopFrameForICStats(Isolate* isolate) {
+ // constructor calls
+ DisallowHeapAllocation no_allocation;
+ JavaScriptFrameIterator it(isolate);
+ ICInfo& ic_info = ICStats::instance()->Current();
+ while (!it.done()) {
+ if (it.frame()->is_java_script()) {
+ JavaScriptFrame* frame = it.frame();
+ if (frame->IsConstructor()) ic_info.is_constructor = true;
+ JSFunction* function = frame->function();
+ int code_offset = 0;
+ if (frame->is_interpreted()) {
+ InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
+ code_offset = iframe->GetBytecodeOffset();
+ } else {
+ Code* code = frame->unchecked_code();
+ code_offset = static_cast<int>(frame->pc() - code->instruction_start());
+ }
+ CollectFunctionAndOffsetForICStats(function, function->abstract_code(),
+ code_offset);
+ return;
+ }
+ it.Advance();
}
}
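CollectTopFrameForICStats walks the stack from the top and records the first JavaScript frame it finds; the code offset it stores means different things per tier (a bytecode offset when interpreted, a pc-relative offset when compiled). A self-contained sketch of that walk over made-up frame data:

#include <cstdio>
#include <vector>

// Standalone model of the walk above: skip non-JS frames, then compute a
// tier-dependent code offset for the first JavaScript frame. All values
// are illustrative.
struct Frame {
  bool is_java_script;
  bool is_interpreted;
  int bytecode_offset;         // valid when interpreted
  long pc, instruction_start;  // valid when compiled
};

int ComputeCodeOffset(const Frame& f) {
  return f.is_interpreted ? f.bytecode_offset
                          : static_cast<int>(f.pc - f.instruction_start);
}

int main() {
  std::vector<Frame> stack = {
      {false, false, 0, 0, 0},       // e.g. an exit frame: skipped
      {true, true, 12, 0, 0},        // first JS frame: interpreted
      {true, false, 0, 4096, 4000},  // not reached
  };
  for (const Frame& f : stack) {
    if (!f.is_java_script) continue;
    std::printf("code_offset = %d\n", ComputeCodeOffset(f));  // 12
    break;
  }
}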
@@ -1080,18 +1124,19 @@ int JavaScriptFrame::ComputeParametersCount() const {
namespace {
bool CannotDeoptFromAsmCode(Code* code, JSFunction* function) {
- return code->is_turbofanned() && function->shared()->asm_function() &&
- !FLAG_turbo_asm_deoptimization;
+ return code->is_turbofanned() && function->shared()->asm_function();
}
} // namespace
-FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
- AbstractCode* abstract_code, int code_offset,
- bool is_constructor, Mode mode)
- : receiver_(receiver, function->GetIsolate()),
- function_(function),
- abstract_code_(abstract_code),
+FrameSummary::JavaScriptFrameSummary::JavaScriptFrameSummary(
+ Isolate* isolate, Object* receiver, JSFunction* function,
+ AbstractCode* abstract_code, int code_offset, bool is_constructor,
+ Mode mode)
+ : FrameSummaryBase(isolate, JAVA_SCRIPT),
+ receiver_(receiver, isolate),
+ function_(function, isolate),
+ abstract_code_(abstract_code, isolate),
code_offset_(code_offset),
is_constructor_(is_constructor) {
DCHECK(abstract_code->IsBytecodeArray() ||
@@ -1100,36 +1145,166 @@ FrameSummary::FrameSummary(Object* receiver, JSFunction* function,
mode == kApproximateSummary);
}
-FrameSummary FrameSummary::GetFirst(JavaScriptFrame* frame) {
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- frame->Summarize(&frames);
- return frames.first();
+bool FrameSummary::JavaScriptFrameSummary::is_subject_to_debugging() const {
+ return function()->shared()->IsSubjectToDebugging();
}
-void FrameSummary::Print() {
- PrintF("receiver: ");
- receiver_->ShortPrint();
- PrintF("\nfunction: ");
- function_->shared()->DebugName()->ShortPrint();
- PrintF("\ncode: ");
- abstract_code_->ShortPrint();
- if (abstract_code_->IsCode()) {
- Code* code = abstract_code_->GetCode();
- if (code->kind() == Code::FUNCTION) PrintF(" UNOPT ");
- if (code->kind() == Code::OPTIMIZED_FUNCTION) {
- if (function()->shared()->asm_function()) {
- DCHECK(CannotDeoptFromAsmCode(code, *function()));
- PrintF(" ASM ");
- } else {
- PrintF(" OPT (approximate)");
- }
- }
+int FrameSummary::JavaScriptFrameSummary::SourcePosition() const {
+ return abstract_code()->SourcePosition(code_offset());
+}
+
+int FrameSummary::JavaScriptFrameSummary::SourceStatementPosition() const {
+ return abstract_code()->SourceStatementPosition(code_offset());
+}
+
+Handle<Object> FrameSummary::JavaScriptFrameSummary::script() const {
+ return handle(function_->shared()->script(), isolate());
+}
+
+Handle<String> FrameSummary::JavaScriptFrameSummary::FunctionName() const {
+ return JSFunction::GetDebugName(function_);
+}
+
+Handle<Context> FrameSummary::JavaScriptFrameSummary::native_context() const {
+ return handle(function_->context()->native_context(), isolate());
+}
+
+FrameSummary::WasmFrameSummary::WasmFrameSummary(
+ Isolate* isolate, FrameSummary::Kind kind,
+ Handle<WasmInstanceObject> instance, bool at_to_number_conversion)
+ : FrameSummaryBase(isolate, kind),
+ wasm_instance_(instance),
+ at_to_number_conversion_(at_to_number_conversion) {}
+
+Handle<Object> FrameSummary::WasmFrameSummary::receiver() const {
+ return wasm_instance_->GetIsolate()->global_proxy();
+}
+
+#define WASM_SUMMARY_DISPATCH(type, name) \
+ type FrameSummary::WasmFrameSummary::name() const { \
+ DCHECK(kind() == Kind::WASM_COMPILED || kind() == Kind::WASM_INTERPRETED); \
+ return kind() == Kind::WASM_COMPILED \
+ ? static_cast<const WasmCompiledFrameSummary*>(this)->name() \
+ : static_cast<const WasmInterpretedFrameSummary*>(this) \
+ ->name(); \
+ }
+
+WASM_SUMMARY_DISPATCH(uint32_t, function_index)
+WASM_SUMMARY_DISPATCH(int, byte_offset)
+
+#undef WASM_SUMMARY_DISPATCH
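WASM_SUMMARY_DISPATCH generates, for each listed member, a kind-checked forwarder on the base class that static_casts to the concrete summary type, avoiding virtual dispatch on objects that are stored by value inside a union. Expanded by hand in a standalone sketch (types and return values are illustrative):

#include <cassert>
#include <cstdint>

// Standalone sketch of the dispatch pattern WASM_SUMMARY_DISPATCH expands
// to: the base class carries a kind tag and forwards each accessor to the
// matching subclass via static_cast.
class WasmSummary {
 public:
  enum Kind { WASM_COMPILED, WASM_INTERPRETED };
  explicit WasmSummary(Kind kind) : kind_(kind) {}
  Kind kind() const { return kind_; }
  uint32_t function_index() const;  // dispatched, defined below

 private:
  Kind kind_;
};

class CompiledSummary : public WasmSummary {
 public:
  CompiledSummary() : WasmSummary(WASM_COMPILED) {}
  uint32_t function_index() const { return 1; }  // e.g. read from deopt data
};

class InterpretedSummary : public WasmSummary {
 public:
  InterpretedSummary() : WasmSummary(WASM_INTERPRETED) {}
  uint32_t function_index() const { return 2; }  // e.g. stored directly
};

// Hand expansion of WASM_SUMMARY_DISPATCH(uint32_t, function_index).
uint32_t WasmSummary::function_index() const {
  assert(kind() == WASM_COMPILED || kind() == WASM_INTERPRETED);
  return kind() == WASM_COMPILED
             ? static_cast<const CompiledSummary*>(this)->function_index()
             : static_cast<const InterpretedSummary*>(this)->function_index();
}

int main() {
  CompiledSummary compiled;
  const WasmSummary& summary = compiled;
  return summary.function_index() == 1 ? 0 : 1;
}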
+
+int FrameSummary::WasmFrameSummary::SourcePosition() const {
+ int offset = byte_offset();
+ Handle<WasmCompiledModule> compiled_module(wasm_instance()->compiled_module(),
+ isolate());
+ if (compiled_module->is_asm_js()) {
+ offset = WasmCompiledModule::GetAsmJsSourcePosition(
+ compiled_module, function_index(), offset, at_to_number_conversion());
} else {
- PrintF(" BYTECODE ");
+ offset += compiled_module->GetFunctionOffset(function_index());
}
- PrintF("\npc: %d\n", code_offset_);
+ return offset;
}
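SourcePosition above returns either an asm.js source position or, for real wasm, the function-relative byte offset shifted to be module-relative. The wasm branch is a plain offset addition; a toy version with made-up function offsets:

#include <cstdio>

// Toy version of the non-asm.js branch above: turn a function-relative
// wasm byte offset into a module-relative one by adding the function's
// start offset. All offsets are made up.
int GetFunctionOffset(int function_index) {
  static const int kFunctionStarts[] = {0, 120, 310};
  return kFunctionStarts[function_index];
}

int main() {
  int function_index = 2;
  int byte_offset = 25;  // offset within the function body
  std::printf("module-relative offset = %d\n",
              byte_offset + GetFunctionOffset(function_index));  // 335
}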
+Handle<Script> FrameSummary::WasmFrameSummary::script() const {
+ return handle(wasm_instance()->compiled_module()->script());
+}
+
+Handle<String> FrameSummary::WasmFrameSummary::FunctionName() const {
+ Handle<WasmCompiledModule> compiled_module(
+ wasm_instance()->compiled_module());
+ return WasmCompiledModule::GetFunctionName(compiled_module->GetIsolate(),
+ compiled_module, function_index());
+}
+
+Handle<Context> FrameSummary::WasmFrameSummary::native_context() const {
+ return wasm_instance()->compiled_module()->native_context();
+}
+
+FrameSummary::WasmCompiledFrameSummary::WasmCompiledFrameSummary(
+ Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> code,
+ int code_offset, bool at_to_number_conversion)
+ : WasmFrameSummary(isolate, WASM_COMPILED, instance,
+ at_to_number_conversion),
+ code_(code),
+ code_offset_(code_offset) {}
+
+uint32_t FrameSummary::WasmCompiledFrameSummary::function_index() const {
+ FixedArray* deopt_data = code()->deoptimization_data();
+ DCHECK_EQ(2, deopt_data->length());
+ DCHECK(deopt_data->get(1)->IsSmi());
+ int val = Smi::cast(deopt_data->get(1))->value();
+ DCHECK_LE(0, val);
+ return static_cast<uint32_t>(val);
+}
+
+int FrameSummary::WasmCompiledFrameSummary::byte_offset() const {
+ return AbstractCode::cast(*code())->SourcePosition(code_offset());
+}
+
+FrameSummary::WasmInterpretedFrameSummary::WasmInterpretedFrameSummary(
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t function_index, int byte_offset)
+ : WasmFrameSummary(isolate, WASM_INTERPRETED, instance, false),
+ function_index_(function_index),
+ byte_offset_(byte_offset) {}
+
+FrameSummary::~FrameSummary() {
+#define FRAME_SUMMARY_DESTR(kind, type, field, desc) \
+ case kind: \
+ field.~type(); \
+ break;
+ switch (base_.kind()) {
+ FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_DESTR)
+ default:
+ UNREACHABLE();
+ }
+#undef FRAME_SUMMARY_DESTR
+}
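Because FrameSummary keeps its variants in a union, member destructors never run implicitly; the destructor above dispatches on the kind tag and invokes the right one explicitly. The same manually-managed tagged-union pattern in a minimal standalone form (member types are illustrative):

#include <string>
#include <utility>

// Minimal tagged union with an explicit destructor call, mirroring the
// FRAME_SUMMARY_DESTR pattern: union members with non-trivial destructors
// are never destroyed implicitly, so the owner must do it by kind.
class Summary {
 public:
  enum Kind { kInt, kString };

  explicit Summary(int v) : kind_(kInt), int_(v) {}
  explicit Summary(std::string v) : kind_(kString), string_(std::move(v)) {}

  ~Summary() {
    switch (kind_) {
      case kInt:
        break;  // trivially destructible, nothing to do
      case kString:
        string_.~basic_string();  // explicit destructor call
        break;
    }
  }

 private:
  Kind kind_;
  union {
    int int_;
    std::string string_;
  };
};

int main() {
  Summary a(42);
  Summary b(std::string("frame"));
  (void)a;
  (void)b;
}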
+
+FrameSummary FrameSummary::Get(const StandardFrame* frame, int index) {
+ DCHECK_LE(0, index);
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ DCHECK_GT(frames.length(), index);
+ return frames[index];
+}
+
+FrameSummary FrameSummary::GetSingle(const StandardFrame* frame) {
+ List<FrameSummary> frames(1);
+ frame->Summarize(&frames);
+ DCHECK_EQ(1, frames.length());
+ return frames.first();
+}
+
+#define FRAME_SUMMARY_DISPATCH(ret, name) \
+ ret FrameSummary::name() const { \
+ switch (base_.kind()) { \
+ case JAVA_SCRIPT: \
+ return java_script_summary_.name(); \
+ case WASM_COMPILED: \
+ return wasm_compiled_summary_.name(); \
+ case WASM_INTERPRETED: \
+ return wasm_interpreted_summary_.name(); \
+ default: \
+ UNREACHABLE(); \
+ return ret{}; \
+ } \
+ }
+
+FRAME_SUMMARY_DISPATCH(Handle<Object>, receiver)
+FRAME_SUMMARY_DISPATCH(int, code_offset)
+FRAME_SUMMARY_DISPATCH(bool, is_constructor)
+FRAME_SUMMARY_DISPATCH(bool, is_subject_to_debugging)
+FRAME_SUMMARY_DISPATCH(Handle<Object>, script)
+FRAME_SUMMARY_DISPATCH(int, SourcePosition)
+FRAME_SUMMARY_DISPATCH(int, SourceStatementPosition)
+FRAME_SUMMARY_DISPATCH(Handle<String>, FunctionName)
+FRAME_SUMMARY_DISPATCH(Handle<Context>, native_context)
+
+#undef FRAME_SUMMARY_DISPATCH
+
void OptimizedFrame::Summarize(List<FrameSummary>* frames,
FrameSummary::Mode mode) const {
DCHECK(frames->length() == 0);
@@ -1226,8 +1401,9 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames,
code_offset = bailout_id.ToInt(); // Points to current bytecode.
abstract_code = AbstractCode::cast(shared_info->bytecode_array());
}
- FrameSummary summary(receiver, function, abstract_code, code_offset,
- is_constructor);
+ FrameSummary::JavaScriptFrameSummary summary(isolate(), receiver,
+ function, abstract_code,
+ code_offset, is_constructor);
frames->Add(summary);
is_constructor = false;
} else if (frame_opcode == Translation::CONSTRUCT_STUB_FRAME) {
@@ -1297,7 +1473,7 @@ Object* OptimizedFrame::receiver() const {
}
}
-void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
+void OptimizedFrame::GetFunctions(List<SharedFunctionInfo*>* functions) const {
DCHECK(functions->length() == 0);
DCHECK(is_optimized());
@@ -1327,25 +1503,20 @@ void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) const {
// in the deoptimization translation are ordered bottom-to-top.
while (jsframe_count != 0) {
opcode = static_cast<Translation::Opcode>(it.Next());
- // Skip over operands to advance to the next opcode.
- it.Skip(Translation::NumberOfOperandsFor(opcode));
if (opcode == Translation::JS_FRAME ||
opcode == Translation::INTERPRETED_FRAME) {
+ it.Next(); // Skip bailout id.
jsframe_count--;
- // The translation commands are ordered and the function is always at the
- // first position.
- opcode = static_cast<Translation::Opcode>(it.Next());
+ // The second operand of the frame points to the function.
+ Object* shared = literal_array->get(it.Next());
+ functions->Add(SharedFunctionInfo::cast(shared));
- // Get the correct function in the optimized frame.
- Object* function;
- if (opcode == Translation::LITERAL) {
- function = literal_array->get(it.Next());
- } else {
- CHECK_EQ(Translation::STACK_SLOT, opcode);
- function = StackSlotAt(it.Next());
- }
- functions->Add(JSFunction::cast(function));
+ // Skip over remaining operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode) - 2);
+ } else {
+ // Skip over operands to advance to the next opcode.
+ it.Skip(Translation::NumberOfOperandsFor(opcode));
}
}
}
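The rewritten loop above treats the translation as a flat opcode stream: for JS and interpreted frames it consumes the bailout id, reads the literal index of the SharedFunctionInfo as the second operand, then skips whatever operands remain; every other opcode is skipped whole. A self-contained sketch of that traversal over a fake stream (opcode numbering and operand counts are made up):

#include <cstdio>
#include <vector>

// Sketch of the opcode-stream walk above: each entry is an opcode followed
// by a fixed number of operands; frames of interest read their first two
// operands and skip the rest.
enum Op { JS_FRAME = 0, LITERAL = 1 };

int NumberOfOperandsFor(Op op) { return op == JS_FRAME ? 4 : 1; }

int main() {
  // Stream: JS_FRAME(bailout_id=7, literal_index=3, x, y), LITERAL(3)
  std::vector<int> stream = {JS_FRAME, 7, 3, 0, 0, LITERAL, 3};
  size_t i = 0;
  while (i < stream.size()) {
    Op op = static_cast<Op>(stream[i++]);
    if (op == JS_FRAME) {
      i++;  // skip bailout id
      int literal_index = stream[i++];  // second operand: the function
      std::printf("function literal at index %d\n", literal_index);
      i += NumberOfOperandsFor(op) - 2;  // skip remaining operands
    } else {
      i += NumberOfOperandsFor(op);  // skip operands of other opcodes
    }
  }
}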
@@ -1370,8 +1541,8 @@ int InterpretedFrame::position() const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
BytecodeArray* bytecode = function()->shared()->bytecode_array();
- return bytecode->LookupRangeInHandlerTable(GetBytecodeOffset(),
- context_register, prediction);
+ HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ return table->LookupRange(GetBytecodeOffset(), context_register, prediction);
}
int InterpretedFrame::GetBytecodeOffset() const {
@@ -1441,8 +1612,9 @@ void InterpretedFrame::Summarize(List<FrameSummary>* functions,
DCHECK(functions->length() == 0);
AbstractCode* abstract_code =
AbstractCode::cast(function()->shared()->bytecode_array());
- FrameSummary summary(receiver(), function(), abstract_code,
- GetBytecodeOffset(), IsConstructor());
+ FrameSummary::JavaScriptFrameSummary summary(
+ isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
+ IsConstructor());
functions->Add(summary);
}
@@ -1488,49 +1660,83 @@ void StackFrame::PrintIndex(StringStream* accumulator,
accumulator->Add((mode == OVERVIEW) ? "%5d: " : "[%d]: ", index);
}
-void WasmFrame::Print(StringStream* accumulator, PrintMode mode,
- int index) const {
- accumulator->Add("wasm frame");
+void WasmCompiledFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add("WASM [");
+ Script* script = this->script();
+ accumulator->PrintName(script->name());
+ int pc = static_cast<int>(this->pc() - LookupCode()->instruction_start());
+ Object* instance = this->wasm_instance();
+ Vector<const uint8_t> raw_func_name =
+ WasmInstanceObject::cast(instance)->compiled_module()->GetRawFunctionName(
+ this->function_index());
+ const int kMaxPrintedFunctionName = 64;
+ char func_name[kMaxPrintedFunctionName + 1];
+ int func_name_len = std::min(kMaxPrintedFunctionName, raw_func_name.length());
+ memcpy(func_name, raw_func_name.start(), func_name_len);
+ func_name[func_name_len] = '\0';
+ accumulator->Add("], function #%u ('%s'), pc=%p, pos=%d\n",
+ this->function_index(), func_name, pc, this->position());
+ if (mode != OVERVIEW) accumulator->Add("\n");
+}
+
+Code* WasmCompiledFrame::unchecked_code() const {
+ return isolate()->FindCodeObject(pc());
+}
+
+void WasmCompiledFrame::Iterate(ObjectVisitor* v) const {
+ IterateCompiledFrame(v);
}
-Code* WasmFrame::unchecked_code() const {
- return static_cast<Code*>(isolate()->FindCodeObject(pc()));
+Address WasmCompiledFrame::GetCallerStackPointer() const {
+ return fp() + ExitFrameConstants::kCallerSPOffset;
}
-void WasmFrame::Iterate(ObjectVisitor* v) const { IterateCompiledFrame(v); }
+WasmInstanceObject* WasmCompiledFrame::wasm_instance() const {
+ WasmInstanceObject* obj = wasm::GetOwningWasmInstance(LookupCode());
+ // This is a live stack frame; it must have a live instance.
+ DCHECK_NOT_NULL(obj);
+ return obj;
+}
-Address WasmFrame::GetCallerStackPointer() const {
- return fp() + ExitFrameConstants::kCallerSPOffset;
+uint32_t WasmCompiledFrame::function_index() const {
+ return FrameSummary::GetSingle(this).AsWasmCompiled().function_index();
}
-Object* WasmFrame::wasm_instance() const {
- Object* ret = wasm::GetOwningWasmInstance(LookupCode());
- if (ret == nullptr) ret = isolate()->heap()->undefined_value();
- return ret;
+Script* WasmCompiledFrame::script() const {
+ return wasm_instance()->compiled_module()->script();
}
-uint32_t WasmFrame::function_index() const {
- FixedArray* deopt_data = LookupCode()->deoptimization_data();
- DCHECK(deopt_data->length() == 2);
- return Smi::cast(deopt_data->get(1))->value();
+int WasmCompiledFrame::position() const {
+ return FrameSummary::GetSingle(this).SourcePosition();
}
-Script* WasmFrame::script() const {
- Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
- return *wasm::GetScript(instance);
+void WasmCompiledFrame::Summarize(List<FrameSummary>* functions,
+ FrameSummary::Mode mode) const {
+ DCHECK_EQ(0, functions->length());
+ Handle<Code> code(LookupCode(), isolate());
+ int offset = static_cast<int>(pc() - code->instruction_start());
+ Handle<WasmInstanceObject> instance(wasm_instance(), isolate());
+ FrameSummary::WasmCompiledFrameSummary summary(
+ isolate(), instance, code, offset, at_to_number_conversion());
+ functions->Add(summary);
}
-int WasmFrame::position() const {
- int position = StandardFrame::position();
- if (wasm::WasmIsAsmJs(wasm_instance(), isolate())) {
- Handle<JSObject> instance(JSObject::cast(wasm_instance()), isolate());
- position =
- wasm::GetAsmWasmSourcePosition(instance, function_index(), position);
- }
- return position;
+bool WasmCompiledFrame::at_to_number_conversion() const {
+ // Check whether our callee is a WASM_TO_JS frame, and this frame is at the
+ // ToNumber conversion call.
+ Address callee_pc = reinterpret_cast<Address>(this->callee_pc());
+ Code* code = callee_pc ? isolate()->FindCodeObject(callee_pc) : nullptr;
+ if (!code || code->kind() != Code::WASM_TO_JS_FUNCTION) return false;
+ int offset = static_cast<int>(callee_pc - code->instruction_start());
+ int pos = AbstractCode::cast(code)->SourcePosition(offset);
+ DCHECK(pos == 0 || pos == 1);
+ // The imported call has position 0, ToNumber has position 1.
+ return !!pos;
}
-int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
DCHECK_NOT_NULL(stack_slots);
Code* code = LookupCode();
HandlerTable* table = HandlerTable::cast(code->handler_table());
@@ -1539,6 +1745,49 @@ int WasmFrame::LookupExceptionHandlerInTable(int* stack_slots) {
return table->LookupReturn(pc_offset);
}
+void WasmInterpreterEntryFrame::Iterate(ObjectVisitor* v) const {
+ IterateCompiledFrame(v);
+}
+
+void WasmInterpreterEntryFrame::Print(StringStream* accumulator, PrintMode mode,
+ int index) const {
+ PrintIndex(accumulator, mode, index);
+ accumulator->Add("WASM TO INTERPRETER [");
+ Script* script = this->script();
+ accumulator->PrintName(script->name());
+ accumulator->Add("]");
+ if (mode != OVERVIEW) accumulator->Add("\n");
+}
+
+void WasmInterpreterEntryFrame::Summarize(List<FrameSummary>* functions,
+ FrameSummary::Mode mode) const {
+ // TODO(clemensh): Implement this.
+ UNIMPLEMENTED();
+}
+
+Code* WasmInterpreterEntryFrame::unchecked_code() const {
+ return isolate()->FindCodeObject(pc());
+}
+
+WasmInstanceObject* WasmInterpreterEntryFrame::wasm_instance() const {
+ WasmInstanceObject* ret = wasm::GetOwningWasmInstance(LookupCode());
+ // This is a live stack frame; there must be a live wasm instance available.
+ DCHECK_NOT_NULL(ret);
+ return ret;
+}
+
+Script* WasmInterpreterEntryFrame::script() const {
+ return wasm_instance()->compiled_module()->script();
+}
+
+int WasmInterpreterEntryFrame::position() const {
+ return FrameSummary::GetFirst(this).AsWasmInterpreted().SourcePosition();
+}
+
+Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
+ return fp() + ExitFrameConstants::kCallerSPOffset;
+}
+
namespace {
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 1daa36404b..74131e874b 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -29,9 +29,10 @@ int JSCallerSavedCode(int n);
// Forward declarations.
class ExternalCallbackScope;
+class Isolate;
class StackFrameIteratorBase;
class ThreadLocalTop;
-class Isolate;
+class WasmInstanceObject;
class InnerPointerToCodeCache {
public:
@@ -66,13 +67,6 @@ class InnerPointerToCodeCache {
};
-// Every try-block pushes the context register.
-class TryBlockConstant : public AllStatic {
- public:
- static const int kElementCount = 1;
-};
-
-
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
@@ -103,9 +97,10 @@ class StackHandler BASE_EMBEDDED {
V(EXIT, ExitFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(OPTIMIZED, OptimizedFrame) \
- V(WASM, WasmFrame) \
+ V(WASM_COMPILED, WasmCompiledFrame) \
V(WASM_TO_JS, WasmToJsFrame) \
V(JS_TO_WASM, JsToWasmFrame) \
+ V(WASM_INTERPRETER_ENTRY, WasmInterpreterEntryFrame) \
V(INTERPRETED, InterpretedFrame) \
V(STUB, StubFrame) \
V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
@@ -448,12 +443,11 @@ class StackFrame BASE_EMBEDDED {
};
struct State {
- State() : sp(NULL), fp(NULL), pc_address(NULL),
- constant_pool_address(NULL) { }
- Address sp;
- Address fp;
- Address* pc_address;
- Address* constant_pool_address;
+ Address sp = nullptr;
+ Address fp = nullptr;
+ Address* pc_address = nullptr;
+ Address* callee_pc_address = nullptr;
+ Address* constant_pool_address = nullptr;
};
// Copy constructor; it breaks the connection to host iterator
@@ -470,9 +464,12 @@ class StackFrame BASE_EMBEDDED {
bool is_exit() const { return type() == EXIT; }
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_interpreted() const { return type() == INTERPRETED; }
- bool is_wasm() const { return type() == WASM; }
+ bool is_wasm_compiled() const { return type() == WASM_COMPILED; }
bool is_wasm_to_js() const { return type() == WASM_TO_JS; }
bool is_js_to_wasm() const { return type() == JS_TO_WASM; }
+ bool is_wasm_interpreter_entry() const {
+ return type() == WASM_INTERPRETER_ENTRY;
+ }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_builtin() const { return type() == BUILTIN; }
bool is_internal() const { return type() == INTERNAL; }
@@ -488,10 +485,17 @@ class StackFrame BASE_EMBEDDED {
return (type == JAVA_SCRIPT) || (type == OPTIMIZED) ||
(type == INTERPRETED) || (type == BUILTIN);
}
+ bool is_wasm() const {
+ Type type = this->type();
+ return type == WASM_COMPILED || type == WASM_INTERPRETER_ENTRY;
+ }
// Accessors.
Address sp() const { return state_.sp; }
Address fp() const { return state_.fp; }
+ Address callee_pc() const {
+ return state_.callee_pc_address ? *state_.callee_pc_address : nullptr;
+ }
Address caller_sp() const { return GetCallerStackPointer(); }
// If this frame is optimized and was dynamically aligned return its old
@@ -733,7 +737,7 @@ class BuiltinExitFrame : public ExitFrame {
friend class StackFrameIteratorBase;
};
-class JavaScriptFrame;
+class StandardFrame;
class FrameSummary BASE_EMBEDDED {
public:
@@ -744,26 +748,154 @@ class FrameSummary BASE_EMBEDDED {
// information, but it might miss frames.
enum Mode { kExactSummary, kApproximateSummary };
- FrameSummary(Object* receiver, JSFunction* function,
- AbstractCode* abstract_code, int code_offset,
- bool is_constructor, Mode mode = kExactSummary);
+// Subclasses for the different summary kinds:
+#define FRAME_SUMMARY_VARIANTS(F) \
+ F(JAVA_SCRIPT, JavaScriptFrameSummary, java_script_summary_, JavaScript) \
+ F(WASM_COMPILED, WasmCompiledFrameSummary, wasm_compiled_summary_, \
+ WasmCompiled) \
+ F(WASM_INTERPRETED, WasmInterpretedFrameSummary, wasm_interpreted_summary_, \
+ WasmInterpreted)
+
+#define FRAME_SUMMARY_KIND(kind, type, field, desc) kind,
+ enum Kind { FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_KIND) };
+#undef FRAME_SUMMARY_KIND
+
+ class FrameSummaryBase {
+ public:
+ FrameSummaryBase(Isolate* isolate, Kind kind)
+ : isolate_(isolate), kind_(kind) {}
+ Isolate* isolate() const { return isolate_; }
+ Kind kind() const { return kind_; }
+
+ private:
+ Isolate* isolate_;
+ Kind kind_;
+ };
+
+ class JavaScriptFrameSummary : public FrameSummaryBase {
+ public:
+ JavaScriptFrameSummary(Isolate* isolate, Object* receiver,
+ JSFunction* function, AbstractCode* abstract_code,
+ int code_offset, bool is_constructor,
+ Mode mode = kExactSummary);
+
+ Handle<Object> receiver() const { return receiver_; }
+ Handle<JSFunction> function() const { return function_; }
+ Handle<AbstractCode> abstract_code() const { return abstract_code_; }
+ int code_offset() const { return code_offset_; }
+ bool is_constructor() const { return is_constructor_; }
+ bool is_subject_to_debugging() const;
+ int SourcePosition() const;
+ int SourceStatementPosition() const;
+ Handle<Object> script() const;
+ Handle<String> FunctionName() const;
+ Handle<Context> native_context() const;
+
+ private:
+ Handle<Object> receiver_;
+ Handle<JSFunction> function_;
+ Handle<AbstractCode> abstract_code_;
+ int code_offset_;
+ bool is_constructor_;
+ };
+
+ class WasmFrameSummary : public FrameSummaryBase {
+ protected:
+ WasmFrameSummary(Isolate*, Kind, Handle<WasmInstanceObject>,
+ bool at_to_number_conversion);
+
+ public:
+ Handle<Object> receiver() const;
+ uint32_t function_index() const;
+ int byte_offset() const;
+ bool is_constructor() const { return false; }
+ bool is_subject_to_debugging() const { return true; }
+ int SourcePosition() const;
+ int SourceStatementPosition() const { return SourcePosition(); }
+ Handle<Script> script() const;
+ Handle<WasmInstanceObject> wasm_instance() const { return wasm_instance_; }
+ Handle<String> FunctionName() const;
+ Handle<Context> native_context() const;
+ bool at_to_number_conversion() const { return at_to_number_conversion_; }
+
+ private:
+ Handle<WasmInstanceObject> wasm_instance_;
+ bool at_to_number_conversion_;
+ };
+
+ class WasmCompiledFrameSummary : public WasmFrameSummary {
+ public:
+ WasmCompiledFrameSummary(Isolate*, Handle<WasmInstanceObject>, Handle<Code>,
+ int code_offset, bool at_to_number_conversion);
+ uint32_t function_index() const;
+ Handle<Code> code() const { return code_; }
+ int code_offset() const { return code_offset_; }
+ int byte_offset() const;
+
+ private:
+ Handle<Code> code_;
+ int code_offset_;
+ };
+
+ class WasmInterpretedFrameSummary : public WasmFrameSummary {
+ public:
+ WasmInterpretedFrameSummary(Isolate*, Handle<WasmInstanceObject>,
+ uint32_t function_index, int byte_offset);
+ uint32_t function_index() const { return function_index_; }
+ int code_offset() const { return byte_offset_; }
+ int byte_offset() const { return byte_offset_; }
+
+ private:
+ uint32_t function_index_;
+ int byte_offset_;
+ };
- static FrameSummary GetFirst(JavaScriptFrame* frame);
+#undef FRAME_SUMMARY_FIELD
+#define FRAME_SUMMARY_CONS(kind, type, field, desc) \
+ FrameSummary(type summ) : field(summ) {} // NOLINT
+ FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CONS)
+#undef FRAME_SUMMARY_CONS
- Handle<Object> receiver() const { return receiver_; }
- Handle<JSFunction> function() const { return function_; }
- Handle<AbstractCode> abstract_code() const { return abstract_code_; }
- int code_offset() const { return code_offset_; }
- bool is_constructor() const { return is_constructor_; }
+ ~FrameSummary();
- void Print();
+ static inline FrameSummary GetFirst(const StandardFrame* frame) {
+ return Get(frame, 0);
+ }
+ static FrameSummary Get(const StandardFrame* frame, int index);
+ static FrameSummary GetSingle(const StandardFrame* frame);
+
+ // Dispatched accessors.
+ Handle<Object> receiver() const;
+ int code_offset() const;
+ bool is_constructor() const;
+ bool is_subject_to_debugging() const;
+ Handle<Object> script() const;
+ int SourcePosition() const;
+ int SourceStatementPosition() const;
+ Handle<String> FunctionName() const;
+ Handle<Context> native_context() const;
+
+#define FRAME_SUMMARY_CAST(kind_, type, field, desc) \
+ bool Is##desc() const { return base_.kind() == kind_; } \
+ const type& As##desc() const { \
+ DCHECK_EQ(base_.kind(), kind_); \
+ return field; \
+ }
+ FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_CAST)
+#undef FRAME_SUMMARY_CAST
+
+ bool IsWasm() const { return IsWasmCompiled() || IsWasmInterpreted(); }
+ const WasmFrameSummary& AsWasm() const {
+ if (IsWasmCompiled()) return AsWasmCompiled();
+ return AsWasmInterpreted();
+ }
private:
- Handle<Object> receiver_;
- Handle<JSFunction> function_;
- Handle<AbstractCode> abstract_code_;
- int code_offset_;
- bool is_constructor_;
+#define FRAME_SUMMARY_FIELD(kind, type, field, desc) type field;
+ union {
+ FrameSummaryBase base_;
+ FRAME_SUMMARY_VARIANTS(FRAME_SUMMARY_FIELD)
+ };
};
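FRAME_SUMMARY_VARIANTS is an X-macro: every variant appears in exactly one row, and each use site supplies a projection macro that picks out the columns it needs (the enum value, the member type, the field name, or the cast name). A minimal standalone X-macro of the same shape:

#include <cstdio>

// One row per variant: F(kind, type, field, desc), the same shape as
// FRAME_SUMMARY_VARIANTS above. Rows here are illustrative.
#define VARIANTS(F)            \
  F(JAVA_SCRIPT, int, js_, Js) \
  F(WASM_COMPILED, double, wasm_, Wasm)

// Project the first column into an enum...
#define V_KIND(kind, type, field, desc) kind,
enum Kind { VARIANTS(V_KIND) };
#undef V_KIND

// ...and the type/field columns into union members.
union Payload {
#define V_FIELD(kind, type, field, desc) type field;
  VARIANTS(V_FIELD)
#undef V_FIELD
};

int main() {
  Payload payload;
  payload.js_ = 42;
  std::printf("kind=%d js=%d\n", static_cast<int>(JAVA_SCRIPT), payload.js_);
}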
class StandardFrame : public StackFrame {
@@ -791,6 +923,11 @@ class StandardFrame : public StackFrame {
// Check if this frame is a constructor frame invoked through 'new'.
virtual bool IsConstructor() const;
+ // Build a list with summaries for this frame including all inlined frames.
+ virtual void Summarize(
+ List<FrameSummary>* frames,
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+
static StandardFrame* cast(StackFrame* frame) {
DCHECK(frame->is_standard());
return static_cast<StandardFrame*>(frame);
@@ -840,10 +977,9 @@ class JavaScriptFrame : public StandardFrame {
public:
Type type() const override { return JAVA_SCRIPT; }
- // Build a list with summaries for this frame including all inlined frames.
- virtual void Summarize(
+ void Summarize(
List<FrameSummary>* frames,
- FrameSummary::Mode mode = FrameSummary::kExactSummary) const;
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
// Accessors.
virtual JSFunction* function() const;
@@ -863,9 +999,6 @@ class JavaScriptFrame : public StandardFrame {
inline Object* GetOperand(int index) const;
inline int ComputeOperandsCount() const;
- // Generator support to preserve operand stack.
- void SaveOperandStack(FixedArray* store) const;
-
// Debugger access.
void SetParameterValue(int index, Object* value) const;
@@ -892,12 +1025,11 @@ class JavaScriptFrame : public StandardFrame {
// Determine the code for the frame.
Code* unchecked_code() const override;
- // Return a list with JSFunctions of this frame.
- virtual void GetFunctions(List<JSFunction*>* functions) const;
+ // Return a list with {SharedFunctionInfo} objects of this frame.
+ virtual void GetFunctions(List<SharedFunctionInfo*>* functions) const;
// Lookup exception handler for current {pc}, returns -1 if none found. Also
// returns data associated with the handler site specific to the frame type:
- // - JavaScriptFrame : Data is the stack depth at entry of the try-block.
// - OptimizedFrame : Data is the stack slot count of the entire frame.
// - InterpretedFrame: Data is the register index holding the context.
virtual int LookupExceptionHandlerInTable(
@@ -920,6 +1052,11 @@ class JavaScriptFrame : public StandardFrame {
static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
bool print_line_number);
+ static void CollectFunctionAndOffsetForICStats(JSFunction* function,
+ AbstractCode* code,
+ int code_offset);
+ static void CollectTopFrameForICStats(Isolate* isolate);
+
protected:
inline explicit JavaScriptFrame(StackFrameIteratorBase* iterator);
@@ -968,10 +1105,10 @@ class OptimizedFrame : public JavaScriptFrame {
// GC support.
void Iterate(ObjectVisitor* v) const override;
- // Return a list with JSFunctions of this frame.
+ // Return a list with {SharedFunctionInfo} objects of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
// is the top-most activation)
- void GetFunctions(List<JSFunction*>* functions) const override;
+ void GetFunctions(List<SharedFunctionInfo*>* functions) const override;
void Summarize(
List<FrameSummary>* frames,
@@ -1094,9 +1231,9 @@ class BuiltinFrame final : public JavaScriptFrame {
friend class StackFrameIteratorBase;
};
-class WasmFrame : public StandardFrame {
+class WasmCompiledFrame : public StandardFrame {
public:
- Type type() const override { return WASM; }
+ Type type() const override { return WASM_COMPILED; }
// GC support.
void Iterate(ObjectVisitor* v) const override;
@@ -1113,18 +1250,59 @@ class WasmFrame : public StandardFrame {
Code* unchecked_code() const override;
// Accessors.
- Object* wasm_instance() const;
+ WasmInstanceObject* wasm_instance() const;
uint32_t function_index() const;
Script* script() const override;
int position() const override;
+ bool at_to_number_conversion() const;
+
+ void Summarize(List<FrameSummary>* frames,
+ FrameSummary::Mode mode) const override;
+
+ static WasmCompiledFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm_compiled());
+ return static_cast<WasmCompiledFrame*>(frame);
+ }
+
+ protected:
+ inline explicit WasmCompiledFrame(StackFrameIteratorBase* iterator);
+
+ Address GetCallerStackPointer() const override;
+
+ private:
+ friend class StackFrameIteratorBase;
+};
+
+class WasmInterpreterEntryFrame : public StandardFrame {
+ public:
+ Type type() const override { return WASM_INTERPRETER_ENTRY; }
+
+ // GC support.
+ void Iterate(ObjectVisitor* v) const override;
+
+ // Printing support.
+ void Print(StringStream* accumulator, PrintMode mode,
+ int index) const override;
+
+ void Summarize(
+ List<FrameSummary>* frames,
+ FrameSummary::Mode mode = FrameSummary::kExactSummary) const override;
+
+ // Determine the code for the frame.
+ Code* unchecked_code() const override;
+
+ // Accessors.
+ WasmInstanceObject* wasm_instance() const;
+ Script* script() const override;
+ int position() const override;
- static WasmFrame* cast(StackFrame* frame) {
- DCHECK(frame->is_wasm());
- return static_cast<WasmFrame*>(frame);
+ static WasmInterpreterEntryFrame* cast(StackFrame* frame) {
+ DCHECK(frame->is_wasm_interpreter_entry());
+ return static_cast<WasmInterpreterEntryFrame*>(frame);
}
protected:
- inline explicit WasmFrame(StackFrameIteratorBase* iterator);
+ inline explicit WasmInterpreterEntryFrame(StackFrameIteratorBase* iterator);
Address GetCallerStackPointer() const override;
@@ -1315,7 +1493,6 @@ class StackTraceFrameIterator BASE_EMBEDDED {
inline bool is_javascript() const;
inline bool is_wasm() const;
inline JavaScriptFrame* javascript_frame() const;
- inline WasmFrame* wasm_frame() const;
// Advance to the frame holding the arguments for the current
// frame. This only affects the current frame if it is a javascript frame and
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 22c991bed1..4b61bf9ab8 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_ARM
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/arm/code-stubs-arm.h"
@@ -144,8 +145,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -200,15 +199,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(r3); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r1);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
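The prologue above now allocates small function contexts through the FastNewFunctionContext builtin and falls back to Runtime::kNewFunctionContext (passing the scope type as an extra Smi argument) once the slot count exceeds the builtin's maximum. The decision itself, modeled standalone (the threshold constant is illustrative, not V8's actual value):

#include <cstdio>

// Standalone model of the slot-count check above. The threshold is
// illustrative; the real one comes from
// ConstructorBuiltinsAssembler::MaximumFunctionContextSlots().
constexpr int kMaximumFunctionContextSlots = 255;

const char* ContextAllocationPath(int slots) {
  return slots <= kMaximumFunctionContextSlots
             ? "FastNewFunctionContext builtin"
             : "Runtime::kNewFunctionContext";
}

int main() {
  std::printf("%s\n", ContextAllocationPath(10));    // fast path
  std::printf("%s\n", ContextAllocationPath(1000));  // runtime fallback
}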
@@ -255,37 +257,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_r1) {
- __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep it marked as such.
- }
- SetVar(this_function_var, r1, r0, r2);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, r3, r0, r2);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_r1) {
- __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register_r1 = false;
- SetVar(rest_param, r0, r1, r2);
- }
+ // We don't support new.target and rest parameters here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -554,10 +529,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
@@ -783,6 +756,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -809,17 +783,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ mov(r2, Operand(variable->name()));
- __ Push(r2);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -832,6 +796,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -870,17 +835,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r2, Operand(variable->name()));
- PushOperand(r2);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1184,92 +1139,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = r1;
- Register temp = r2;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use a normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = r3;
- Register temp = r4;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ jmp(done);
- }
- }
-}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1277,8 +1146,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1311,24 +1179,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(r0);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1355,7 +1206,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
@@ -1365,8 +1217,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1376,10 +1229,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1389,6 +1241,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1437,21 +1290,21 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1474,73 +1327,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(r0); // Save result on the stack
- result_saved = true;
- }
-
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- PushOperand(r0);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
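The removed comment above documents the old two-phase lowering of object literals. A minimal standalone sketch (plain C++ with stand-in types, not V8's) of the split it relied on: everything before the first computed property name belongs to the precomputed boilerplate, everything after it is defined one call at a time to preserve insertion order.

#include <cstddef>
#include <vector>

// For a literal like {a: 1, [k]: 2, b: 3}: 'a' lands in the boilerplate map,
// '[k]' and 'b' are defined individually at runtime.
struct Property { bool is_computed_name; };

std::size_t FirstDynamicIndex(const std::vector<Property>& props) {
  for (std::size_t i = 0; i < props.size(); ++i) {
    if (props[i].is_computed_name) return i;  // dynamic part starts here
  }
  return props.size();  // fully static literal
}

After this patch full-codegen only ever sees fully static literals; the arm64 counterpart below makes the new invariant explicit with DCHECK(!property->is_computed_name()).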
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1552,11 +1338,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
@@ -1573,8 +1358,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
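The FastCloneShallowArrayStub-to-CodeFactory change above is the mechanical pattern this patch applies across platforms. A trivial model (illustrative types, not V8's): the call site no longer owns a stub object but fetches a handle to one pre-generated builtin.

struct Code {};  // stands in for generated machine code

struct Callable {
  const Code* code;  // what __ Call(callable.code(), CODE_TARGET) targets
};

Callable FastCloneShallowArray(const Code* prebuilt_builtin) {
  return Callable{prebuilt_builtin};
}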
@@ -1637,35 +1423,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- PushOperand(scratch);
- PushOperand(result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- PushOperand(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- PushOperand(scratch);
- PushOperand(result_register());
- }
- break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1678,6 +1435,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1694,21 +1455,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1747,72 +1502,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(r0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(r0);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, r0 holds the generator object.
- __ RecordGeneratorContinuation();
- __ ldr(r1, FieldMemOperand(r0, JSGeneratorObject::kResumeModeOffset));
- __ ldr(r0, FieldMemOperand(r0, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ cmp(r1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
- __ b(lt, &resume);
- __ Push(result_register());
- __ b(gt, &exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r1);
- __ b(eq, &post_runtime);
- __ push(r0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
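The removed body dispatched on the generator's resume mode with a single comparison; the two STATIC_ASSERTs pinned the ordering kNext < kReturn < kThrow that makes this legal. A standalone rendering of the trick (concrete enum values are illustrative; only the order is asserted by the original code):

enum ResumeMode { kNext = 0, kReturn = 1, kThrow = 2 };
enum class Path { kResume, kReturnResult, kRethrow };

// Mirrors the removed cmp / b(lt, &resume) / b(gt, &exception) sequence:
// one compare against kReturn splits three ways.
Path DispatchOnResumeMode(ResumeMode mode) {
  if (mode < kReturn) return Path::kResume;
  if (mode > kReturn) return Path::kRethrow;
  return Path::kReturnResult;
}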
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1952,60 +1655,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(r0);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = r1;
- if (property->is_static()) {
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ ldr(scratch, MemOperand(sp, 0)); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static 'prototype' property is read-only. We handle the
-    // non-computed property-name case in the parser. Since this is the
-    // only case where we need to check for an own read-only property, we
-    // special-case it here instead of checking every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(r0);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r1);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -2039,43 +1688,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(r0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; r0: home_object
- Register scratch = r2;
- Register scratch2 = r3;
- __ mov(scratch, result_register()); // home_object
- __ ldr(r0, MemOperand(sp, kPointerSize)); // value
- __ ldr(scratch2, MemOperand(sp, 0)); // this
- __ str(scratch2, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(r0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = r2;
- Register scratch2 = r3;
- __ ldr(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r0: key, r3: value
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 2 * kPointerSize));
- __ ldr(scratch, MemOperand(sp, 0)); // home_object
- __ str(scratch, MemOperand(sp, kPointerSize));
- __ str(r0, MemOperand(sp, 0));
- __ Move(r0, scratch2);
- // stack: this, home_object, key; r0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(r0); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2086,6 +1698,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(r0);
}
@@ -2147,26 +1763,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(r0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, r1);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
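The debug check that survives above guards 'let' against double initialization. A self-contained model of the asserted invariant, with std::nullopt standing in for the hole value:

#include <cassert>
#include <optional>

using Slot = std::optional<int>;  // nullopt plays the role of the hole

void InitLetBinding(Slot& slot, int value) {
  // A Token::INIT store must find the slot still uninitialized.
  assert(!slot.has_value() && "kLetBindingReInitialization");
  slot = value;
}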
@@ -2185,35 +1793,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // r0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(r0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // r0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(r0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2261,45 +1840,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = r1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(r0);
- PushOperand(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- PushOperand(scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
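The stack diagram in the removed EmitSuperCallWithLoadIC rewards a second look: the receiver is pushed twice so that after Runtime::kLoadFromSuper pops its three operands, one copy survives as the call's 'this'. A toy model with std::vector as the operand stack:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // Bottom-to-top layout built by the removed code.
  std::vector<std::string> stack = {"home_object", "this", "this",
                                    "home_object", "key"};
  stack.resize(stack.size() - 3);  // kLoadFromSuper pops receiver/home/key
  // __ str(r0, MemOperand(sp, kPointerSize)): result replaces home_object.
  stack[stack.size() - 2] = "target_function";
  assert((stack == std::vector<std::string>{"target_function", "this"}));
  return 0;
}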
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2325,43 +1865,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = r1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(r0);
- PushOperand(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- PushOperand(scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2393,116 +1896,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, r0);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // r4: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- }
-
-  // r3: the enclosing function (loaded from the frame).
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // r2: language mode.
- __ mov(r2, Operand(Smi::FromInt(language_mode())));
-
-  // r1: the start position of the scope the call resides in.
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
-
- // r0: the source position of the eval call.
- __ mov(r0, Operand(Smi::FromInt(expr->position())));
-
- // Do the runtime call.
- __ Push(r4, r3, r2, r1, r0);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
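The register comments in the removed EmitResolvePossiblyDirectEval spell out the argument record for the runtime call; gathered into one struct for readability (field names are descriptive, not V8's):

struct ResolvePossiblyDirectEvalArgs {
  void* first_arg_or_undefined;  // r4: the source to eval, if present
  void* enclosing_function;      // r3: function of the enclosing frame
  int language_mode;             // r2: sloppy vs. strict
  int scope_start_position;      // r1: start of the scope with the call
  int eval_call_position;        // r0: source position of the eval call
};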
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
-    // and the object holding it (returned in r1).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(r0, r1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r0);
- // The receiver is implicitly the global receiver. Indicate this
-      // by passing undefined to the call function stub.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- PushOperand(r2); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call
- // Runtime_ResolvePossiblyDirectEval to resolve the function we need
- // to call. Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- // Record source position for debugger.
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ mov(r0, Operand(arg_count));
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, r0);
-}
-
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2543,49 +1936,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ ldr(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ ldr(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into r3.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(r3, result_register());
-
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(r0);
-}
-
-
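Note how the removed EmitSuperConstructorCall found the super constructor structurally rather than by name: it is the [[Prototype]] of the active function, read through two loads. A minimal model (bare structs, not V8 heap objects):

struct Map { void* prototype; };  // Map::kPrototypeOffset
struct JSFunction { Map* map; };  // HeapObject::kMapOffset

void* SuperConstructor(const JSFunction& active_function) {
  // May be null; per the removed comment, Construct copes with that.
  return active_function.map->prototype;
}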
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2673,28 +2023,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2931,16 +2259,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(r2, r1);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(r0);
}
} else {
// Result of deleting non-property, non-variable reference is true.
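With lookup slots gone, 'delete' on any variable that still reaches this code is decidable at compile time, so the runtime call disappears and the variable case collapses to a constant:

// Sketch: stack- and context-allocated bindings are not configurable, so
// deleting them yields false; deleting 'this' yields true (hence Plug(is_this)).
bool DeleteVariableResult(bool is_this) { return is_this; }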
@@ -3046,35 +2370,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- PushOperand(scratch);
- PushOperand(result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- PushOperand(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- PushOperand(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- PushOperand(scratch);
- PushOperand(result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3085,6 +2380,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3120,14 +2417,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ str(r0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3159,14 +2454,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ str(r0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ str(r0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3223,30 +2516,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r0);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
@@ -3261,6 +2530,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3493,70 +2766,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r1));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ ldr(r1, MemOperand(ip));
- PushOperand(r1);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r1));
- // Restore pending message from stack.
- PopOperand(r1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(r1));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(r1));
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(r1); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ cmp(r1, Operand(Smi::FromInt(cmd.token)));
- __ b(ne, &skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
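The removed non-local control flow support did two jobs: save and restore the pending message around finally bodies, and replay how control left the try block by matching a token against a list of deferred commands. A compact standalone model of the replay loop:

#include <vector>

enum class Command { kReturn, kThrow, kContinue, kBreak };

struct Deferred {
  int token;        // identifies which exit from the try block this is
  Command command;  // what to do once the finally body has run
};

// Mirrors the removed cmp / b(ne, &skip) chain: only the entry whose token
// matches the value popped off the stack runs.
void ReplayDeferred(const std::vector<Deferred>& commands, int token) {
  for (const Deferred& cmd : commands) {
    if (cmd.token != token) continue;
    switch (cmd.command) {
      case Command::kReturn:   /* EmitUnwindAndReturn() */ break;
      case Command::kThrow:    /* push result, Runtime::kReThrow */ break;
      case Command::kContinue: /* EmitContinue(cmd.target) */ break;
      case Command::kBreak:    /* EmitBreak(cmd.target) */ break;
    }
  }
}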
#undef __
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index 51b3009cd0..ae1fab14b9 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/arm64/code-stubs-arm64.h"
@@ -149,8 +150,6 @@ void FullCodeGenerator::Generate() {
// Reserve space on the stack for locals.
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -203,14 +202,17 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ Push(x3); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ Push(x1);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
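The context-allocation hunk above keeps its old shape; only the fast-path provider changes, and the slow path now also passes the scope type to Runtime::kNewFunctionContext. In outline:

// The real bound comes from
// ConstructorBuiltinsAssembler::MaximumFunctionContextSlots(); it is
// treated as an opaque parameter here rather than guessed at.
bool UseFastNewFunctionContext(int slots, int max_fast_slots) {
  return slots <= max_fast_slots;
}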
@@ -257,37 +259,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_x1) {
- __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register; keep it marked as such.
- }
- SetVar(this_function_var, x1, x0, x2);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, x3, x0, x2);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_x1) {
- __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register_x1 = false;
- SetVar(rest_param, x0, x1, x2);
- }
+  // We don't support new.target, rest parameters, or this-function here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -544,10 +519,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ B(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ B(true_label_);
@@ -778,6 +751,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
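Each global declaration now contributes two entries, the name and its feedback slot, where before only the slot was recorded. Modeled as a paired record (an assumption drawn from the two adjacent Add() calls; V8 itself stores them as a flat array):

#include <string>
#include <utility>
#include <vector>

struct GlobalDecl {
  std::string name;
  int feedback_slot;
};

void Declare(std::vector<GlobalDecl>& globals, std::string name, int slot) {
  globals.push_back({std::move(name), slot});
}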
@@ -804,17 +778,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ Mov(x2, Operand(variable->name()));
- __ Push(x2);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -827,6 +791,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -865,17 +830,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Function Declaration");
- __ Mov(x2, Operand(variable->name()));
- PushOperand(x2);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1171,91 +1126,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = x10;
- Register temp = x11;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use a normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = x10;
- Register temp = x11;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
-  // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ B(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done);
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ B(done);
- }
- }
-}
-
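The removed fast path for DYNAMIC_GLOBAL/DYNAMIC_LOCAL loads hinged on one test, repeated up the context chain: a scope that calls sloppy eval is only safe to skip while its extension slot is still the hole. A standalone rendering:

// Returns true when no enclosing sloppy-eval scope has grown an extension
// object, i.e. the fast load has not been invalidated by eval.
struct Context {
  bool calls_sloppy_eval;
  bool has_extension;       // models EXTENSION_INDEX != the-hole
  const Context* previous;  // models PREVIOUS_INDEX
};

bool FastPathStillValid(const Context* ctx) {
  for (; ctx != nullptr; ctx = ctx->previous) {
    if (ctx->calls_sloppy_eval && ctx->has_extension) return false;
  }
  return true;
}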
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1263,8 +1133,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "Global variable");
@@ -1297,24 +1166,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Label done, slow;
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ Bind(&slow);
- Comment cmnt(masm_, "Lookup variable");
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ Bind(&done);
- context()->Plug(x0);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1341,7 +1193,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Mov(x2, Smi::FromInt(expr->literal_index()));
__ Mov(x1, Operand(constant_properties));
@@ -1351,8 +1204,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1362,10 +1216,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1375,6 +1228,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1422,20 +1276,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PushOperand(x0);
VisitForStackValue(value);
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1458,73 +1312,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
-      PushOperand(x0);  // Save result on the stack.
- result_saved = true;
- }
-
- __ Peek(x10, 0); // Duplicate receiver.
- PushOperand(x10);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
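For reference, the extra operand pair the removed loop pushed ahead of Runtime::kDefineDataPropertyInLiteral; the class-literal path used the same pair with DONT_ENUM in place of NONE (field names are mine):

struct DefineDataPropertyExtraArgs {
  int attributes;                // Smi: NONE here, DONT_ENUM for class members
  bool needs_set_function_name;  // anonymous function values adopt the key
};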
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1536,7 +1323,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1555,8 +1343,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1619,33 +1408,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = x10;
- __ Peek(scratch, kPointerSize);
- PushOperands(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = x10;
- const Register scratch2 = x11;
- __ Peek(scratch1, 2 * kPointerSize);
- __ Peek(scratch2, kPointerSize);
- PushOperands(scratch1, scratch2, result_register());
- }
- break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1657,6 +1419,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1673,21 +1439,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1726,17 +1486,13 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(x0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(x0);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1852,60 +1608,6 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
context()->Plug(x0);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = x1;
- if (property->is_static()) {
- __ Peek(scratch, kPointerSize); // constructor
- } else {
- __ Peek(scratch, 0); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static 'prototype' property is read-only. We handle the
-    // non-computed property-name case in the parser. Since this is the
-    // only case where we need to check for an own read-only property, we
-    // special-case it here instead of checking every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ Push(x0);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
@@ -1931,43 +1633,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(x0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; x0: home_object
- Register scratch = x10;
- Register scratch2 = x11;
- __ mov(scratch, result_register()); // home_object
- __ Peek(x0, kPointerSize); // value
- __ Peek(scratch2, 0); // this
- __ Poke(scratch2, kPointerSize); // this
- __ Poke(scratch, 0); // home_object
- // stack: this, home_object; x0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(x0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = x10;
- Register scratch2 = x11;
- __ Peek(scratch2, 2 * kPointerSize); // value
- // stack: value, this, home_object; x0: key, x11: value
- __ Peek(scratch, kPointerSize); // this
- __ Poke(scratch, 2 * kPointerSize);
- __ Peek(scratch, 0); // home_object
- __ Poke(scratch, kPointerSize);
- __ Poke(x0, 0);
- __ Move(x0, scratch2);
- // stack: this, home_object, key; x0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(x0); // Preserve value.
VisitForStackValue(prop->obj());
@@ -1978,6 +1643,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(x0);
}
@@ -2038,25 +1707,17 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(x0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- __ Ldr(x10, location);
- __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, x1);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ __ Ldr(x10, location);
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2076,35 +1737,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // x0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(x0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // x0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(x0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC.
@@ -2159,47 +1791,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitSuperCallWithLoadIC");
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
-
- // Load the function from the receiver.
- const Register scratch = x10;
- SuperPropertyReference* super_ref =
- callee->AsProperty()->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(x0);
- __ Peek(scratch, kPointerSize);
- PushOperands(x0, scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ Poke(x0, kPointerSize);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2225,44 +1816,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitKeyedSuperCallWithLoadIC");
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- // Load the function from the receiver.
- const Register scratch = x10;
- SuperPropertyReference* super_ref =
- callee->AsProperty()->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(x0);
- __ Peek(scratch, kPointerSize);
- PushOperands(x0, scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ Poke(x0, kPointerSize);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
ASM_LOCATION("FullCodeGenerator::EmitCall");
// Load the arguments.
@@ -2295,119 +1848,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, x0);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval");
- // Prepare to push a copy of the first argument or undefined if it doesn't
- // exist.
- if (arg_count > 0) {
- __ Peek(x9, arg_count * kXRegSize);
- } else {
- __ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
- }
-
- __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Prepare to push the language mode.
- __ Mov(x11, Smi::FromInt(language_mode()));
-  // Prepare to push the start position of the scope the call resides in.
- __ Mov(x12, Smi::FromInt(scope()->start_position()));
- // Prepare to push the source position of the eval call.
- __ Mov(x13, Smi::FromInt(expr->position()));
-
- // Push.
- __ Push(x9, x10, x11, x12, x13);
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
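For reference, the removed helper left exactly five values on the stack for the runtime call, bottom to top:

    x9:  a copy of the first argument, or undefined if there is none
    x10: the closure performing the eval (the frame's function slot)
    x11: the language mode, as a Smi
    x12: the start position of the enclosing scope, as a Smi
    x13: the source position of the eval call itself, as a Smi

Runtime::kResolvePossiblyDirectEval consumes these, together with the callee copy pushed by its caller, and returns the function the call site should actually invoke.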
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ Bind(&slow);
- // Call the runtime to find the function to call (returned in x0)
- // and the object holding it (returned in x1).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(x0, x1); // Receiver, function.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ B(&call);
- __ Bind(&done);
- // Push function.
- // The receiver is implicitly the global receiver. Indicate this
- // by passing undefined to the call function stub.
- __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
- __ Push(x0, x1);
- __ Bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
- PushOperand(x10); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitPossiblyEvalCall");
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ Peek(x10, (arg_count + 1) * kPointerSize);
- __ Push(x10);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ Poke(x0, (arg_count + 1) * kPointerSize);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- // Record source position for debugger.
- SetCallPosition(expr);
-
- // Call the evaluated function.
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ Mov(x3, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ Peek(x1, (arg_count + 1) * kXRegSize);
- __ Mov(x0, arg_count);
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, x0);
-}
-
-
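The removed eval path kept the callee in two places at once. With arg_count = n, the stack at the Peek is, bottom to top:

    callee | undefined receiver | arg0 ... arg(n-1)

The Peek copies the callee from (n + 1) slots down and pushes it, EmitResolvePossiblyDirectEval consumes that copy plus its five extra operands, and the Poke writes the resolved function in x0 back over the original callee slot, so the ordinary CallIC tail of the function dispatches to the resolved target.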
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2448,50 +1888,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- ASM_LOCATION("FullCodeGenerator::EmitSuperConstructorCall");
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ Ldr(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ Ldr(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into x3.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ Mov(x3, result_register());
-
- // Load function and argument count into x1 and x0.
- __ Mov(x0, arg_count);
- __ Peek(x1, arg_count * kXRegSize);
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(x0);
-}
-
-
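What the two Ldr instructions in the removed code compute is, in effect,

    target = active_function->map()->prototype();

that is, the [[GetPrototypeOf]] of the derived constructor, which the class wiring makes the parent constructor (or null for a class extending null, which, per the comment above, the Construct builtin tolerates).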
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2578,28 +1974,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2851,16 +2225,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(x12, x11);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(x0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(x0);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -2965,33 +2335,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- const Register scratch = x10;
- __ Peek(scratch, kPointerSize);
- PushOperands(scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- PushOperand(result_register());
- const Register scratch1 = x10;
- const Register scratch2 = x11;
- __ Peek(scratch1, 2 * kPointerSize);
- __ Peek(scratch2, kPointerSize);
- PushOperands(scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3001,6 +2344,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3036,14 +2381,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ Poke(x0, kPointerSize);
break;
- case NAMED_SUPER_PROPERTY:
- __ Poke(x0, kPointerSize * 2);
- break;
case KEYED_PROPERTY:
__ Poke(x0, kPointerSize * 2);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ Poke(x0, kPointerSize * 3);
+ UNREACHABLE();
break;
}
}
@@ -3075,14 +2418,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ Poke(x0, kXRegSize);
break;
- case NAMED_SUPER_PROPERTY:
- __ Poke(x0, 2 * kXRegSize);
- break;
case KEYED_PROPERTY:
__ Poke(x0, 2 * kXRegSize);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ Poke(x0, 3 * kXRegSize);
+ UNREACHABLE();
break;
}
}
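In both hunks the Poke offset equals the number of operands the property reference keeps on the stack above the slot reserved for the old value:

    NAMED_PROPERTY:        receiver                 -> Poke(x0, 1 slot)
    KEYED_PROPERTY:        receiver, key            -> Poke(x0, 2 slots)
    NAMED_SUPER (removed): this, home_object        -> Poke(x0, 2 slots)
    KEYED_SUPER (removed): this, home_object, key   -> Poke(x0, 3 slots)

Dropping the super cases leaves only the one- and two-slot variants.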
@@ -3141,30 +2482,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(x0);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3179,6 +2496,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3376,63 +2697,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- // TODO(jbramley): Tidy this up once the merge is done, using named registers
- // and suchlike. The implementation changes a little on bleeding_edge, so I
- // don't want to spend too much time on it now.
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ B(&suspend);
- // TODO(jbramley): This label is bound here because the following code
- // looks at its pos(). Is it possible to do something more efficient here,
- // perhaps using Adr?
- __ Bind(&continuation);
- // When we arrive here, x0 holds the generator object.
- __ RecordGeneratorContinuation();
- __ Ldr(x1, FieldMemOperand(x0, JSGeneratorObject::kResumeModeOffset));
- __ Ldr(x0, FieldMemOperand(x0, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ Cmp(x1, Operand(Smi::FromInt(JSGeneratorObject::kReturn)));
- __ B(lt, &resume);
- __ Push(result_register());
- __ B(gt, &exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ Bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ Bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
- __ Mov(x1, Smi::FromInt(continuation.pos()));
- __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
- __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
- __ Mov(x1, cp);
- __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset);
- __ Cmp(__ StackPointer(), x1);
- __ B(eq, &post_runtime);
- __ Push(x0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ Bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ Bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
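For the record, the removed resume dispatch leaned on the Smi ordering pinned by its two STATIC_ASSERTs (kNext < kReturn < kThrow), so a single Cmp against kReturn classified all three resume modes:

    lt -> kNext:   branch to &resume and continue the generator body
    eq -> kReturn: fall through and box x0 via EmitCreateIteratorResult(true)
    gt -> kThrow:  branch to &exception, then Runtime::kThrow or kReThrow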
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -3556,68 +2822,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-void FullCodeGenerator::EnterFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
- DCHECK(!result_register().is(x10));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x10, pending_message_obj);
- __ Ldr(x10, MemOperand(x10));
- PushOperand(x10);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
- DCHECK(!result_register().is(x10));
-
- // Restore pending message from stack.
- PopOperand(x10);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Mov(x13, pending_message_obj);
- __ Str(x10, MemOperand(x13));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(x10));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Mov(x13, pending_message_obj);
- __ Str(x10, MemOperand(x13));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- __ Pop(result_register(), x1); // Restore the accumulator and get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ Cmp(x1, Operand(Smi::FromInt(cmd.token)));
- __ B(ne, &skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
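This was the last piece of full-codegen's token-dispatch scheme for try...finally: every control transfer crossing a finally block pushed a unique Smi token plus the accumulator, jumped into the finally code, and the epilogue above re-dispatched on the token. A rough C++ analogue of the emitted dispatch (a sketch only; PopToken, Rethrow and the Jump* helpers stand in for the macro-assembler sequences above and are not V8 functions):

    int token = PopToken();                      // pushed by EmitJumpToFinally
    for (const DeferredCommand& cmd : commands) {
      if (token != cmd.token) continue;          // the Cmp/B(ne) pair
      switch (cmd.command) {
        case kReturn:   UnwindAndReturn();           break;
        case kThrow:    Rethrow(accumulator);        break;
        case kContinue: JumpToContinue(cmd.target);  break;
        case kBreak:    JumpToBreak(cmd.target);     break;
      }
    }
    // No token matched: the fall-through token, so execution simply
    // resumes after the try...finally.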
#undef __
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index ee5e8881ba..b8021366a2 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -42,6 +42,9 @@ class FullCodegenCompilationJob final : public CompilationJob {
}
CompilationJob::Status FinalizeJobImpl() final { return SUCCEEDED; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FullCodegenCompilationJob);
};
FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
@@ -62,7 +65,6 @@ FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
: 0,
info->zone()),
back_edges_(2, info->zone()),
- handler_table_(info->zone()),
source_position_table_builder_(info->zone(),
info->SourcePositionRecordingMode()),
ic_total_count_(0) {
@@ -84,6 +86,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
Isolate* isolate = info->isolate();
+ DCHECK(!info->shared_info()->must_use_ignition_turbo());
DCHECK(!FLAG_minimal);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileFullCode);
@@ -114,7 +117,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info, uintptr_t stack_limit) {
CodeGenerator::MakeCodeEpilogue(&masm, nullptr, info, masm.CodeObject());
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
- cgen.PopulateHandlerTable(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_has_reloc_info_for_serialization(info->will_serialize());
code->set_allow_osr_at_loop_nesting_level(0);
@@ -175,41 +177,15 @@ void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
}
-void FullCodeGenerator::PopulateHandlerTable(Handle<Code> code) {
- int handler_table_size = static_cast<int>(handler_table_.size());
- Handle<HandlerTable> table =
- Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
- HandlerTable::LengthForRange(handler_table_size), TENURED));
- for (int i = 0; i < handler_table_size; ++i) {
- table->SetRangeStart(i, handler_table_[i].range_start);
- table->SetRangeEnd(i, handler_table_[i].range_end);
- table->SetRangeHandler(i, handler_table_[i].handler_offset,
- handler_table_[i].catch_prediction);
- table->SetRangeData(i, handler_table_[i].stack_depth);
- }
- code->set_handler_table(*table);
-}
-
-
-int FullCodeGenerator::NewHandlerTableEntry() {
- int index = static_cast<int>(handler_table_.size());
- HandlerTableEntry entry = {0, 0, 0, 0, HandlerTable::UNCAUGHT};
- handler_table_.push_back(entry);
- return index;
-}
-
-
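Both removed helpers existed only to serialize full-codegen's exception ranges. Each entry described one guarded region,

    [range_start, range_end)  ->  handler_offset, stack_depth, catch_prediction

and at throw time the unwinder picks the innermost entry whose range contains the faulting pc, trims the operand stack back to stack_depth, and resumes at handler_offset. With the try statement visitors reduced to UNREACHABLE below, the table would always be empty, so the machinery goes.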
bool FullCodeGenerator::MustCreateObjectLiteralWithRuntime(
ObjectLiteral* expr) const {
- return masm()->serializer_enabled() ||
- !FastCloneShallowObjectStub::IsSupported(expr);
+ return masm()->serializer_enabled() || !expr->IsFastCloningSupported();
}
bool FullCodeGenerator::MustCreateArrayLiteralWithRuntime(
ArrayLiteral* expr) const {
- return expr->depth() > 1 ||
- expr->values()->length() > JSArray::kInitialMaxFastElementArray;
+ return !expr->IsFastCloningSupported();
}
void FullCodeGenerator::Initialize(uintptr_t stack_limit) {
@@ -237,7 +213,7 @@ void FullCodeGenerator::CallLoadIC(FeedbackVectorSlot slot,
Handle<Code> code = CodeFactory::LoadIC(isolate()).code();
__ Call(code, RelocInfo::CODE_TARGET);
- if (FLAG_tf_load_ic_stub) RestoreContext();
+ RestoreContext();
}
void FullCodeGenerator::CallStoreIC(FeedbackVectorSlot slot,
@@ -499,15 +475,15 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
-#ifdef DEBUG
Variable* var = proxy->var();
- DCHECK(var->IsUnallocated() ||
- (var->IsLookupSlot() && var->mode() == DYNAMIC_GLOBAL));
-#endif
+ DCHECK(var->IsUnallocated());
+ __ Move(LoadDescriptor::NameRegister(), var->name());
+
EmitLoadSlot(LoadGlobalDescriptor::SlotRegister(),
proxy->VariableFeedbackSlot());
Handle<Code> code = CodeFactory::LoadGlobalIC(isolate(), typeof_mode).code();
__ Call(code, RelocInfo::CODE_TARGET);
+ RestoreContext();
}
void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
@@ -623,10 +599,6 @@ void FullCodeGenerator::EmitIntrinsicAsStubCall(CallRuntime* expr,
context()->Plug(result_register());
}
-void FullCodeGenerator::EmitNewObject(CallRuntime* expr) {
- EmitIntrinsicAsStubCall(expr, CodeFactory::FastNewObject(isolate()));
-}
-
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
EmitIntrinsicAsStubCall(expr, CodeFactory::NumberToString(isolate()));
}
@@ -878,30 +850,17 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key();
if (key->IsPropertyName()) {
- if (!expr->IsSuperAccess()) {
- VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), result_register());
- EmitNamedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- EmitNamedSuperPropertyLoad(expr);
- }
+ DCHECK(!expr->IsSuperAccess());
+ VisitForAccumulatorValue(expr->obj());
+ __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+ EmitNamedPropertyLoad(expr);
} else {
- if (!expr->IsSuperAccess()) {
- VisitForStackValue(expr->obj());
- VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), result_register());
- PopOperand(LoadDescriptor::ReceiverRegister());
- EmitKeyedPropertyLoad(expr);
- } else {
- VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- expr->obj()->AsSuperPropertyReference()->home_object());
- VisitForStackValue(expr->key());
- EmitKeyedSuperPropertyLoad(expr);
- }
+ DCHECK(!expr->IsSuperAccess());
+ VisitForStackValue(expr->obj());
+ VisitForAccumulatorValue(expr->key());
+ __ Move(LoadDescriptor::NameRegister(), result_register());
+ PopOperand(LoadDescriptor::ReceiverRegister());
+ EmitKeyedPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), BailoutState::TOS_REGISTER);
context()->Plug(result_register());
@@ -912,8 +871,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
DCHECK(!context()->IsEffect());
DCHECK(!context()->IsTest());
- if (proxy != NULL &&
- (proxy->var()->IsUnallocated() || proxy->var()->IsLookupSlot())) {
+ if (proxy != NULL && proxy->var()->IsUnallocated()) {
EmitVariableLoad(proxy, INSIDE_TYPEOF);
PrepareForBailout(proxy, BailoutState::TOS_REGISTER);
} else {
@@ -987,19 +945,10 @@ void FullCodeGenerator::EmitContinue(Statement* target) {
NestedStatement* current = nesting_stack_;
int context_length = 0;
// When continuing, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
+ // with one that's safe for GC.
ClearAccumulator();
while (!current->IsContinueTarget(target)) {
if (HasStackOverflow()) return;
- if (current->IsTryFinally()) {
- Comment cmnt(masm(), "[ Deferred continue through finally");
- current->Exit(&context_length);
- DCHECK_EQ(-1, context_length);
- current->AsTryFinally()->deferred_commands()->RecordContinue(target);
- return;
- }
current = current->Exit(&context_length);
}
int stack_depth = current->GetStackDepthAtTarget();
@@ -1028,19 +977,10 @@ void FullCodeGenerator::EmitBreak(Statement* target) {
NestedStatement* current = nesting_stack_;
int context_length = 0;
// When breaking, we clobber the unpredictable value in the accumulator
- // with one that's safe for GC. If we hit an exit from the try block of
- // try...finally on our way out, we will unconditionally preserve the
- // accumulator on the stack.
+ // with one that's safe for GC.
ClearAccumulator();
while (!current->IsBreakTarget(target)) {
if (HasStackOverflow()) return;
- if (current->IsTryFinally()) {
- Comment cmnt(masm(), "[ Deferred break through finally");
- current->Exit(&context_length);
- DCHECK_EQ(-1, context_length);
- current->AsTryFinally()->deferred_commands()->RecordBreak(target);
- return;
- }
current = current->Exit(&context_length);
}
int stack_depth = current->GetStackDepthAtTarget();
@@ -1070,31 +1010,34 @@ void FullCodeGenerator::EmitUnwindAndReturn() {
int context_length = 0;
while (current != NULL) {
if (HasStackOverflow()) return;
- if (current->IsTryFinally()) {
- Comment cmnt(masm(), "[ Deferred return through finally");
- current->Exit(&context_length);
- DCHECK_EQ(-1, context_length);
- current->AsTryFinally()->deferred_commands()->RecordReturn();
- return;
- }
current = current->Exit(&context_length);
}
EmitReturnSequence();
}
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ FeedbackVectorSlot slot,
bool pretenure) {
+ // If the slot is invalid, this is a native function literal and we can
+ // pass an empty feedback array instead.
+
// If we're running with the --always-opt or the --prepare-always-opt
// flag, we need to use the runtime function so that the new function
// we are creating here gets a chance to have its code optimized and
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
scope()->is_function_scope()) {
- FastNewClosureStub stub(isolate());
- __ Move(stub.GetCallInterfaceDescriptor().GetRegisterParameter(0), info);
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastNewClosure(isolate());
+ __ Move(callable.descriptor().GetRegisterParameter(0), info);
+ __ EmitLoadTypeFeedbackVector(
+ callable.descriptor().GetRegisterParameter(1));
+ __ Move(callable.descriptor().GetRegisterParameter(2), SmiFromSlot(slot));
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
} else {
__ Push(info);
+ __ EmitLoadTypeFeedbackVector(result_register());
+ __ Push(result_register());
+ __ Push(SmiFromSlot(slot));
__ CallRuntime(pretenure ? Runtime::kNewClosure_Tenured
: Runtime::kNewClosure);
}
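Both allocation paths now pass the same triple, presumably so the allocated closure can be tied to its literal's feedback slot (the consuming builtin and runtime function are outside this diff):

    FastNewClosure(shared_info, feedback_vector, SmiFromSlot(slot))
    Runtime::kNewClosure / kNewClosure_Tenured with the same three operands

The slot is threaded in from the literal by the EmitNewClosure callers below, including the native-function-literal case.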
@@ -1110,17 +1053,6 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
CallLoadIC(prop->PropertyFeedbackSlot(), key->value());
}
-void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object
- SetExpressionPosition(prop);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- DCHECK(prop->IsSuperAccess());
-
- PushOperand(key->value());
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
-}
-
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetExpressionPosition(prop);
@@ -1131,20 +1063,6 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
RestoreContext();
}
-void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
- // Stack: receiver, home_object, key.
- SetExpressionPosition(prop);
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
-}
-
-void FullCodeGenerator::EmitPropertyKey(LiteralProperty* property,
- BailoutId bailout_id) {
- VisitForStackValue(property->key());
- CallRuntimeWithOperands(Runtime::kToName);
- PrepareForBailoutForId(bailout_id, BailoutState::TOS_REGISTER);
- PushOperand(result_register());
-}
-
void FullCodeGenerator::EmitLoadSlot(Register destination,
FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
@@ -1165,33 +1083,8 @@ void FullCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
- Comment cmnt(masm_, "[ WithStatement");
- SetStatementPosition(stmt);
-
- VisitForAccumulatorValue(stmt->expression());
- Callable callable = CodeFactory::ToObject(isolate());
- __ Move(callable.descriptor().GetRegisterParameter(0), result_register());
- __ Call(callable.code(), RelocInfo::CODE_TARGET);
- RestoreContext();
- PrepareForBailoutForId(stmt->ToObjectId(), BailoutState::TOS_REGISTER);
- PushOperand(result_register());
- PushOperand(stmt->scope()->scope_info());
- PushFunctionArgumentForContextAllocation();
- CallRuntimeWithOperands(Runtime::kPushWithContext);
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
- PrepareForBailoutForId(stmt->EntryId(), BailoutState::NO_REGISTERS);
-
- Scope* saved_scope = scope();
- scope_ = stmt->scope();
- { WithOrCatch body(this);
- Visit(stmt->statement());
- }
- scope_ = saved_scope;
-
- // Pop context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
+ // Dynamic scoping is not supported.
+ UNREACHABLE();
}
@@ -1312,43 +1205,8 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
- Comment cmnt(masm_, "[ ForOfStatement");
-
- Iteration loop_statement(this, stmt);
- increment_loop_depth();
-
- // var iterator = iterable[Symbol.iterator]();
- SetExpressionAsStatementPosition(stmt->assign_iterator());
- VisitForEffect(stmt->assign_iterator());
-
- // Loop entry.
- __ bind(loop_statement.continue_label());
-
- // result = iterator.next()
- SetExpressionAsStatementPosition(stmt->next_result());
- VisitForEffect(stmt->next_result());
-
- // if (result.done) break;
- Label result_not_done;
- VisitForControl(stmt->result_done(), loop_statement.break_label(),
- &result_not_done, &result_not_done);
- __ bind(&result_not_done);
-
- // each = result.value
- VisitForEffect(stmt->assign_each());
-
- // Generate code for the body of the loop.
- Visit(stmt->body());
-
- // Check stack before looping.
- PrepareForBailoutForId(stmt->BackEdgeId(), BailoutState::NO_REGISTERS);
- EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ jmp(loop_statement.continue_label());
-
- // Exit and decrement the loop depth.
- PrepareForBailoutForId(stmt->ExitId(), BailoutState::NO_REGISTERS);
- __ bind(loop_statement.break_label());
- decrement_loop_depth();
+ // Iterator looping is not supported.
+ UNREACHABLE();
}
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
@@ -1358,127 +1216,14 @@ void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
}
void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- Comment cmnt(masm_, "[ TryCatchStatement");
- SetStatementPosition(stmt, SKIP_BREAK);
-
- // The try block adds a handler to the exception handler chain before
- // entering, and removes it again when exiting normally. If an exception
- // is thrown during execution of the try block, the handler is consumed
- // and control is passed to the catch block with the exception in the
- // result register.
-
- Label try_entry, handler_entry, exit;
- __ jmp(&try_entry);
- __ bind(&handler_entry);
- if (stmt->clear_pending_message()) ClearPendingMessage();
-
- // Exception handler code, the exception is in the result register.
- // Extend the context before executing the catch block.
- { Comment cmnt(masm_, "[ Extend catch context");
- PushOperand(stmt->variable()->name());
- PushOperand(result_register());
- PushOperand(stmt->scope()->scope_info());
- PushFunctionArgumentForContextAllocation();
- CallRuntimeWithOperands(Runtime::kPushCatchContext);
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
-
- Scope* saved_scope = scope();
- scope_ = stmt->scope();
- DCHECK(scope_->declarations()->is_empty());
- { WithOrCatch catch_body(this);
- Visit(stmt->catch_block());
- }
- // Restore the context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
- scope_ = saved_scope;
- __ jmp(&exit);
-
- // Try block code. Sets up the exception handler chain.
- __ bind(&try_entry);
-
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
- {
- Comment cmnt_try(masm(), "[ Try block");
- Visit(stmt->try_block());
- }
- ExitTryBlock(handler_index);
- __ bind(&exit);
+ // Exception handling is not supported.
+ UNREACHABLE();
}
void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- Comment cmnt(masm_, "[ TryFinallyStatement");
- SetStatementPosition(stmt, SKIP_BREAK);
-
- // Try finally is compiled by setting up a try-handler on the stack while
- // executing the try body, and removing it again afterwards.
- //
- // The try-finally construct can enter the finally block in three ways:
- // 1. By exiting the try-block normally. This exits the try block,
- // pushes the continuation token and falls through to the finally
- // block.
- // 2. By exiting the try-block with a function-local control flow transfer
- // (break/continue/return). The site of the, e.g., break exits the
- // try block, pushes the continuation token and jumps to the
- // finally block. After the finally block executes, the execution
- // continues based on the continuation token to a block that
- // continues with the control flow transfer.
- // 3. By exiting the try-block with a thrown exception. In the handler,
- // we push the exception and continuation token and jump to the
- // finally block (which will again dispatch based on the token once
- // it is finished).
-
- Label try_entry, handler_entry, finally_entry;
- DeferredCommands deferred(this, &finally_entry);
-
- // Jump to try-handler setup and try-block code.
- __ jmp(&try_entry);
- __ bind(&handler_entry);
-
- // Exception handler code. This code is only executed when an exception
- // is thrown. Record the continuation and jump to the finally block.
- {
- Comment cmnt_handler(masm(), "[ Finally handler");
- deferred.RecordThrow();
- }
-
- // Set up try handler.
- __ bind(&try_entry);
- int handler_index = NewHandlerTableEntry();
- EnterTryBlock(handler_index, &handler_entry, stmt->catch_prediction());
- {
- Comment cmnt_try(masm(), "[ Try block");
- TryFinally try_body(this, &deferred);
- Visit(stmt->try_block());
- }
- ExitTryBlock(handler_index);
- // Execute the finally block on the way out. Clobber the unpredictable
- // value in the result register with one that's safe for GC because the
- // finally block will unconditionally preserve the result register on the
- // stack.
- ClearAccumulator();
- deferred.EmitFallThrough();
- // Fall through to the finally block.
-
- // Finally block implementation.
- __ bind(&finally_entry);
- {
- Comment cmnt_finally(masm(), "[ Finally block");
- OperandStackDepthIncrement(2); // Token and accumulator are on stack.
- EnterFinallyBlock();
- Visit(stmt->finally_block());
- ExitFinallyBlock();
- OperandStackDepthDecrement(2); // Token and accumulator were on stack.
- }
-
- {
- Comment cmnt_deferred(masm(), "[ Post-finally dispatch");
- deferred.EmitCommands(); // Return to the calling code.
- }
+ // Exception handling is not supported.
+ UNREACHABLE();
}
@@ -1546,46 +1291,13 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
SetStackOverflow();
return;
}
- EmitNewClosure(function_info, expr->pretenure());
+ EmitNewClosure(function_info, expr->LiteralFeedbackSlot(), expr->pretenure());
}
void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
- Comment cmnt(masm_, "[ ClassLiteral");
-
- if (lit->extends() != NULL) {
- VisitForStackValue(lit->extends());
- } else {
- PushOperand(isolate()->factory()->the_hole_value());
- }
-
- VisitForStackValue(lit->constructor());
-
- PushOperand(Smi::FromInt(lit->start_position()));
- PushOperand(Smi::FromInt(lit->end_position()));
-
- CallRuntimeWithOperands(Runtime::kDefineClass);
- PrepareForBailoutForId(lit->CreateLiteralId(), BailoutState::TOS_REGISTER);
- PushOperand(result_register());
-
- // Load the "prototype" from the constructor.
- __ Move(LoadDescriptor::ReceiverRegister(), result_register());
- CallLoadIC(lit->PrototypeSlot(), isolate()->factory()->prototype_string());
- PrepareForBailoutForId(lit->PrototypeId(), BailoutState::TOS_REGISTER);
- PushOperand(result_register());
-
- EmitClassDefineProperties(lit);
- DropOperands(1);
-
- // Set the constructor to have fast properties.
- CallRuntimeWithOperands(Runtime::kToFastProperties);
-
- if (lit->class_variable_proxy() != nullptr) {
- EmitVariableAssignment(lit->class_variable_proxy()->var(), Token::INIT,
- lit->ProxySlot(), HoleCheckMode::kElided);
- }
-
- context()->Plug(result_register());
+ // Class literals are not supported.
+ UNREACHABLE();
}
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
@@ -1612,7 +1324,7 @@ void FullCodeGenerator::VisitNativeFunctionLiteral(
Comment cmnt(masm_, "[ NativeFunctionLiteral");
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForNative(expr->extension(), expr->name());
- EmitNewClosure(shared, false);
+ EmitNewClosure(shared, expr->LiteralFeedbackSlot(), false);
}
@@ -1628,32 +1340,6 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
if (context()->IsStackValue()) OperandStackDepthIncrement(1);
}
-void FullCodeGenerator::EnterTryBlock(
- int handler_index, Label* handler,
- HandlerTable::CatchPrediction catch_prediction) {
- HandlerTableEntry* entry = &handler_table_[handler_index];
- entry->range_start = masm()->pc_offset();
- entry->handler_offset = handler->pos();
- entry->stack_depth = operand_stack_depth_;
- entry->catch_prediction = catch_prediction;
-
- // We are using the operand stack depth, check for accuracy.
- EmitOperandStackDepthCheck();
-
- // Push context onto operand stack.
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- PushOperand(context_register());
-}
-
-
-void FullCodeGenerator::ExitTryBlock(int handler_index) {
- HandlerTableEntry* entry = &handler_table_[handler_index];
- entry->range_end = masm()->pc_offset();
-
- // Drop context from operand stack.
- DropOperands(TryBlockConstant::kElementCount);
-}
-
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
@@ -1668,48 +1354,38 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Expression* callee = expr->expression();
Call::CallType call_type = expr->GetCallType();
- if (expr->is_possibly_eval()) {
- EmitPossiblyEvalCall(expr);
- } else {
- switch (call_type) {
- case Call::GLOBAL_CALL:
- EmitCallWithLoadIC(expr);
- break;
- case Call::WITH_CALL:
- // Call to a lookup slot looked up through a with scope.
- PushCalleeAndWithBaseObject(expr);
- EmitCall(expr);
- break;
- case Call::NAMED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VisitForStackValue(property->obj());
- EmitCallWithLoadIC(expr);
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- Property* property = callee->AsProperty();
- VisitForStackValue(property->obj());
- EmitKeyedCallWithLoadIC(expr, property->key());
- break;
- }
- case Call::NAMED_SUPER_PROPERTY_CALL:
- EmitSuperCallWithLoadIC(expr);
- break;
- case Call::KEYED_SUPER_PROPERTY_CALL:
- EmitKeyedSuperCallWithLoadIC(expr);
- break;
- case Call::SUPER_CALL:
- EmitSuperConstructorCall(expr);
- break;
- case Call::OTHER_CALL:
- // Call to an arbitrary expression not handled specially above.
- VisitForStackValue(callee);
- OperandStackDepthIncrement(1);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Emit function call.
- EmitCall(expr);
- break;
+ // Eval is unsupported.
+ CHECK(!expr->is_possibly_eval());
+
+ switch (call_type) {
+ case Call::GLOBAL_CALL:
+ EmitCallWithLoadIC(expr);
+ break;
+ case Call::NAMED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitCallWithLoadIC(expr);
+ break;
+ }
+ case Call::KEYED_PROPERTY_CALL: {
+ Property* property = callee->AsProperty();
+ VisitForStackValue(property->obj());
+ EmitKeyedCallWithLoadIC(expr, property->key());
+ break;
}
+ case Call::OTHER_CALL:
+ // Call to an arbitrary expression not handled specially above.
+ VisitForStackValue(callee);
+ OperandStackDepthIncrement(1);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ // Emit function call.
+ EmitCall(expr);
+ break;
+ case Call::NAMED_SUPER_PROPERTY_CALL:
+ case Call::KEYED_SUPER_PROPERTY_CALL:
+ case Call::SUPER_CALL:
+ case Call::WITH_CALL:
+ UNREACHABLE();
}
#ifdef DEBUG
@@ -1769,78 +1445,12 @@ void FullCodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void FullCodeGenerator::VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
void FullCodeGenerator::VisitRewritableExpression(RewritableExpression* expr) {
Visit(expr->expression());
}
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Calculate how many operands to drop to get down to handler block.
- int stack_drop = codegen_->operand_stack_depth_ - GetStackDepthAtTarget();
- DCHECK_GE(stack_drop, 0);
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- if (*context_length > 0) {
- __ Drop(stack_drop); // Down to the handler block.
- // Restore the context to its dedicated register and the stack.
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- __ Pop(codegen_->context_register());
- codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
- codegen_->context_register());
- } else {
- // Down to the handler block and also drop context.
- __ Drop(stack_drop + TryBlockConstant::kElementCount);
- }
-
- // The caller will ignore outputs.
- *context_length = -1;
- return previous_;
-}
-
-void FullCodeGenerator::DeferredCommands::RecordBreak(Statement* target) {
- TokenId token = dispenser_.GetBreakContinueToken();
- commands_.push_back({kBreak, token, target});
- EmitJumpToFinally(token);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordContinue(Statement* target) {
- TokenId token = dispenser_.GetBreakContinueToken();
- commands_.push_back({kContinue, token, target});
- EmitJumpToFinally(token);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordReturn() {
- if (return_token_ == TokenDispenserForFinally::kInvalidToken) {
- return_token_ = TokenDispenserForFinally::kReturnToken;
- commands_.push_back({kReturn, return_token_, nullptr});
- }
- EmitJumpToFinally(return_token_);
-}
-
-void FullCodeGenerator::DeferredCommands::RecordThrow() {
- if (throw_token_ == TokenDispenserForFinally::kInvalidToken) {
- throw_token_ = TokenDispenserForFinally::kThrowToken;
- commands_.push_back({kThrow, throw_token_, nullptr});
- }
- EmitJumpToFinally(throw_token_);
-}
-
-void FullCodeGenerator::DeferredCommands::EmitFallThrough() {
- __ Push(Smi::FromInt(TokenDispenserForFinally::kFallThroughToken));
- __ Push(result_register());
-}
-
-void FullCodeGenerator::DeferredCommands::EmitJumpToFinally(TokenId token) {
- __ Push(Smi::FromInt(token));
- __ Push(result_register());
- __ jmp(finally_entry_);
-}
bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
Expression* sub_expr;
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 558dae18dd..9a34cf93f0 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -82,7 +82,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
class Breakable;
class Iteration;
- class TryFinally;
class TestContext;
@@ -103,11 +102,9 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
virtual Breakable* AsBreakable() { return nullptr; }
virtual Iteration* AsIteration() { return nullptr; }
- virtual TryFinally* AsTryFinally() { return nullptr; }
virtual bool IsContinueTarget(Statement* target) { return false; }
virtual bool IsBreakTarget(Statement* target) { return false; }
- virtual bool IsTryFinally() { return false; }
// Notify the statement that we are exiting it via break, continue, or
// return and give it a chance to generate cleanup code. Return the
@@ -185,73 +182,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
}
};
- class DeferredCommands {
- public:
- enum Command { kReturn, kThrow, kBreak, kContinue };
- typedef int TokenId;
- struct DeferredCommand {
- Command command;
- TokenId token;
- Statement* target;
- };
-
- DeferredCommands(FullCodeGenerator* codegen, Label* finally_entry)
- : codegen_(codegen),
- commands_(codegen->zone()),
- return_token_(TokenDispenserForFinally::kInvalidToken),
- throw_token_(TokenDispenserForFinally::kInvalidToken),
- finally_entry_(finally_entry) {}
-
- void EmitCommands();
-
- void RecordBreak(Statement* target);
- void RecordContinue(Statement* target);
- void RecordReturn();
- void RecordThrow();
- void EmitFallThrough();
-
- private:
- MacroAssembler* masm() { return codegen_->masm(); }
- void EmitJumpToFinally(TokenId token);
-
- FullCodeGenerator* codegen_;
- ZoneVector<DeferredCommand> commands_;
- TokenDispenserForFinally dispenser_;
- TokenId return_token_;
- TokenId throw_token_;
- Label* finally_entry_;
- };
-
- // The try block of a try/finally statement.
- class TryFinally : public NestedStatement {
- public:
- TryFinally(FullCodeGenerator* codegen, DeferredCommands* commands)
- : NestedStatement(codegen), deferred_commands_(commands) {}
-
- NestedStatement* Exit(int* context_length) override;
-
- bool IsTryFinally() override { return true; }
- TryFinally* AsTryFinally() override { return this; }
-
- DeferredCommands* deferred_commands() { return deferred_commands_; }
-
- private:
- DeferredCommands* deferred_commands_;
- };
-
- // The body of a with or catch.
- class WithOrCatch : public NestedStatement {
- public:
- explicit WithOrCatch(FullCodeGenerator* codegen)
- : NestedStatement(codegen) {
- }
-
- NestedStatement* Exit(int* context_length) override {
- ++(*context_length);
- return previous_;
- }
- };
-
// A platform-specific utility to overwrite the accumulator register
// with a GC-safe value.
void ClearAccumulator();
@@ -462,21 +392,15 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// Platform-specific code sequences for calls
void EmitCall(Call* expr, ConvertReceiverMode = ConvertReceiverMode::kAny);
- void EmitSuperConstructorCall(Call* expr);
void EmitCallWithLoadIC(Call* expr);
- void EmitSuperCallWithLoadIC(Call* expr);
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
- void EmitKeyedSuperCallWithLoadIC(Call* expr);
- void EmitPossiblyEvalCall(Call* expr);
#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
F(IsSmi) \
F(IsArray) \
F(IsTypedArray) \
- F(IsRegExp) \
F(IsJSProxy) \
F(Call) \
- F(NewObject) \
F(IsJSReceiver) \
F(GetSuperConstructor) \
F(DebugBreakInOptimizedCode) \
@@ -506,23 +430,16 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void RestoreContext();
// Platform-specific code for loading variables.
- void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode, Label* slow);
- MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
- void EmitDynamicLookupFastCase(VariableProxy* proxy, TypeofMode typeof_mode,
- Label* slow, Label* done);
void EmitGlobalVariableLoad(VariableProxy* proxy, TypeofMode typeof_mode);
void EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
void EmitAccessor(ObjectLiteralProperty* property);
- // Expects the arguments and the function already pushed.
- void EmitResolvePossiblyDirectEval(Call* expr);
-
// Platform-specific support for allocating a new closure based on
// the given function info.
- void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
+ void EmitNewClosure(Handle<SharedFunctionInfo> info, FeedbackVectorSlot slot,
+ bool pretenure);
// Re-usable portions of CallRuntime
void EmitLoadJSRuntimeFunction(CallRuntime* expr);
@@ -532,26 +449,10 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr);
- // Load a value from super.named property.
- // Expect receiver ('this' value) and home_object on the stack.
- void EmitNamedSuperPropertyLoad(Property* expr);
-
- // Load a value from super[keyed] property.
- // Expect receiver ('this' value), home_object and key on the stack.
- void EmitKeyedSuperPropertyLoad(Property* expr);
-
// Load a value from a keyed property.
// The receiver and the key are left on the stack by the IC.
void EmitKeyedPropertyLoad(Property* expr);
- // Adds the properties to the class (function) object and to its prototype.
- // Expects the class (function) in the accumulator. The class (function) is
- // in the accumulator after installing all the properties.
- void EmitClassDefineProperties(ClassLiteral* lit);
-
- // Pushes the property key as a Name on the stack.
- void EmitPropertyKey(LiteralProperty* property, BailoutId bailout_id);
-
// Apply the compound assignment operator. Expects the left operand on top
// of the stack and the right one in the accumulator.
void EmitBinaryOp(BinaryOperation* expr, Token::Value op);
@@ -582,14 +483,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// of the stack and the right-hand-side value in the accumulator.
void EmitNamedPropertyAssignment(Assignment* expr);
- // Complete a super named property assignment. The right-hand-side value
- // is expected in accumulator.
- void EmitNamedSuperPropertyStore(Property* prop);
-
- // Complete a super keyed property assignment. The right-hand-side value
- // is expected in accumulator.
- void EmitKeyedSuperPropertyStore(Property* prop);
-
// Complete a keyed property assignment. The receiver and key are
// expected on top of the stack and the right-hand-side value in the
// accumulator.
@@ -648,14 +541,7 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void RecordStatementPosition(int pos);
void RecordPosition(int pos);
- // Non-local control flow support.
- void EnterTryBlock(int handler_index, Label* handler,
- HandlerTable::CatchPrediction catch_prediction);
- void ExitTryBlock(int handler_index);
- void EnterFinallyBlock();
- void ExitFinallyBlock();
- void ClearPendingMessage();
-
+ // Local control flow support.
void EmitContinue(Statement* target);
void EmitBreak(Statement* target);
@@ -698,8 +584,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
// and PushCatchContext.
void PushFunctionArgumentForContextAllocation();
- void PushCalleeAndWithBaseObject(Call* expr);
-
// AST node visit functions.
#define DECLARE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
@@ -714,13 +598,10 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
void Generate();
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- void PopulateHandlerTable(Handle<Code> code);
bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
- int NewHandlerTableEntry();
-
struct BailoutEntry {
BailoutId id;
unsigned pc_and_state;
@@ -732,14 +613,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
uint32_t loop_depth;
};
- struct HandlerTableEntry {
- unsigned range_start;
- unsigned range_end;
- unsigned handler_offset;
- int stack_depth;
- HandlerTable::CatchPrediction catch_prediction;
- };
-
class ExpressionContext BASE_EMBEDDED {
public:
explicit ExpressionContext(FullCodeGenerator* codegen)
@@ -937,7 +810,6 @@ class FullCodeGenerator final : public AstVisitor<FullCodeGenerator> {
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BackEdgeEntry> back_edges_;
- ZoneVector<HandlerTableEntry> handler_table_;
SourcePositionTableBuilder source_position_table_builder_;
int ic_total_count_;
Handle<Cell> profiling_counter_;
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 5e80dd3280..28e2616ac6 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_IA32
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/ic.h"
@@ -130,8 +131,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
@@ -189,15 +188,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(edx); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Immediate(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(edi);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
@@ -247,37 +249,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep it marked as such.
- }
- SetVar(this_function_var, edi, ebx, ecx);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, edx, ebx, ecx);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register = false;
- SetVar(rest_param, eax, ebx, edx);
- }
+ // We don't support new.target, rest parameters or the this-function
+ // binding here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -505,10 +480,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -731,6 +704,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -757,16 +731,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ push(Immediate(variable->name()));
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -779,6 +744,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -815,15 +781,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- PushOperand(variable->name());
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
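With the extra Add calls, globals_ now carries each declared name next to its feedback slot, i.e. per declaration:

    globals_: [ ..., name_i, Smi(slot_i), ... ]

so the declaration runtime receives (name, slot) pairs rather than slots alone; the consumer of globals_ is outside this hunk, the pairing is simply what the two Add calls establish.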
@@ -1109,97 +1067,13 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register context = esi;
- Register temp = edx;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use a normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = esi;
- Register temp = ebx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- }
- // Check that last extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an esi-based operand (the write barrier cannot be allowed to
- // destroy the esi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, done);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ jmp(done);
- }
- }
-}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1232,24 +1106,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ push(Immediate(var->name()));
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
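
Reviewer note: EmitVariableLoad now distinguishes only globals (UNALLOCATED) from plain stack/context slots; the DYNAMIC_GLOBAL / DYNAMIC_LOCAL fast paths and their helpers (EmitLoadGlobalCheckExtensions, ContextSlotOperandCheckExtensions, EmitDynamicLookupFastCase) are deleted because functions whose variables need runtime lookup no longer reach this compiler. A shape-only sketch of the reduced dispatch, with std::abort() standing in for UNREACHABLE():

    #include <cstdlib>

    enum class VariableLocation {
      UNALLOCATED, PARAMETER, LOCAL, CONTEXT, LOOKUP, MODULE
    };

    void EmitVariableLoadSketch(VariableLocation loc) {
      switch (loc) {
        case VariableLocation::UNALLOCATED:
          // global: load through the load IC
          break;
        case VariableLocation::PARAMETER:
        case VariableLocation::LOCAL:
        case VariableLocation::CONTEXT:
          // read the stack or context slot directly
          break;
        case VariableLocation::LOOKUP:  // filtered out before codegen now
        case VariableLocation::MODULE:
          std::abort();                 // UNREACHABLE()
      }
    }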
@@ -1275,7 +1132,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
@@ -1290,8 +1148,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
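
Reviewer note: this is the recurring mechanical change of the patch. Platform code stops instantiating a CodeStub and instead fetches a Callable (a code object paired with its interface descriptor) from CodeFactory, then calls its code target. Shown schematically, copied from the hunk above (the register setup around the call is untouched):

    // before (stub-based):
    FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
    __ CallStub(&stub);

    // after (builtin via CodeFactory):
    Callable callable = CodeFactory::FastCloneShallowObject(
        isolate(), expr->properties_count());
    __ Call(callable.code(), RelocInfo::CODE_TARGET);

The same substitution appears below for FastCloneShallowArray and FastNewFunctionContext.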
@@ -1301,10 +1160,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1314,6 +1172,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1356,20 +1215,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1392,72 +1251,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(eax); // Save result on the stack
- result_saved = true;
- }
-
- PushOperand(Operand(esp, 0)); // Duplicate receiver.
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
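
Reviewer note: the deleted loop above was the "dynamic" half of object-literal emission, covering every property from the first computed name onward via per-property runtime calls (kDefineDataPropertyInLiteral and friends). With it gone, the first loop's new DCHECK(!property->is_computed_name()) is the load-bearing invariant: a literal such as { [key]: value } must be compiled by another tier instead. A tiny sketch of that invariant check:

    #include <cassert>
    #include <vector>

    struct Property { bool is_computed_name = false; };

    // Mirrors the new DCHECK: by the time a literal reaches this code,
    // every key must be a compile-time-known name.
    void CheckAllKeysAreLiterals(const std::vector<Property>& properties) {
      for (const Property& p : properties) assert(!p.is_computed_name);
    }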
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1469,7 +1262,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1490,8 +1284,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
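
Reviewer note: array literals switch from a raw Handle<FixedArray> of constant elements to a lazily built Handle<ConstantElementsPair> via GetOrBuildConstantElements. Only the type change is visible in the hunk; the sketch below assumes, from the name alone, that the pair couples an elements kind with the boilerplate values:

    #include <vector>

    // Hedged sketch of the pair's shape; the field names are assumptions
    // drawn from the type name, not read from this diff.
    struct ConstantElementsPairSketch {
      int elements_kind;                    // e.g. a fast-smi kind
      std::vector<double> constant_values;  // boilerplate element values
    };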
@@ -1545,17 +1340,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do here.
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(esp, kPointerSize));
- PushOperand(result_register());
- }
- break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
@@ -1565,19 +1349,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(result_register());
- }
- break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1590,6 +1361,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1602,26 +1377,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1659,72 +1428,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(result_register());
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(result_register());
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
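
Reviewer note: all four super-property arms of VisitAssignment (named/keyed loads and stores) collapse to UNREACHABLE across three separate switches. The enum members stay so the switches remain exhaustive; reaching them is a hard failure, since functions that use super are assumed to be filtered out before this compiler runs. The defensive-switch idiom, sketched:

    #include <cstdlib>

    enum class LhsKind {
      VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY,
      NAMED_SUPER_PROPERTY, KEYED_SUPER_PROPERTY
    };

    void AssignSketch(LhsKind kind) {
      switch (kind) {
        case LhsKind::VARIABLE:
        case LhsKind::NAMED_PROPERTY:
        case LhsKind::KEYED_PROPERTY:
          // supported: emit the store as before
          break;
        case LhsKind::NAMED_SUPER_PROPERTY:
        case LhsKind::KEYED_SUPER_PROPERTY:
          std::abort();  // UNREACHABLE(): filtered out before codegen
      }
    }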
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, eax holds the generator object.
- __ RecordGeneratorContinuation();
- __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
- __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
- __ j(less, &resume);
- __ Push(result_register());
- __ j(greater, &exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
- __ cmp(esp, ebx);
- __ j(equal, &post_runtime);
- __ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
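
Reviewer note: VisitYield loses its entire suspend/resume state machine. The removed body shows what full-codegen used to maintain per suspend point; reconstructed as a record (field names follow the JSGeneratorObject offsets referenced above, but the struct itself is only a sketch):

    // Per-suspend state the deleted code wrote into the generator object.
    struct GeneratorSuspendSketch {
      int continuation;          // code offset to resume at
      void* context;             // saved lexical context (esi on ia32)
      int resume_mode;           // kNext / kReturn / kThrow
      void* input_or_debug_pos;  // value passed to next()/throw()
    };

Resumable functions must now be handled by a backend that still understands suspension.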
void FullCodeGenerator::PushOperand(MemOperand operand) {
@@ -1863,58 +1580,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(eax);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- if (property->is_static()) {
- PushOperand(Operand(esp, kPointerSize)); // constructor
- } else {
- PushOperand(Operand(esp, 0)); // prototype
- }
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static prototype property is read-only. We handle the non-computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read-only property, we special-case it here so
- // we do not need to do this check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(eax);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(edx);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1948,43 +1613,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(eax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; eax: home_object
- Register scratch = ecx;
- Register scratch2 = edx;
- __ mov(scratch, result_register()); // home_object
- __ mov(eax, MemOperand(esp, kPointerSize)); // value
- __ mov(scratch2, MemOperand(esp, 0)); // this
- __ mov(MemOperand(esp, kPointerSize), scratch2); // this
- __ mov(MemOperand(esp, 0), scratch); // home_object
- // stack: this, home_object. eax: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(eax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = ecx;
- Register scratch2 = edx;
- __ mov(scratch2, MemOperand(esp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; eax: key, edx: value
- __ mov(scratch, MemOperand(esp, kPointerSize)); // this
- __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
- __ mov(scratch, MemOperand(esp, 0)); // home_object
- __ mov(MemOperand(esp, kPointerSize), scratch);
- __ mov(MemOperand(esp, 0), eax);
- __ mov(eax, scratch2);
- // stack: this, home_object, key; eax: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(eax); // Preserve value.
VisitForStackValue(prop->obj());
@@ -1995,6 +1623,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(eax);
}
@@ -2055,26 +1687,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(Immediate(var->name()));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, ecx);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ Check(equal, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
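
Reviewer note: EmitVariableAssignment drops the IsLookupSlot branch and its Runtime::kStoreLookupSlot_{Strict,Sloppy} calls; the two new DCHECKs pin the target to a stack or context slot before the LET hole check runs. The narrowed precondition, sketched:

    #include <cassert>

    struct VarSketch {
      bool stack_allocated = false;
      bool context_slot = false;
      bool lookup_slot = false;
    };

    void EmitAssignmentSketch(const VarSketch& v) {
      assert(!v.lookup_slot);                       // new invariant
      assert(v.stack_allocated || v.context_slot);  // only plain slots
      // ... emit the slot store, plus the hole check for LET inits ...
    }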
@@ -2094,34 +1718,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // eax : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(eax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // eax : value
- // stack : receiver ('this'), home_object, key
-
- PushOperand(eax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
@@ -2169,42 +1765,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- SetExpressionPosition(expr);
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(eax);
- PushOperand(eax);
- PushOperand(Operand(esp, kPointerSize * 2));
- PushOperand(key->value());
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2229,40 +1789,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(eax);
- PushOperand(eax);
- PushOperand(Operand(esp, kPointerSize * 2));
- VisitForStackValue(prop->key());
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2294,111 +1820,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, eax);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(esp, arg_count * kPointerSize));
- } else {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- }
-
- // Push the enclosing function.
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
-
-  // Push the start position of the scope the call resides in.
- __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
- // Push the source position of the eval call.
- __ push(Immediate(Smi::FromInt(expr->position())));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperand(eax); // Function.
- PushOperand(edx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
-      // passing undefined to the call function stub.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- PushOperand(isolate()->factory()->undefined_value());
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Move(eax, Immediate(arg_count));
- __ call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, eax);
-}
-
-
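
Reviewer note: with direct eval gone from this tier, three helpers disappear at once: EmitResolvePossiblyDirectEval, PushCalleeAndWithBaseObject, and EmitPossiblyEvalCall. The first one documents the runtime contract; its pushes, read off the deleted ia32 body above in order, were:

    // Arguments the deleted code pushed for
    // Runtime::kResolvePossiblyDirectEval:
    struct ResolveEvalArgsSketch {
      void* first_arg_or_undefined;  // copy of eval's first argument
      void* enclosing_function;      // from the frame's function slot
      int language_mode;             // pushed as a Smi
      int scope_start_position;      // pushed as a Smi
      int eval_call_position;        // pushed as a Smi
    };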
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2439,47 +1860,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ mov(result_register(),
- FieldOperand(result_register(), HeapObject::kMapOffset));
- PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into edx.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(edx, result_register());
-
- // Load function and argument count into edi and eax.
- __ Move(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(eax);
-}
-
-
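
Reviewer note: EmitSuperConstructorCall is removed as well, but its body is a handy record of the ia32 Construct-builtin register contract, which the surviving VisitCallNew still sets up. Summarized as a sketch (the struct and its names are descriptive only):

    // Register contract read off the deleted body above:
    struct ConstructCallRegsSketch {
      int eax;    // argument count
      void* edi;  // constructor, loaded from below the arguments
      void* edx;  // new.target
    };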
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2567,28 +1947,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2822,17 +2180,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(eax);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -2943,30 +2297,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- PushOperand(MemOperand(esp, kPointerSize));
- PushOperand(result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- PushOperand(result_register());
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -2977,6 +2307,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3010,14 +2342,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
break;
- case NAMED_SUPER_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
case KEYED_PROPERTY:
__ mov(Operand(esp, 2 * kPointerSize), eax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ mov(Operand(esp, 3 * kPointerSize), eax);
+ UNREACHABLE();
break;
}
}
@@ -3057,14 +2387,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
break;
- case NAMED_SUPER_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
case KEYED_PROPERTY:
__ mov(Operand(esp, 2 * kPointerSize), eax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ mov(Operand(esp, 3 * kPointerSize), eax);
+ UNREACHABLE();
break;
}
}
@@ -3123,30 +2451,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3162,6 +2466,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3394,66 +2702,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_obj));
- PushOperand(edx);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(edx));
- // Restore pending message from stack.
- PopOperand(edx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(edx));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(edx));
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(edx); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
- __ j(not_equal, &skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
#undef __
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 10cdb54b40..4599439c81 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -12,15 +12,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/mips/code-stubs-mips.h"
@@ -153,8 +154,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -210,15 +209,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(a3); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ li(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(a1);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
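
Reviewer note: the MIPS prologue now sizes the fast-path context allocation against ConstructorBuiltinsAssembler::MaximumFunctionContextSlots() and, on the slow path, pushes the scope type as an extra Smi argument to Runtime::kNewFunctionContext. A shape-only sketch of the dispatch (the constant below is an assumption, not the real limit):

    #include <cstddef>

    void NewFunctionContextSketch(std::size_t slots, int scope_type) {
      const std::size_t kMaxFastSlots = 64;  // assumed; the real limit is
                                             // MaximumFunctionContextSlots()
      if (slots <= kMaxFastSlots) {
        // fast path: FastNewFunctionContext builtin specialized on
        // scope_type; the result is always in new space, so no write
        // barrier is needed when storing it
      } else {
        // slow path: runtime call, which now also receives scope_type
      }
    }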
if (info->scope()->new_target_var() != nullptr) {
@@ -265,37 +267,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_a1) {
- __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep it marked as such.
- }
- SetVar(this_function_var, a1, a0, a2);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, a3, a0, a2);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_a1) {
- __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register_a1 = false;
- SetVar(rest_param, v0, a1, a2);
- }
+  // We don't support new.target, rest parameters, or the this-function
+  // binding here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -547,10 +522,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ Branch(true_label_);
@@ -782,6 +755,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -808,17 +782,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ li(a2, Operand(variable->name()));
- __ Push(a2);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -831,6 +795,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -869,17 +834,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ li(a2, Operand(variable->name()));
- PushOperand(a2);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1183,93 +1138,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = a1;
- Register temp = a2;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ lw(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ lw(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use the normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = a3;
- Register temp = t0;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ lw(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ lw(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ Branch(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- __ Branch(done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ Branch(done);
- }
- }
-}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1277,8 +1145,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1312,24 +1179,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(v0);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1356,7 +1206,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
@@ -1365,8 +1216,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1376,10 +1228,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1389,6 +1240,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1438,20 +1290,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1474,73 +1326,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(v0); // Save result on the stack
- result_saved = true;
- }
-
- __ lw(a0, MemOperand(sp)); // Duplicate receiver.
- PushOperand(a0);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1552,7 +1337,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1572,8 +1358,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1638,34 +1425,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = a1;
- __ lw(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = t0;
- const Register scratch2 = a1;
- __ lw(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ lw(scratch2, MemOperand(sp, 1 * kPointerSize));
- PushOperands(scratch1, scratch2, result_register());
- }
- break;
- }
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
@@ -1679,6 +1438,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1695,21 +1458,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1748,69 +1505,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(v0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(v0);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, v0 holds the generator object.
- __ RecordGeneratorContinuation();
- __ lw(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
- __ lw(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
- __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
- __ Push(result_register());
- __ Branch(&exception, eq, a1,
- Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(continuation.pos())));
- __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
- __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ Branch(&post_runtime, eq, sp, Operand(a1));
- __ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
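
For context on the bailout above: by this point in the pipeline, resumable functions (generators and async functions) are compiled elsewhere (Ignition, in this era of V8), so full-codegen never sees a Yield node. A minimal standalone C++ sketch of that routing decision; the enum and function names here are illustrative, not V8's real types:

#include <cstdio>

// Illustrative stand-in for V8's function-kind classification.
enum class FunctionKind { kNormal, kGeneratorFunction, kAsyncFunction };

bool IsResumable(FunctionKind kind) {
  return kind == FunctionKind::kGeneratorFunction ||
         kind == FunctionKind::kAsyncFunction;
}

// Anything that can suspend goes to a backend that supports suspension,
// which is why VisitYield above can simply assert UNREACHABLE().
const char* ChooseBackend(FunctionKind kind) {
  return IsResumable(kind) ? "ignition" : "full-codegen";
}

int main() {
  std::printf("generator -> %s\n",
              ChooseBackend(FunctionKind::kGeneratorFunction));
  std::printf("normal    -> %s\n", ChooseBackend(FunctionKind::kNormal));
  return 0;
}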
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1959,60 +1667,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(v0);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = a1;
- if (property->is_static()) {
- __ lw(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ lw(scratch, MemOperand(sp, 0)); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static prototype property is read-only. We handle the non-computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read-only property, we special-case it here so
- // that we do not need to repeat the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(v0);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
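
The removed EmitClassDefineProperties amounted to a per-member dispatch onto definition runtime calls. A compact standalone C++ sketch of that dispatch table, with the runtime call names taken from the removed switch and everything else illustrative:

#include <string>

// Methods become non-enumerable data properties; getters and setters become
// non-enumerable accessors. FIELD was unreachable in full-codegen.
enum class PropertyKind { kMethod, kGetter, kSetter };

std::string RuntimeCallFor(PropertyKind kind) {
  switch (kind) {
    case PropertyKind::kMethod:
      return "DefineDataPropertyInLiteral(DONT_ENUM)";
    case PropertyKind::kGetter:
      return "DefineGetterPropertyUnchecked(DONT_ENUM)";
    case PropertyKind::kSetter:
      return "DefineSetterPropertyUnchecked(DONT_ENUM)";
  }
  return {};
}

int main() { return RuntimeCallFor(PropertyKind::kMethod).empty() ? 1 : 0; }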
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
PopOperand(a1);
@@ -2047,43 +1701,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(v0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; v0: home_object
- Register scratch = a2;
- Register scratch2 = a3;
- __ mov(scratch, result_register()); // home_object
- __ lw(v0, MemOperand(sp, kPointerSize)); // value
- __ lw(scratch2, MemOperand(sp, 0)); // this
- __ sw(scratch2, MemOperand(sp, kPointerSize)); // this
- __ sw(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; v0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(v0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = a2;
- Register scratch2 = a3;
- __ lw(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; v0: key, a3: value
- __ lw(scratch, MemOperand(sp, kPointerSize)); // this
- __ sw(scratch, MemOperand(sp, 2 * kPointerSize));
- __ lw(scratch, MemOperand(sp, 0)); // home_object
- __ sw(scratch, MemOperand(sp, kPointerSize));
- __ sw(v0, MemOperand(sp, 0));
- __ Move(v0, scratch2);
- // stack: this, home_object, key; v0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2094,6 +1711,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(v0);
}
@@ -2156,26 +1777,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(v0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ lw(a2, location);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, a1);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ lw(a2, location);
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2195,35 +1808,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // v0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(v0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to keyed property of super.
- // v0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(v0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
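
The two store helpers removed above only arranged operands and forwarded to the StoreToSuper runtime functions. A rough standalone C++ model of the semantics behind those calls, under a deliberately simplified object representation (none of this is V8's API): the property is resolved starting at the home object's prototype, but a setter runs with the original receiver as 'this'. The keyed variant differs only in that the key is an evaluated expression instead of a literal, matching the extra stack slot in the removed code.

#include <functional>
#include <map>
#include <string>

struct Object {
  Object* prototype = nullptr;
  std::map<std::string, std::function<void(Object&, int)>> setters;
  std::map<std::string, int> fields;
};

// Simplified model of Runtime::kStoreToSuper_*: walk the prototype chain of
// the home object looking for a setter; if one is found, invoke it with the
// original receiver; otherwise define the value on the receiver itself.
void StoreToSuper(Object& home_object, Object& receiver,
                  const std::string& name, int value) {
  for (Object* o = home_object.prototype; o != nullptr; o = o->prototype) {
    auto it = o->setters.find(name);
    if (it != o->setters.end()) {
      it->second(receiver, value);  // 'this' is the receiver, not home
      return;
    }
  }
  receiver.fields[name] = value;
}

int main() {
  Object base, derived, instance;
  derived.prototype = &base;
  base.setters["x"] = [](Object& self, int v) { self.fields["x"] = v * 2; };
  StoreToSuper(derived, instance, "x", 21);
  return instance.fields["x"] == 42 ? 0 : 1;
}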
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// Call keyed store IC.
@@ -2277,43 +1861,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- SetExpressionPosition(expr);
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = a1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ mov(scratch, v0);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, v0, v0, scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ sw(v0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2339,41 +1886,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = a1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ Move(scratch, v0);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, v0, v0, scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ sw(v0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2406,115 +1918,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, v0);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // t4: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ lw(t4, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
- }
-
- // t3: the enclosing function.
- __ lw(t3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // t2: the language mode.
- __ li(t2, Operand(Smi::FromInt(language_mode())));
-
- // t1: the start position of the scope the call resides in.
- __ li(t1, Operand(Smi::FromInt(scope()->start_position())));
-
- // t0: the source position of the eval call.
- __ li(t0, Operand(Smi::FromInt(expr->position())));
-
- // Do the runtime call.
- __ Push(t4, t3, t2, t1, t0);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
-
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in v0)
- // and the object holding it (returned in v1).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ Branch(&call);
- __ bind(&done);
- // Push function.
- __ push(v0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing undefined to the call function stub.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- PushOperand(a2); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ li(a0, Operand(arg_count));
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, v0);
-}
-
-
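
The removed direct-eval machinery reduces to one runtime call whose five operands are spelled out in the register comments above. A compilable sketch of that argument bundle in push order; the field names are descriptive placeholders, not V8 identifiers:

#include <cstdio>

// Operands handed to Runtime::kResolvePossiblyDirectEval by the removed
// EmitResolvePossiblyDirectEval, top of stack last.
struct ResolveEvalArgs {
  const void* first_arg_or_undefined;  // t4: argv[0], or undefined if absent
  const void* enclosing_function;      // t3: closure loaded from the frame
  int language_mode;                   // t2: sloppy or strict, as a Smi
  int scope_start_position;            // t1: start of the surrounding scope
  int eval_call_position;              // t0: source position of the call
};

int main() {
  // After the call, the removed EmitPossiblyEvalCall overwrote the callee
  // slot below the arguments with the resolved function.
  std::printf("args struct: %zu bytes\n", sizeof(ResolveEvalArgs));
  return 0;
}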
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2555,49 +1958,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ lw(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ lw(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into a3.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(a3, result_register());
-
- // Load function and argument count into a1 and a0.
- __ li(a0, Operand(arg_count));
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(v0);
-}
-
-
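
The removed super-call sequence is easy to restate: the construct target is the prototype of the active function's map (the parent class constructor), and new.target is threaded through unchanged. A small standalone C++ model, with every type illustrative:

#include <utility>
#include <vector>

struct Function {
  Function* prototype_parent = nullptr;  // stands in for map->prototype
};

// What the removed EmitSuperConstructorCall assembled before jumping to the
// Construct builtin: target in a1, new.target in a3, argc in a0.
struct ConstructRequest {
  Function* target;
  Function* new_target;
  std::vector<int> args;  // pushed left-to-right
};

ConstructRequest MakeSuperCall(Function& this_function, Function& new_target,
                               std::vector<int> args) {
  return {this_function.prototype_parent, &new_target, std::move(args)};
}

int main() {
  Function parent;
  Function child{&parent};
  ConstructRequest req = MakeSuperCall(child, child, {1, 2, 3});
  return req.target == &parent ? 0 : 1;
}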
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2687,28 +2047,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2944,16 +2282,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(a2, a1);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(v0);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -3059,31 +2393,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = a1;
- __ lw(scratch, MemOperand(sp, 0)); // this
- PushOperands(result_register(), scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- const Register scratch1 = a1;
- const Register scratch2 = t0;
- __ lw(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
- __ lw(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
- PushOperands(result_register(), scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3094,6 +2403,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3130,14 +2441,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ sw(v0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ sw(v0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ sw(v0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ sw(v0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3170,14 +2479,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ sw(v0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ sw(v0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ sw(v0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ sw(v0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3234,30 +2541,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
case KEYED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -3273,6 +2556,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3504,70 +2791,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(a1));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ lw(a1, MemOperand(at));
- PushOperand(a1);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(a1));
- // Restore pending message from stack.
- PopOperand(a1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ sw(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(a1));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- __ li(at, Operand(pending_message_obj));
- __ sw(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(a1));
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(a1); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ li(at, Operand(Smi::FromInt(cmd.token)));
- __ Branch(&skip, ne, a1, Operand(at));
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
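
The deferred-commands loop removed above is a token dispatch: the token popped into a1 selects exactly one deferred action, and every other record is skipped by the guarded branch. A standalone C++ rendering of the same control flow, with illustrative names:

#include <cstdio>
#include <vector>

enum class Command { kReturn, kThrow, kContinue, kBreak };

struct Deferred {
  int token;
  Command command;
};

void EmitCommands(const std::vector<Deferred>& commands, int token) {
  for (const Deferred& cmd : commands) {
    if (cmd.token != token) continue;  // "__ Branch(&skip, ne, a1, ...)"
    switch (cmd.command) {
      case Command::kReturn:   std::puts("unwind and return");  break;
      case Command::kThrow:    std::puts("re-throw TOS");       break;
      case Command::kContinue: std::puts("continue to target"); break;
      case Command::kBreak:    std::puts("break to target");    break;
    }
  }
}

int main() {
  EmitCommands({{1, Command::kReturn}, {2, Command::kBreak}}, 2);
  return 0;
}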
#undef __
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 7640c52031..f6bda9a401 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -12,15 +12,16 @@
// places where we have to move a previous result in v0 to a0 for the
// next call: mov(a0, v0). This is not needed on the other architectures.
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/mips64/code-stubs-mips64.h"
@@ -152,8 +153,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -209,15 +208,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(a3); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ li(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(a1);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
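
The hunk above swaps a code stub for a builtin reached through CodeFactory, but the shape of the decision is unchanged: small contexts take the fast path, large ones call the runtime (which now also receives the scope type). A sketch of that dispatch; the threshold constant is a placeholder, not V8's actual limit:

#include <cstdio>

constexpr int kMaxFastSlots = 255;  // placeholder, not V8's real value

const char* AllocateContext(int slots) {
  // The fast path allocates in new space, so no write barrier is needed
  // when the context is stored, mirroring need_write_barrier above.
  return slots <= kMaxFastSlots ? "FastNewFunctionContext builtin"
                                : "Runtime::kNewFunctionContext";
}

int main() {
  std::printf("%d slots -> %s\n", 8, AllocateContext(8));
  std::printf("%d slots -> %s\n", 4096, AllocateContext(4096));
  return 0;
}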
@@ -264,36 +266,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding for the this-function, which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_a1) {
- __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, a1, a0, a2);
- }
-
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, a3, a0, a2);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_a1) {
- __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register_a1 = false;
- SetVar(rest_param, v0, a1, a2);
- }
+ // We don't support new.target, rest parameters, or the this-function
+ // variable here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -546,10 +522,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ Branch(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ Branch(true_label_);
@@ -781,6 +755,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -807,17 +782,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ li(a2, Operand(variable->name()));
- __ Push(a2);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -830,6 +795,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -868,17 +834,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ li(a2, Operand(variable->name()));
- PushOperand(a2);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1184,93 +1140,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = a1;
- Register temp = a2;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ld(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ ld(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use a normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = a3;
- Register temp = a4;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ ld(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ ld(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ Branch(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
- __ Branch(done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ Branch(done);
- }
- }
-}
-
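
The removed lookup fast case classifies a dynamic variable into one of a few outcomes before falling back to the runtime. A standalone C++ restatement of that classification, using illustrative names rather than V8's:

// DYNAMIC_GLOBAL may use the plain global load only if every sloppy-eval
// extension slot on the context chain is still the hole; DYNAMIC_LOCAL may
// use the shadowed local unless it holds an uninitialized hole that must
// throw a ReferenceError.
enum class VarMode { kDynamicGlobal, kDynamicLocal };
enum class Path { kFastGlobal, kFastLocal, kThrowReferenceError, kSlowLookup };

Path Classify(VarMode mode, bool extensions_empty, bool is_hole,
              bool binding_needs_init) {
  if (!extensions_empty) return Path::kSlowLookup;
  if (mode == VarMode::kDynamicGlobal) return Path::kFastGlobal;
  if (is_hole && binding_needs_init) return Path::kThrowReferenceError;
  return Path::kFastLocal;
}

int main() {
  Path p = Classify(VarMode::kDynamicLocal, true, true, true);
  return p == Path::kThrowReferenceError ? 0 : 1;
}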
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1278,8 +1147,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1313,24 +1181,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(v0);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1357,7 +1208,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
@@ -1366,8 +1218,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1377,10 +1230,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1390,6 +1242,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1439,20 +1292,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1475,73 +1328,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(v0); // Save result on the stack
- result_saved = true;
- }
-
- __ ld(a0, MemOperand(sp)); // Duplicate receiver.
- PushOperand(a0);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
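
The comment opening the removed block explains the split this loop implemented: properties before the first computed name form the "static" part whose map is known ahead of time, and the rest were defined one by one to preserve insertion order. Since full-codegen now bails out on computed names (note the DCHECK added earlier in this function), only the static prefix survives here. A tiny sketch of the split, with illustrative types:

#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct LiteralProp {
  std::string key;
  bool is_computed_name;
};

// Length of the "static" prefix: everything before the first computed name.
std::size_t StaticPrefixLength(const std::vector<LiteralProp>& props) {
  std::size_t n = 0;
  while (n < props.size() && !props[n].is_computed_name) ++n;
  return n;
}

int main() {
  std::vector<LiteralProp> props = {{"a", false}, {"b", false}, {"c", true}};
  std::printf("static prefix: %zu of %zu\n", StaticPrefixLength(props),
              props.size());
  return 0;
}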
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1553,7 +1339,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1573,8 +1360,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1639,34 +1427,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = a1;
- __ ld(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = a4;
- const Register scratch2 = a1;
- __ ld(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ ld(scratch2, MemOperand(sp, 1 * kPointerSize));
- PushOperands(scratch1, scratch2, result_register());
- }
- break;
- }
case KEYED_PROPERTY:
// We need the key and receiver on both the stack and in v0 and a1.
if (expr->is_compound()) {
@@ -1680,6 +1440,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1696,21 +1460,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1749,69 +1507,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(v0);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(v0);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, v0 holds the generator object.
- __ RecordGeneratorContinuation();
- __ ld(a1, FieldMemOperand(v0, JSGeneratorObject::kResumeModeOffset));
- __ ld(v0, FieldMemOperand(v0, JSGeneratorObject::kInputOrDebugPosOffset));
- __ Branch(&resume, eq, a1, Operand(Smi::FromInt(JSGeneratorObject::kNext)));
- __ Push(result_register());
- __ Branch(&exception, eq, a1,
- Operand(Smi::FromInt(JSGeneratorObject::kThrow)));
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ li(a1, Operand(Smi::FromInt(continuation.pos())));
- __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
- __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
- __ mov(a1, cp);
- __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
- kRAHasBeenSaved, kDontSaveFPRegs);
- __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ Branch(&post_runtime, eq, sp, Operand(a1));
- __ push(v0); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1959,60 +1668,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(v0);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = a1;
- if (property->is_static()) {
- __ ld(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ ld(scratch, MemOperand(sp, 0)); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static prototype property is read-only. We handle the non-computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read-only property, we special-case it here so
- // that we do not need to repeat the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(v0);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
__ mov(a0, result_register());
PopOperand(a1);
@@ -2047,43 +1702,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(v0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; v0: home_object
- Register scratch = a2;
- Register scratch2 = a3;
- __ mov(scratch, result_register()); // home_object
- __ ld(v0, MemOperand(sp, kPointerSize)); // value
- __ ld(scratch2, MemOperand(sp, 0)); // this
- __ sd(scratch2, MemOperand(sp, kPointerSize)); // this
- __ sd(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; v0: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(v0);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = a2;
- Register scratch2 = a3;
- __ ld(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; v0: key, a3: value
- __ ld(scratch, MemOperand(sp, kPointerSize)); // this
- __ sd(scratch, MemOperand(sp, 2 * kPointerSize));
- __ ld(scratch, MemOperand(sp, 0)); // home_object
- __ sd(scratch, MemOperand(sp, kPointerSize));
- __ sd(v0, MemOperand(sp, 0));
- __ Move(v0, scratch2);
- // stack: this, home_object, key; v0: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(result_register()); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2094,6 +1712,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(v0);
}
@@ -2156,25 +1778,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- __ Push(var->name());
- __ Push(v0);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ ld(a2, location);
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, a1);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ ld(a2, location);
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2194,35 +1809,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // v0 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(v0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to keyed property of super.
- // v0 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(v0);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// Call keyed store IC.
@@ -2276,43 +1862,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- SetExpressionPosition(expr);
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = a1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ mov(scratch, v0);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, v0, v0, scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ sd(v0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2338,41 +1887,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = a1;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ Move(scratch, v0);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, v0, v0, scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ sd(v0, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2405,115 +1919,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, v0);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // a6: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ ld(a6, MemOperand(sp, arg_count * kPointerSize));
- } else {
- __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
- }
-
- // a5: the enclosing function.
- __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // a4: the language mode.
- __ li(a4, Operand(Smi::FromInt(language_mode())));
-
- // a1: the start position of the scope the call resides in.
- __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
-
- // a0: the source position of the eval call.
- __ li(a0, Operand(Smi::FromInt(expr->position())));
-
- // Do the runtime call.
- __ Push(a6, a5, a4, a1, a0);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
-
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in v0)
- // and the object holding it (returned in v1).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(v0, v1); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the
- // function and receiver and have the slow path jump around this
- // code.
- if (done.is_linked()) {
- Label call;
- __ Branch(&call);
- __ bind(&done);
- // Push function.
- __ push(v0);
- // The receiver is implicitly the global receiver. Indicate this
- // by passing undefined to the call function stub.
- __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
- __ push(a1);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- PushOperand(a2); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(a1);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
- // Record source position for debugger.
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ li(a3, Operand(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ li(a0, Operand(arg_count));
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, v0);
-}
-
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2554,49 +1959,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ ld(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ ld(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into a3.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(a3, result_register());
-
- // Load function and argument count into a1 and a0.
- __ li(a0, Operand(arg_count));
- __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(v0);
-}
-
-
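The removed path compiled super constructor calls in derived constructors; a TypeScript sketch of the semantics (the construct target is the parent class, while new.target remains the derived class):

    class A {
      x: number;
      constructor(x: number) { this.x = x; }
    }
    class B extends A {
      constructor() {
        super(42);  // target: A (the [[Prototype]] of B); new.target: B
      }
    }
    new B().x;  // 42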
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2686,28 +2048,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ GetObjectType(v0, a1, a1);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
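The removed intrinsic tested the JS_REGEXP_TYPE instance type directly; a rough TypeScript approximation (unlike the real instance-type check, this one can be spoofed through Symbol.toStringTag):

    const isRegExp = (v: unknown): boolean =>
      Object.prototype.toString.call(v) === "[object RegExp]";

    isRegExp(/abc/);  // true
    isRegExp("abc");  // false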
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2943,17 +2283,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(a2, a1);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(v0);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- DCHECK(!context_register().is(a2));
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(v0);
}
} else {
// Result of deleting non-property, non-variable reference is true.
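A TypeScript sketch of the delete results produced by the branches above (sloppy-mode script assumed; the variable case is probed via eval because TypeScript and strict mode reject deleting a variable outright):

    function probe() {
      var local = 1;
      const obj: { p?: number } = { p: 1 };
      return [
        eval("delete local"),  // false: non-global, non-dynamic variable
        delete obj.p,          // true: deleting a property
        eval("delete 42"),     // true: non-property, non-variable reference
      ];
    }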
@@ -3059,31 +2394,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = a1;
- __ ld(scratch, MemOperand(sp, 0)); // this
- PushOperands(result_register(), scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- const Register scratch1 = a1;
- const Register scratch2 = a4;
- __ ld(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
- __ ld(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
- PushOperands(result_register(), scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3094,6 +2404,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3130,14 +2442,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ sd(v0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ sd(v0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ sd(v0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ sd(v0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3170,14 +2480,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ sd(v0, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ sd(v0, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ sd(v0, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ sd(v0, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3234,30 +2542,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(v0);
- }
- break;
- }
case KEYED_PROPERTY: {
__ mov(StoreDescriptor::ValueRegister(), result_register());
PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -3273,6 +2557,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3508,69 +2796,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(a1));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ ld(a1, MemOperand(at));
- PushOperand(a1);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(a1));
- // Restore pending message from stack.
- PopOperand(a1);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ li(at, Operand(pending_message_obj));
- __ sd(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(a1));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
- __ li(at, Operand(pending_message_obj));
- __ sd(a1, MemOperand(at));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(a1); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ li(at, Operand(Smi::FromInt(cmd.token)));
- __ Branch(&skip, ne, a1, Operand(at));
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
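For reference, the kind of control flow the removed DeferredCommands dispatch handled: a break, continue, return, or rethrow crossing a finally is turned into a token, the finally body runs, and the command is then re-dispatched (TypeScript sketch):

    function f(): string {
      for (let i = 0; i < 3; i++) {
        try {
          if (i === 1) break;        // the break is deferred as a token...
        } finally {
          console.log("cleanup", i); // ...the finally body runs first...
        }
      }                              // ...then the break is re-dispatched here
      return "done";
    }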
#undef __
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 85d198da2f..c101ad098e 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_PPC
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/ppc/code-stubs-ppc.h"
@@ -152,8 +153,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -208,15 +207,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(r6); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r4);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
@@ -263,37 +265,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_r4) {
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, r4, r3, r5);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, r6, r3, r5);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_r4) {
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register_r4 = false;
- SetVar(rest_param, r3, r4, r5);
- }
+ // We don't support new.target, rest parameters or this-function here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -536,10 +511,8 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
@@ -751,6 +724,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -777,17 +751,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ mov(r5, Operand(variable->name()));
- __ Push(r5);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -800,6 +764,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -834,17 +799,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r5, Operand(variable->name()));
- PushOperand(r5);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1155,92 +1110,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = r4;
- Register temp = r5;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use the normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = r6;
- Register temp = r7;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is often used without introducing
- // variables; in those cases, we do not want to perform a runtime
- // call for every variable in the scope containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ b(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ bne(done);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ b(done);
- }
- }
-}
-
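A sketch of the shadowing case the removed fast path guarded against (TypeScript; sloppy-mode script assumed):

    function g() {
      var a = 1;
      eval("var a = 2");  // sloppy direct eval redeclares 'a' in g's scope
      return a;           // 2: loads of 'a' must go through the dynamic lookup
    }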
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1248,8 +1117,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1282,24 +1150,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(r3);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1326,7 +1177,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
__ mov(r4, Operand(constant_properties));
@@ -1336,8 +1188,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1347,10 +1200,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1360,6 +1212,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1408,20 +1261,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1443,73 +1296,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(r3); // Save result on the stack
- result_saved = true;
- }
-
- __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
- PushOperand(r3);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
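Illustrative TypeScript for the split described in the removed comment block: properties before the first computed name form the static part with a precomputable map, and everything from the first computed name onward is defined property by property:

    const k = "b";
    const o = {
      a: 1,    // static part: shape known ahead of time
      [k]: 2,  // first computed name: the dynamic part starts here
      c: 3,    // still dynamic: defined via a per-property runtime call
    };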
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1521,11 +1307,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
@@ -1542,8 +1327,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
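Roughly, the two array-literal paths above correspond to the following (a sketch; the exact depth and length heuristics live in the elided conditions):

    const fast = [1, 2, 3];   // flat, fast-elements boilerplate: shallow-clone path
    const slow = [[1], [2]];  // nested literals: typically the runtime-call path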
@@ -1606,34 +1392,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = r4;
- __ LoadP(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = r5;
- const Register scratch2 = r4;
- __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
- PushOperands(scratch1, scratch2, result_register());
- }
- break;
- }
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1646,6 +1404,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1663,21 +1425,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1714,73 +1470,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(r3);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(r3);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ b(&suspend);
- __ bind(&continuation);
- // When we arrive here, r3 holds the generator object.
- __ RecordGeneratorContinuation();
- __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kResumeModeOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kReturn), r0);
- __ blt(&resume);
- __ Push(result_register());
- __ bgt(&exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
- __ mr(r4, cp);
- __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r4);
- __ beq(&post_runtime);
- __ push(r3); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
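The Yield visitor is now unreachable because full-codegen bails out of resumable functions entirely; a TypeScript sketch of the suspend/resume semantics the removed block used to implement:

    function* gen() {
      const received: number = yield 1;  // suspend, surfacing 1 to the caller
      return received;                   // the resume value flows back in here
    }
    const it = gen();
    it.next();    // { value: 1, done: false }
    it.next(42);  // { value: 42, done: true }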
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1965,60 +1668,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(r3);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = r4;
- if (property->is_static()) {
- __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ LoadP(scratch, MemOperand(sp, 0)); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static prototype property is read only. We handle the non computed
- // property name case in the parser. Since this is the only case where we
- // need to check for an own read only property we special case this so we do
- // not need to do this for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(r3);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r4);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -2052,43 +1701,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(r3);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; r3: home_object
- Register scratch = r5;
- Register scratch2 = r6;
- __ mr(scratch, result_register()); // home_object
- __ LoadP(r3, MemOperand(sp, kPointerSize)); // value
- __ LoadP(scratch2, MemOperand(sp, 0)); // this
- __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
- __ StoreP(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r3: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(r3);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = r5;
- Register scratch2 = r6;
- __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r3: key, r6: value
- __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
- __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
- __ LoadP(scratch, MemOperand(sp, 0)); // home_object
- __ StoreP(scratch, MemOperand(sp, kPointerSize));
- __ StoreP(r3, MemOperand(sp, 0));
- __ Move(r3, scratch2);
- // stack: this, home_object, key; r3: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(r3); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2099,6 +1711,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(r3);
}
@@ -2160,26 +1776,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(r3);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r4);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, r4);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2198,35 +1806,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // r3 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(r3);
- CallRuntimeWithOperands((is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to keyed property of super.
- // r3 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(r3);
- CallRuntimeWithOperands((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
-}
-
-
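A TypeScript sketch of the super stores the removed helpers compiled: the setter is looked up on the home object's prototype while the receiver stays this (ES2015+ target assumed):

    class Base {
      private _x = 0;
      set x(v: number) { this._x = v; }
      get x() { return this._x; }
    }
    class Derived extends Base {
      write() {
        super.x = 1;  // named super store; the keyed form super[expr] = v was handled alongside
      }
    }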
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2275,43 +1854,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = r4;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ mr(scratch, r3);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r3, r3, scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ StoreP(r3, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
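The removed load-IC helpers compiled named and keyed super calls; in TypeScript terms:

    class A {
      m() { return 1; }
    }
    class B extends A {
      // named form below; the keyed form super[k]() took the LoadKeyedFromSuper path
      m() { return super.m() + 1; }  // method found on A.prototype, receiver stays the B instance
    }
    new B().m();  // 2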
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Load the key.
@@ -2336,41 +1878,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = r4;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ mr(scratch, r3);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r3, r3, scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ StoreP(r3, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2402,117 +1909,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, r3);
}
-
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // r7: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
- } else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
- }
-
- // r6: the receiver of the enclosing function.
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // r5: language mode.
- __ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
-
- // r4: the start position of the scope the call resides in.
- __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
-
- // r3: the source position of the eval call.
- __ LoadSmiLiteral(r3, Smi::FromInt(expr->position()));
-
- // Do the runtime call.
- __ Push(r7, r6, r5, r4, r3);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r3) and
- // the object holding it (returned in r4).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(r3, r4); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r3);
- // Pass undefined as the receiver, which is the WithBaseObject of a
- // non-object environment record. If the callee is sloppy, it will patch
- // it up to be the global receiver.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ push(r4);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- PushOperand(r5); // Reserved receiver slot.
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call
- // Runtime_ResolvePossiblyDirectEval to resolve the function we need
- // to call. Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r4);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- // Record source position for debugger.
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ mov(r3, Operand(arg_count));
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, r3);
-}
-
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2553,49 +1949,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ LoadP(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ LoadP(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into r6.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mr(r6, result_register());
-
- // Load function and argument count into r4 and r3.
- __ mov(r3, Operand(arg_count));
- __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(r3);
-}
-
-
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2683,28 +2036,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2941,16 +2272,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(r5, r4);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r3);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(r3);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -3052,31 +2379,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = r4;
- __ LoadP(scratch, MemOperand(sp, 0)); // this
- PushOperands(result_register(), scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- const Register scratch1 = r4;
- const Register scratch2 = r5;
- __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
- __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
- PushOperands(result_register(), scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3087,6 +2389,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3122,14 +2426,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ StoreP(r3, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3164,14 +2466,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ StoreP(r3, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3228,30 +2528,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r3);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r3);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
@@ -3266,6 +2542,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3496,70 +2776,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r4));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ LoadP(r4, MemOperand(ip));
- PushOperand(r4);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r4));
- // Restore pending message from stack.
- PopOperand(r4);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ StoreP(r4, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(r4));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(pending_message_obj));
- __ StoreP(r4, MemOperand(ip));
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(r4));
- // Restore the accumulator (r3) and token (r4).
- __ Pop(r4, result_register());
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ CmpSmiLiteral(r4, Smi::FromInt(cmd.token), r0);
- __ bne(&skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
#undef __
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index 91fa86de80..1fd971354d 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_S390
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/s390/code-stubs-s390.h"
@@ -151,8 +152,6 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count > 0) {
if (locals_count >= 128) {
@@ -212,15 +211,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(r5); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Operand(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(r3);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
@@ -267,39 +269,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register_r3) {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, r3, r2, r4);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, r5, r2, r4);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- if (!function_in_register_r3) {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
-
- function_in_register_r3 = false;
- SetVar(rest_param, r2, r3, r4);
- }
+ // We don't support new.target, rest parameters or this-function here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -529,10 +502,8 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ b(true_label_);
@@ -726,6 +697,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
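The UNALLOCATED case now records the variable name alongside its feedback slot, so the declarations array becomes a flat sequence of (name, slot) pairs consumed pairwise at declaration time. A sketch with standard-library types standing in for the zone-allocated handle list:

#include <string>
#include <utility>
#include <vector>

// Hypothetical container; globals_ is really a zone-allocated list of handles.
using DeclarationPairs = std::vector<std::pair<std::string, int>>;

void AddGlobal(DeclarationPairs* globals, const std::string& name, int slot) {
  globals->emplace_back(name, slot);  // name first, feedback slot second
}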
@@ -752,17 +724,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ mov(r4, Operand(variable->name()));
- __ Push(r4);
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -774,6 +736,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -807,17 +770,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r4, Operand(variable->name()));
- PushOperand(r4);
- // Push initial value for function declaration.
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1122,89 +1075,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register current = cp;
- Register next = r3;
- Register temp = r4;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(current, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ LoadP(next, ContextMemOperand(current, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- current = next;
- to_check--;
- }
-
- // All extension objects were empty, so it is safe to use the normal
- // global load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = cp;
- Register next = r5;
- Register temp = r6;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
- }
- __ LoadP(next, ContextMemOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering cp.
- context = next;
- }
- }
- // Check that last extension is "the hole".
- __ LoadP(temp, ContextMemOperand(context, Context::EXTENSION_INDEX));
- __ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return a cp-based operand (the write barrier cannot be allowed to
- // destroy the cp register).
- return ContextMemOperand(context, var->index());
-}
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ b(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ bne(done);
- __ mov(r2, Operand(var->name()));
- __ push(r2);
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ b(done);
- }
- }
-}
-
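The removed fast case existed because a sloppy eval such as function f() { eval("var x = 1"); return x; } can introduce a binding that shadows a statically visible one, so DYNAMIC_GLOBAL and DYNAMIC_LOCAL loads had to verify the context chain before trusting static resolution. A compressed sketch of that decision, with illustrative names:

enum class VarMode { kDynamicGlobal, kDynamicLocal, kStatic };

// Returns true when the load must fall back to the runtime lookup, i.e.
// when some enclosing sloppy-eval scope has a non-empty extension object.
bool NeedsRuntimeLookup(VarMode mode, bool eval_extension_found) {
  if (mode == VarMode::kStatic) return false;  // resolved at compile time
  return eval_extension_found;
}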
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1212,8 +1082,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1246,24 +1115,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(r2);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1288,7 +1140,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
__ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
__ mov(r3, Operand(constant_properties));
@@ -1298,8 +1151,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1309,10 +1163,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1322,6 +1175,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1370,20 +1224,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1405,73 +1259,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(r2); // Save result on the stack
- result_saved = true;
- }
-
- __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
- PushOperand(r2);
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1482,11 +1269,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(constant_elements->get(1)));
AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
@@ -1503,8 +1289,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
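The allocation-site decision feeding FastCloneShallowArray is unchanged by this hunk; given the flag logic shown a few lines up, it reduces to:

enum AllocationSiteMode { TRACK_ALLOCATION_SITE, DONT_TRACK_ALLOCATION_SITE };

// Fast object elements cannot transition to a more general kind, so the
// site is only worth tracking when pretenuring wants the feedback.
AllocationSiteMode ChooseMode(bool has_fast_elements, bool pretenuring) {
  return (has_fast_elements && !pretenuring) ? DONT_TRACK_ALLOCATION_SITE
                                             : TRACK_ALLOCATION_SITE;
}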
@@ -1566,34 +1353,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch = r3;
- __ LoadP(scratch, MemOperand(sp, kPointerSize));
- PushOperands(scratch, result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- const Register scratch1 = r4;
- const Register scratch2 = r3;
- __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
- __ LoadP(scratch2, MemOperand(sp, 1 * kPointerSize));
- PushOperands(scratch1, scratch2, result_register());
- }
- break;
- }
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1606,6 +1365,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->key());
}
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1623,21 +1386,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1674,72 +1431,19 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(r2);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(r2);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ b(&suspend);
- __ bind(&continuation);
- // When we arrive here, r2 holds the generator object.
- __ RecordGeneratorContinuation();
- __ LoadP(r3, FieldMemOperand(r2, JSGeneratorObject::kResumeModeOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::kReturn), r0);
- __ blt(&resume);
- __ Push(result_register());
- __ bgt(&exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
- __ StoreP(r3, FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset),
- r0);
- __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset), r0);
- __ LoadRR(r3, cp);
- __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ CmpP(sp, r3);
- __ beq(&post_runtime);
- __ push(r2); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
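For reference, the removed suspend path recorded the following state on the generator object before calling Runtime::kSuspendJSGeneratorObject; field names follow the offsets used above, types are simplified for the sketch:

struct GeneratorSuspendState {
  int continuation;          // code position to resume at, stored as a smi
  void* context;             // cp at the suspend point, barrier-protected
  int resume_mode;           // kNext < kReturn < kThrow, compared on resume
  void* input_or_debug_pos;  // value handed back in on resumption
};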
void FullCodeGenerator::PushOperands(Register reg1, Register reg2) {
@@ -1870,34 +1574,42 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
case Token::MUL: {
Label mul_zero;
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ __ SmiUntag(ip, right);
+ __ MulPWithCondition(scratch2, ip, left);
+ __ b(overflow, &stub_call);
+ __ beq(&mul_zero, Label::kNear);
+ __ LoadRR(right, scratch2);
+ } else {
#if V8_TARGET_ARCH_S390X
- // Remove tag from both operands.
- __ SmiUntag(ip, right);
- __ SmiUntag(scratch2, left);
- __ mr_z(scratch1, ip);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ lr(ip, scratch2); // 32 bit load
- __ sra(ip, Operand(31));
- __ cr_z(ip, scratch1); // 32 bit compare
- __ bne(&stub_call);
+ // Remove tag from both operands.
+ __ SmiUntag(ip, right);
+ __ SmiUntag(scratch2, left);
+ __ mr_z(scratch1, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits
+ // of the result are identical.
+ __ lr(ip, scratch2); // 32 bit load
+ __ sra(ip, Operand(31));
+ __ cr_z(ip, scratch1); // 32 bit compare
+ __ bne(&stub_call);
#else
- __ SmiUntag(ip, right);
- __ LoadRR(scratch2, left); // load into low order of reg pair
- __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ TestIfInt32(scratch1, scratch2, ip);
- __ bne(&stub_call);
+ __ SmiUntag(ip, right);
+ __ LoadRR(scratch2, left); // load into low order of reg pair
+ __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
+ // Check for overflowing the smi range - no overflow if higher 33 bits
+ // of the result are identical.
+ __ TestIfInt32(scratch1, scratch2, ip);
+ __ bne(&stub_call);
#endif
- // Go slow on zero result to handle -0.
- __ chi(scratch2, Operand::Zero());
- __ beq(&mul_zero, Label::kNear);
+ // Go slow on zero result to handle -0.
+ __ chi(scratch2, Operand::Zero());
+ __ beq(&mul_zero, Label::kNear);
#if V8_TARGET_ARCH_S390X
- __ SmiTag(right, scratch2);
+ __ SmiTag(right, scratch2);
#else
- __ LoadRR(right, scratch2);
+ __ LoadRR(right, scratch2);
#endif
+ }
__ b(&done);
// We need -0 if we were multiplying a negative number with 0 to get 0.
// We know one of them was zero.
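The widened overflow check is the same on both multiply paths: after untagging one operand, the 64-bit product is a valid smi payload exactly when its upper 33 bits are a sign extension of the low word. A self-contained equivalent:

#include <cstdint>

bool SmiMulFits(int32_t left, int32_t right, int32_t* out) {
  int64_t product = static_cast<int64_t>(left) * right;
  // "Higher 33 bits identical" is the same as: truncating to 32 bits and
  // sign-extending back reproduces the full product.
  if (product != static_cast<int32_t>(product)) return false;  // to stub call
  *out = static_cast<int32_t>(product);
  return true;  // a zero result still needs the separate -0 check
}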
@@ -1925,58 +1637,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(r2);
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- Register scratch = r3;
- if (property->is_static()) {
- __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
- } else {
- __ LoadP(scratch, MemOperand(sp, 0)); // prototype
- }
- PushOperand(scratch);
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
- // The static "prototype" property is read-only. The parser already handles
- // the non-computed property name case, so this is the only place where we
- // need to check for an own read-only property; we special-case it here
- // rather than doing the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(r2);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(r3);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -2009,43 +1669,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(r2);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; r2: home_object
- Register scratch = r4;
- Register scratch2 = r5;
- __ LoadRR(scratch, result_register()); // home_object
- __ LoadP(r2, MemOperand(sp, kPointerSize)); // value
- __ LoadP(scratch2, MemOperand(sp, 0)); // this
- __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
- __ StoreP(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r2: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(r2);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = r4;
- Register scratch2 = r5;
- __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r2: key, r5: value
- __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
- __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
- __ LoadP(scratch, MemOperand(sp, 0)); // home_object
- __ StoreP(scratch, MemOperand(sp, kPointerSize));
- __ StoreP(r2, MemOperand(sp, 0));
- __ Move(r2, scratch2);
- // stack: this, home_object, key; r2: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(r2); // Preserve value.
VisitForStackValue(prop->obj());
@@ -2056,6 +1679,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(r2);
}
@@ -2116,26 +1743,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
EmitStoreToStackLocalOrContextSlot(var, location);
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(r2);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r3);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ LoadP(r4, location);
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, r3);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ Check(eq, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2152,33 +1771,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // r2 : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(r2);
- CallRuntimeWithOperands((is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy));
-}
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to keyed property of super.
- // r2 : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(r2);
- CallRuntimeWithOperands((is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy));
-}
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
PopOperands(StoreDescriptor::ReceiverRegister(),
@@ -2226,42 +1818,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitCall(expr, convert_mode);
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- const Register scratch = r3;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ LoadRR(scratch, r2);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r2, r2, scratch);
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ StoreP(r2, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Load the key.
@@ -2285,40 +1841,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- const Register scratch = r3;
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForAccumulatorValue(super_ref->home_object());
- __ LoadRR(scratch, r2);
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r2, r2, scratch);
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ StoreP(r2, MemOperand(sp, kPointerSize));
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2350,113 +1872,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, r2);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // r6: copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
- } else {
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- }
-
- // r5: the enclosing function.
- __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-
- // r4: language mode.
- __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
-
- // r3: the start position of the scope the call resides in.
- __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
-
- // r2: the source position of the eval call.
- __ LoadSmiLiteral(r2, Smi::FromInt(expr->position()));
-
- // Do the runtime call.
- __ Push(r6, r5, r4, r3, r2);
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
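The removed resolver marshalled five values for Runtime::kResolvePossiblyDirectEval; laid out as a record (types simplified, order as pushed above):

struct ResolveEvalArgs {
  void* first_argument;      // eval's first argument, or undefined (r6)
  void* enclosing_function;  // from the frame's function slot (r5)
  int language_mode;         // sloppy or strict, as a smi (r4)
  int scope_start_position;  // start of the scope holding the call (r3)
  int eval_position;         // source position of the eval call itself (r2)
};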
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in r2) and
- // the object holding it (returned in r3).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(r2, r3); // Function, receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ b(&call);
- __ bind(&done);
- // Push function.
- __ push(r2);
- // Pass undefined as the receiver, which is the WithBaseObject of a
- // non-object environment record. If the callee is sloppy, it will patch
- // it up to be the global receiver.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ push(r3);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- PushOperand(r4); // Reserved receiver slot.
- }
-}
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call
- // Runtime_ResolvePossiblyDirectEval to resolve the function we need
- // to call. Then we call the resolved function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r3);
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- // Record source position for debugger.
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ mov(r2, Operand(arg_count));
- __ Call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, r2);
-}
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2496,48 +1911,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
context()->Plug(r2);
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ LoadP(result_register(),
- FieldMemOperand(result_register(), HeapObject::kMapOffset));
- __ LoadP(result_register(),
- FieldMemOperand(result_register(), Map::kPrototypeOffset));
- PushOperand(result_register());
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into r5.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ LoadRR(r5, result_register());
-
- // Load function and argument count into r1 and r0.
- __ mov(r2, Operand(arg_count));
- __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(r2);
-}
-
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2621,27 +1994,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
context()->Plug(if_true, if_false);
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r2, if_false);
- __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2871,16 +2223,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(r4, r3);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(r2);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(r2);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -2981,31 +2329,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = r3;
- __ LoadP(scratch, MemOperand(sp, 0)); // this
- PushOperands(result_register(), scratch, result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- const Register scratch1 = r3;
- const Register scratch2 = r4;
- __ LoadP(scratch1, MemOperand(sp, 1 * kPointerSize)); // this
- __ LoadP(scratch2, MemOperand(sp, 0 * kPointerSize)); // home object
- PushOperands(result_register(), scratch1, scratch2, result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -3016,6 +2339,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3051,14 +2376,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ StoreP(r2, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3093,14 +2416,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ StoreP(r2, MemOperand(sp, kPointerSize));
break;
- case NAMED_SUPER_PROPERTY:
- __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
- break;
case KEYED_PROPERTY:
__ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
+ UNREACHABLE();
break;
}
}
@@ -3157,30 +2478,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r2);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(r2);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
@@ -3195,6 +2492,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3412,77 +2713,16 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
PushOperand(ip);
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r3));
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ LoadP(r3, MemOperand(ip));
- PushOperand(r3);
-
- ClearPendingMessage();
-}
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r3));
- // Restore pending message from stack.
- PopOperand(r3);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(ip, Operand(pending_message_obj));
- __ StoreP(r3, MemOperand(ip));
-}
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(r3));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(pending_message_obj));
- __ StoreP(r3, MemOperand(ip));
-}
-
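The removed finally-block helpers implemented a save/clear/restore discipline on the isolate's pending-message slot. A self-contained RAII sketch of the same idea; the real code goes through an ExternalReference rather than a raw pointer:

struct PendingMessageScope {
  void** slot;
  void* saved;
  explicit PendingMessageScope(void** s) : slot(s), saved(*s) {
    *s = nullptr;  // EnterFinallyBlock: stash the message, then clear it
  }
  ~PendingMessageScope() { *slot = saved; }  // ExitFinallyBlock: restore it
};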
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(r3));
- // Restore the accumulator (r2) and token (r3).
- __ Pop(r3, result_register());
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
- __ bne(&skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
#undef __
#if V8_TARGET_ARCH_S390X
static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
-static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+static const int16_t kBackEdgeBranchOffsetInHalfWords = 0x11;
#else
static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
-static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+static const int16_t kBackEdgeBranchOffsetInHalfWords = 0xD;
#endif
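The renamed constants make the encoding explicit: s390 relative branches take their displacement in halfwords (2-byte units), so the value is now stored unscaled, which suggests the assembler applies the factor of two. The arithmetic, spelled out:

constexpr int16_t kOffsetInHalfWords = 0x11;  // 64-bit variant shown above
constexpr int kOffsetInBytes = kOffsetInHalfWords * 2;
static_assert(kOffsetInBytes == 0x11 * 2,
              "same reach as the old pre-scaled kBackEdgeBranchOffset");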
void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
@@ -3500,7 +2740,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
// brasrl r14, <interrupt stub address>
// <reset profiling counter>
// ok-label
- patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
+ patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffsetInHalfWords));
break;
}
case ON_STACK_REPLACEMENT:
@@ -3509,7 +2749,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
// brasrl r14, <interrupt stub address>
// <reset profiling counter>
// ok-label ----- pc_after points here
- patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
+ patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffsetInHalfWords));
break;
}
@@ -3550,7 +2790,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
isolate->builtins()->OnStackReplacement()->entry());
return ON_STACK_REPLACEMENT;
}
-
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 0720c3d083..02f93f9807 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_X64
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
namespace v8 {
@@ -131,8 +132,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(info->literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
@@ -189,14 +188,17 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ Push(rdx); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ Push(rdi);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
@@ -243,37 +245,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, rdi, rbx, rcx);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, rdx, rbx, rcx);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register) {
- __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register = false;
- SetVar(rest_param, rax, rbx, rdx);
- }
+ // We don't support new.target, rest parameters, or the this-function
+ // variable here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
// Possibly allocate an arguments object.
DCHECK_EQ(scope(), info->scope());
@@ -521,10 +496,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -745,6 +718,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -771,16 +745,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ Push(variable->name());
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -793,6 +758,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -830,15 +796,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- PushOperand(variable->name());
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1138,89 +1096,6 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register context = rsi;
- Register temp = rdx;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- to_check--;
- }
-
- // All extension objects were empty, so it is safe to use the normal
- // global load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = rsi;
- Register temp = rbx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- __ movp(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering rsi.
- context = temp;
- }
- }
- // Check that last extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an rsi-based operand (the write barrier cannot be allowed to
- // destroy the rsi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
- // Generate fast-case code for variables that might be shadowed by
- // eval-introduced variables. Eval is used a lot without
- // introducing variables. In those cases, we do not want to
- // perform a runtime call for all variables in the scope
- // containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, done);
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ jmp(done);
- }
- }
-}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1228,8 +1103,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1262,24 +1136,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup slot");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ Push(var->name());
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(rax);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1306,7 +1163,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
if (MustCreateObjectLiteralWithRuntime(expr)) {
__ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1319,8 +1177,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1330,10 +1189,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1343,6 +1201,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1386,20 +1245,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1420,72 +1279,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(rax); // Save result on the stack
- result_saved = true;
- }
-
- PushOperand(Operand(rsp, 0)); // Duplicate receiver.
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
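For illustration, the static/dynamic split described in the removed comment is easiest to see at the source level. A minimal TypeScript sketch (names are illustrative, not V8 API):

    // Properties before the first computed name form the "static" part,
    // whose map the boilerplate can precompute; everything from the first
    // computed name onward is "dynamic" and was defined by runtime calls
    // in source order.
    const key = "dyn";
    const o = {
      a: 1,       // static: name known at compile time
      b: 2,       // static
      [key]: 3,   // first computed name: dynamic part starts here
      c: 4,       // dynamic as well, to preserve insertion order
    };

With the DCHECK added above, full-codegen only ever sees literals with no computed names, so the dynamic loop is dead code and is removed here.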
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1497,7 +1290,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1518,8 +1312,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ Move(rbx, Smi::FromInt(expr->literal_index()));
__ Move(rcx, constant_elements);
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1582,30 +1377,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(rsp, kPointerSize));
- PushOperand(result_register());
- }
- break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(rsp, 2 * kPointerSize));
- PushOperand(MemOperand(rsp, 2 * kPointerSize));
- PushOperand(result_register());
- }
- break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1618,6 +1389,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1634,21 +1409,15 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1686,73 +1455,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(rax);
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(rax);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, rax holds the generator object.
- __ RecordGeneratorContinuation();
- __ movp(rbx, FieldOperand(rax, JSGeneratorObject::kResumeModeOffset));
- __ movp(rax, FieldOperand(rax, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ SmiCompare(rbx, Smi::FromInt(JSGeneratorObject::kReturn));
- __ j(less, &resume);
- __ Push(result_register());
- __ j(greater, &exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
- Smi::FromInt(continuation.pos()));
- __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
- __ movp(rcx, rsi);
- __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
- kDontSaveFPRegs);
- __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpp(rsp, rbx);
- __ j(equal, &post_runtime);
- __ Push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
-
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
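For illustration, the deleted body implemented the suspend/resume protocol for functions like the one below; after this change full-codegen never compiles them, and yield is handled by another compiler. A TypeScript sketch of the source-level behavior (not V8 API):

    function* counter(): Generator<number> {
      let i = 0;
      while (true) {
        yield i++;  // each yield suspended the frame and recorded the
                    // continuation position in the generator object
      }
    }
    const it = counter();
    console.log(it.next().value);  // 0
    console.log(it.next().value);  // 1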
void FullCodeGenerator::PushOperand(MemOperand operand) {
@@ -1856,57 +1572,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- if (property->is_static()) {
- PushOperand(Operand(rsp, kPointerSize)); // constructor
- } else {
- PushOperand(Operand(rsp, 0)); // prototype
- }
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read-only. We handle the non-computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read-only property, we special-case it here so
-    // we do not need to do the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ Push(rax);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- default:
- UNREACHABLE();
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(rdx);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1940,43 +1605,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(rax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; rax: home_object
- Register scratch = rcx;
- Register scratch2 = rdx;
- __ Move(scratch, result_register()); // home_object
- __ movp(rax, MemOperand(rsp, kPointerSize)); // value
- __ movp(scratch2, MemOperand(rsp, 0)); // this
- __ movp(MemOperand(rsp, kPointerSize), scratch2); // this
- __ movp(MemOperand(rsp, 0), scratch); // home_object
- // stack: this, home_object; rax: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(rax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = rcx;
- Register scratch2 = rdx;
- __ movp(scratch2, MemOperand(rsp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; rax: key, rdx: value
- __ movp(scratch, MemOperand(rsp, kPointerSize)); // this
- __ movp(MemOperand(rsp, 2 * kPointerSize), scratch);
- __ movp(scratch, MemOperand(rsp, 0)); // home_object
- __ movp(MemOperand(rsp, kPointerSize), scratch);
- __ movp(MemOperand(rsp, 0), rax);
- __ Move(rax, scratch2);
- // stack: this, home_object, key; rax: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(rax); // Preserve value.
VisitForStackValue(prop->obj());
@@ -1987,6 +1615,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(rax);
}
@@ -2045,26 +1677,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(var->name());
- __ Push(rax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, rcx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, rcx);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ Check(equal, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
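The debug-only check above asserts that an initializing store to a let binding still finds the hole sentinel, i.e. the binding is initialized exactly once. In source terms, the hole marks the temporal dead zone; a TypeScript sketch (illustrative only):

    {
      // `x` exists here but holds the hole; reading it would throw:
      // console.log(x);  // ReferenceError before initialization
      let x = 1;  // Token::INIT store: overwrites the hole exactly once
      x = 2;      // ordinary store: no hole check required
    }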
@@ -2083,35 +1707,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // rax : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(rax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // rax : value
- // stack : receiver ('this'), home_object, key
- DCHECK(prop != NULL);
-
- PushOperand(rax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
PopOperand(StoreDescriptor::NameRegister()); // Key.
@@ -2156,43 +1751,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
- SetExpressionPosition(prop);
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(rax);
- PushOperand(rax);
- PushOperand(Operand(rsp, kPointerSize * 2));
- PushOperand(key->value());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ movp(Operand(rsp, kPointerSize), rax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Common code for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2217,41 +1775,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(rax);
- PushOperand(rax);
- PushOperand(Operand(rsp, kPointerSize * 2));
- VisitForStackValue(prop->key());
-
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ movp(Operand(rsp, kPointerSize), rax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2284,111 +1807,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, rax);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ Push(Operand(rsp, arg_count * kPointerSize));
- } else {
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
-
- // Push the enclosing function.
- __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
-
-  // Push the start position of the scope the call resides in.
- __ Push(Smi::FromInt(scope()->start_position()));
-
- // Push the source position of the eval call.
- __ Push(Smi::FromInt(expr->position()));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in rax) and
- // the object holding it (returned in rdx).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperand(rax); // Function.
- PushOperand(rdx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ Push(rax);
- // Pass undefined as the receiver, which is the WithBaseObject of a
- // non-object environment record. If the callee is sloppy, it will patch
- // it up to be the global receiver.
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- OperandStackDepthIncrement(1);
- __ PushRoot(Heap::kUndefinedValueRootIndex);
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and resolve
- // eval.
- __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the callee.
- __ movp(Operand(rsp, (arg_count + 1) * kPointerSize), rax);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ Move(rdx, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ Set(rax, arg_count);
- __ call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, rax);
-}
-
-
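For illustration, the removed resolution path was only needed for direct calls to eval, which evaluate in the caller's scope and language mode (hence pushing the enclosing function, language mode, and positions above); indirect calls see the global scope and need no resolution. A TypeScript sketch of the distinction (plain language semantics, not V8 API):

    function f() {
      const local = 42;
      console.log(eval("local"));  // direct eval: sees f's scope -> 42
      // (0, eval)("local");       // indirect eval: global scope ->
      //                           // ReferenceError, nothing to resolve
    }
    f();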
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2428,48 +1846,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
context()->Plug(rax);
}
-
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ movp(result_register(),
- FieldOperand(result_register(), HeapObject::kMapOffset));
- PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into rdx.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ movp(rdx, result_register());
-
- // Load function and argument count into rdi and rax.
- __ Set(rax, arg_count);
- __ movp(rdi, Operand(rsp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(rax);
-}
-
-
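For illustration, the removed EmitSuperConstructorCall located the parent constructor through the derived constructor's prototype and forwarded new.target so allocation happens for the right class. A TypeScript sketch of the source-level behavior:

    class Base {
      x: number;
      constructor(x: number) { this.x = x; }
    }
    class Derived extends Base {
      constructor() {
        // target = Object.getPrototypeOf(Derived) === Base;
        // new.target is forwarded so `this` is allocated as a Derived.
        super(1);
      }
    }
    console.log(new Derived().x);  // 1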
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2557,28 +1933,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CmpObjectType(rax, JS_REGEXP_TYPE, rbx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2813,17 +2167,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ Push(var->name());
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(rax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(rax);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -2933,30 +2283,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- PushOperand(MemOperand(rsp, kPointerSize));
- PushOperand(result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- PushOperand(result_register());
- PushOperand(MemOperand(rsp, 2 * kPointerSize));
- PushOperand(MemOperand(rsp, 2 * kPointerSize));
- PushOperand(result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -2968,6 +2294,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3001,14 +2329,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
break;
- case NAMED_SUPER_PROPERTY:
- __ movp(Operand(rsp, 2 * kPointerSize), rax);
- break;
case KEYED_PROPERTY:
__ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ movp(Operand(rsp, 3 * kPointerSize), rax);
+ UNREACHABLE();
break;
}
}
@@ -3046,14 +2372,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
break;
- case NAMED_SUPER_PROPERTY:
- __ movp(Operand(rsp, 2 * kPointerSize), rax);
- break;
case KEYED_PROPERTY:
__ movp(Operand(rsp, 2 * kPointerSize), rax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ movp(Operand(rsp, 3 * kPointerSize), rax);
+ UNREACHABLE();
break;
}
}
@@ -3112,30 +2436,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(rax);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3150,6 +2450,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3382,68 +2686,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-
-void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(rdx));
-
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Load(rdx, pending_message_obj);
- PushOperand(rdx);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(rdx));
- // Restore pending message from stack.
- PopOperand(rdx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ Store(pending_message_obj, rdx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(rdx));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Store(pending_message_obj, rdx);
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(rdx); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ SmiCompare(rdx, Smi::FromInt(cmd.token));
- __ j(not_equal, &skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
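For illustration, the removed dispatch replayed a pending break, continue, return, or rethrow after a finally block finished; each pending action was encoded as a token popped alongside the accumulator. A TypeScript sketch of the source pattern it served:

    function g(): number {
      try {
        return 1;                // command recorded; finally runs first
      } finally {
        console.log("cleanup");  // executes before the return completes
      }
    }
    console.log(g());  // prints "cleanup", then 1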
#undef __
diff --git a/deps/v8/src/full-codegen/x87/OWNERS b/deps/v8/src/full-codegen/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/full-codegen/x87/OWNERS
+++ b/deps/v8/src/full-codegen/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index 7cc7e2bc04..2f50c28e75 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_X87
-#include "src/full-codegen/full-codegen.h"
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
+#include "src/full-codegen/full-codegen.h"
#include "src/ic/ic.h"
#include "src/x87/frames-x87.h"
@@ -130,8 +131,6 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
- // Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
OperandStackDepthIncrement(locals_count);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
@@ -189,15 +188,18 @@ void FullCodeGenerator::Generate() {
if (info->scope()->new_target_var() != nullptr) {
__ push(edx); // Preserve new target.
}
- if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
- FastNewFunctionContextStub stub(isolate());
+ if (slots <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ Callable callable = CodeFactory::FastNewFunctionContext(
+ isolate(), info->scope()->scope_type());
__ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
Immediate(slots));
- __ CallStub(&stub);
- // Result of FastNewFunctionContextStub is always in new space.
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
+ // Result of the FastNewFunctionContext builtin is always in new space.
need_write_barrier = false;
} else {
__ push(edi);
+ __ Push(Smi::FromInt(info->scope()->scope_type()));
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
@@ -244,37 +246,10 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::FunctionContext(),
BailoutState::NO_REGISTERS);
- // Possibly set up a local binding to the this function which is used in
- // derived constructors with super calls.
- Variable* this_function_var = info->scope()->this_function_var();
- if (this_function_var != nullptr) {
- Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-      // The write barrier clobbers the register again; keep it marked as such.
- }
- SetVar(this_function_var, edi, ebx, ecx);
- }
-
- // Possibly set up a local binding to the new target value.
- Variable* new_target_var = info->scope()->new_target_var();
- if (new_target_var != nullptr) {
- Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, edx, ebx, ecx);
- }
-
- // Possibly allocate RestParameters
- Variable* rest_param = info->scope()->rest_parameter();
- if (rest_param != nullptr) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register) {
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- }
- FastNewRestParameterStub stub(isolate());
- __ CallStub(&stub);
- function_in_register = false;
- SetVar(rest_param, eax, ebx, edx);
- }
+  // We don't support new.target, rest parameters or this-function here.
+ DCHECK_NULL(info->scope()->new_target_var());
+ DCHECK_NULL(info->scope()->rest_parameter());
+ DCHECK_NULL(info->scope()->this_function_var());
Variable* arguments = info->scope()->arguments();
if (arguments != NULL) {
@@ -502,10 +477,8 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
true,
true_label_,
false_label_);
- DCHECK(lit->IsNull(isolate()) || lit->IsUndefined(isolate()) ||
- !lit->IsUndetectable());
- if (lit->IsUndefined(isolate()) || lit->IsNull(isolate()) ||
- lit->IsFalse(isolate())) {
+ DCHECK(lit->IsNullOrUndefined(isolate()) || !lit->IsUndetectable());
+ if (lit->IsNullOrUndefined(isolate()) || lit->IsFalse(isolate())) {
if (false_label_ != fall_through_) __ jmp(false_label_);
} else if (lit->IsTrue(isolate()) || lit->IsJSObject()) {
if (true_label_ != fall_through_) __ jmp(true_label_);
@@ -728,6 +701,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -754,16 +728,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
break;
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ VariableDeclaration");
- DCHECK_EQ(VAR, variable->mode());
- DCHECK(!variable->binding_needs_init());
- __ push(Immediate(variable->name()));
- __ CallRuntime(Runtime::kDeclareEvalVar);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -775,6 +740,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Variable* variable = proxy->var();
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
+ globals_->Add(variable->name(), zone());
FeedbackVectorSlot slot = proxy->VariableFeedbackSlot();
DCHECK(!slot.IsInvalid());
globals_->Add(handle(Smi::FromInt(slot.ToInt()), isolate()), zone());
@@ -807,15 +773,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ FunctionDeclaration");
- PushOperand(variable->name());
- VisitForStackValue(declaration->fun());
- CallRuntimeWithOperands(Runtime::kDeclareEvalFunction);
- PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1101,97 +1059,13 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
-
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow) {
- Register context = esi;
- Register temp = edx;
-
- int to_check = scope()->ContextChainLengthUntilOutermostSloppyEval();
- for (Scope* s = scope(); to_check > 0; s = s->outer_scope()) {
- if (!s->NeedsContext()) continue;
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- // Load next context in chain.
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- to_check--;
- }
-
- // All extension objects were empty and it is safe to use a normal global
- // load machinery.
- EmitGlobalVariableLoad(proxy, typeof_mode);
-}
-
-
-MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
- Label* slow) {
- DCHECK(var->IsContextSlot());
- Register context = esi;
- Register temp = ebx;
-
- for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
- if (s->NeedsContext()) {
- if (s->calls_sloppy_eval()) {
- // Check that extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
- }
- __ mov(temp, ContextOperand(context, Context::PREVIOUS_INDEX));
- // Walk the rest of the chain without clobbering esi.
- context = temp;
- }
- }
- // Check that last extension is "the hole".
- __ JumpIfNotRoot(ContextOperand(context, Context::EXTENSION_INDEX),
- Heap::kTheHoleValueRootIndex, slow);
-
- // This function is used only for loads, not stores, so it's safe to
- // return an esi-based operand (the write barrier cannot be allowed to
- // destroy the esi register).
- return ContextOperand(context, var->index());
-}
-
-
-void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
- TypeofMode typeof_mode,
- Label* slow, Label* done) {
-  // Generate fast-case code for variables that might be shadowed by
-  // eval-introduced variables. Eval is often used without introducing
-  // any variables; in those cases we do not want to perform a runtime
-  // call for every variable in the scope containing the eval.
- Variable* var = proxy->var();
- if (var->mode() == DYNAMIC_GLOBAL) {
- EmitLoadGlobalCheckExtensions(proxy, typeof_mode, slow);
- __ jmp(done);
- } else if (var->mode() == DYNAMIC_LOCAL) {
- Variable* local = var->local_if_not_shadowed();
- __ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->binding_needs_init()) {
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, done);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError);
- } else {
- __ jmp(done);
- }
- }
-}
-
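For illustration, the removed fast path served variables that a sloppy-mode direct eval might shadow: it walked the context chain, checking each scope's extension slot, before falling back to a runtime lookup. A TypeScript sketch of the situation (illustrative only; assumes sloppy, non-module code):

    function h(code: string) {
      var x = "outer";
      eval(code);  // a sloppy direct eval may declare its own `var x`
      return x;    // DYNAMIC_LOCAL: fast load only if no extension found
    }
    console.log(h(""));                 // "outer"
    console.log(h("var x = 'inner'"));  // "inner": eval wrote the binding

Functions containing such an eval no longer reach full-codegen at all, which is why the LOOKUP case below becomes UNREACHABLE.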
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
SetExpressionPosition(proxy);
PrepareForBailoutForId(proxy->BeforeId(), BailoutState::NO_REGISTERS);
Variable* var = proxy->var();
- // Three cases: global variables, lookup variables, and all other types of
- // variables.
+ // Two cases: global variables and all other types of variables.
switch (var->location()) {
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
@@ -1224,24 +1098,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
break;
}
- case VariableLocation::LOOKUP: {
- Comment cmnt(masm_, "[ Lookup variable");
- Label done, slow;
- // Generate code for loading from variables potentially shadowed
- // by eval-introduced variables.
- EmitDynamicLookupFastCase(proxy, typeof_mode, &slow, &done);
- __ bind(&slow);
- __ push(Immediate(var->name()));
- Runtime::FunctionId function_id =
- typeof_mode == NOT_INSIDE_TYPEOF
- ? Runtime::kLoadLookupSlot
- : Runtime::kLoadLookupSlotInsideTypeof;
- __ CallRuntime(function_id);
- __ bind(&done);
- context()->Plug(eax);
- break;
- }
-
+ case VariableLocation::LOOKUP:
case VariableLocation::MODULE:
UNREACHABLE();
}
@@ -1267,7 +1124,8 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
- Handle<FixedArray> constant_properties = expr->constant_properties();
+ Handle<FixedArray> constant_properties =
+ expr->GetOrBuildConstantProperties(isolate());
int flags = expr->ComputeFlags();
// If any of the keys would store to the elements array, then we shouldn't
// allow it.
@@ -1282,8 +1140,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
- __ CallStub(&stub);
+ Callable callable = CodeFactory::FastCloneShallowObject(
+ isolate(), expr->properties_count());
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1293,10 +1152,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
bool result_saved = false;
AccessorTable accessor_table(zone());
- int property_index = 0;
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
- if (property->is_computed_name()) break;
+ for (int i = 0; i < expr->properties()->length(); i++) {
+ ObjectLiteral::Property* property = expr->properties()->at(i);
+ DCHECK(!property->is_computed_name());
if (property->IsCompileTimeValue()) continue;
Literal* key = property->key()->AsLiteral();
@@ -1306,6 +1164,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1348,20 +1207,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
+ PrepareForBailoutForId(expr->GetIdForPropertySet(i),
BailoutState::NO_REGISTERS);
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
AccessorTable::Iterator it = accessor_table.lookup(key);
- it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+ it->second->bailout_id = expr->GetIdForPropertySet(i);
it->second->setter = property;
}
break;
@@ -1384,72 +1243,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(it->second->bailout_id, BailoutState::NO_REGISTERS);
}
- // Object literals have two parts. The "static" part on the left contains no
- // computed property names, and so we can compute its map ahead of time; see
- // runtime.cc::CreateObjectLiteralBoilerplate. The second "dynamic" part
- // starts with the first computed property name, and continues with all
- // properties to its right. All the code from above initializes the static
- // component of the object literal, and arranges for the map of the result to
- // reflect the static order in which the keys appear. For the dynamic
- // properties, we compile them into a series of "SetOwnProperty" runtime
- // calls. This will preserve insertion order.
- for (; property_index < expr->properties()->length(); property_index++) {
- ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
- Expression* value = property->value();
- if (!result_saved) {
- PushOperand(eax); // Save result on the stack
- result_saved = true;
- }
-
- PushOperand(Operand(esp, 0)); // Duplicate receiver.
-
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
- DCHECK(!property->is_computed_name());
- VisitForStackValue(value);
- DCHECK(property->emit_store());
- CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- EmitPropertyKey(property, expr->GetIdForPropertyName(property_index));
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ObjectLiteral::Property::CONSTANT:
- case ObjectLiteral::Property::MATERIALIZED_LITERAL:
- case ObjectLiteral::Property::COMPUTED:
- if (property->emit_store()) {
- PushOperand(Smi::FromInt(NONE));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- PrepareForBailoutForId(expr->GetIdForPropertySet(property_index),
- BailoutState::NO_REGISTERS);
- } else {
- DropOperands(3);
- }
- break;
-
- case ObjectLiteral::Property::PROTOTYPE:
- UNREACHABLE();
- break;
-
- case ObjectLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ObjectLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(NONE));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
- }
- }
- }
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -1461,7 +1254,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
- Handle<FixedArray> constant_elements = expr->constant_elements();
+ Handle<ConstantElementsPair> constant_elements =
+ expr->GetOrBuildConstantElements(isolate());
bool has_constant_fast_elements =
IsFastObjectElementsKind(expr->constant_elements_kind());
@@ -1482,8 +1276,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
- FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
- __ CallStub(&stub);
+ Callable callable =
+ CodeFactory::FastCloneShallowArray(isolate(), allocation_site_mode);
+ __ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
PrepareForBailoutForId(expr->CreateLiteralId(), BailoutState::TOS_REGISTER);
@@ -1537,17 +1332,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case VARIABLE:
// Nothing to do here.
break;
- case NAMED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(esp, kPointerSize));
- PushOperand(result_register());
- }
- break;
case NAMED_PROPERTY:
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
@@ -1557,19 +1341,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
}
break;
- case KEYED_SUPER_PROPERTY:
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- property->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(property->key());
- PushOperand(result_register());
- if (expr->is_compound()) {
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(result_register());
- }
- break;
case KEYED_PROPERTY: {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
@@ -1582,6 +1353,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
// For compound assignments we need another deoptimization point after the
@@ -1594,26 +1369,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), BailoutState::TOS_REGISTER);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case NAMED_PROPERTY:
EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyLoad(property);
- PrepareForBailoutForId(property->LoadId(),
- BailoutState::TOS_REGISTER);
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(),
BailoutState::TOS_REGISTER);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -1651,72 +1420,20 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
- case NAMED_SUPER_PROPERTY:
- EmitNamedSuperPropertyStore(property);
- context()->Plug(result_register());
- break;
- case KEYED_SUPER_PROPERTY:
- EmitKeyedSuperPropertyStore(property);
- context()->Plug(result_register());
- break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
void FullCodeGenerator::VisitYield(Yield* expr) {
- Comment cmnt(masm_, "[ Yield");
- SetExpressionPosition(expr);
-
- // Evaluate yielded value first; the initial iterator definition depends on
- // this. It stays on the stack while we update the iterator.
- VisitForStackValue(expr->expression());
-
- Label suspend, continuation, post_runtime, resume, exception;
-
- __ jmp(&suspend);
- __ bind(&continuation);
- // When we arrive here, eax holds the generator object.
- __ RecordGeneratorContinuation();
- __ mov(ebx, FieldOperand(eax, JSGeneratorObject::kResumeModeOffset));
- __ mov(eax, FieldOperand(eax, JSGeneratorObject::kInputOrDebugPosOffset));
- STATIC_ASSERT(JSGeneratorObject::kNext < JSGeneratorObject::kReturn);
- STATIC_ASSERT(JSGeneratorObject::kThrow > JSGeneratorObject::kReturn);
- __ cmp(ebx, Immediate(Smi::FromInt(JSGeneratorObject::kReturn)));
- __ j(less, &resume);
- __ Push(result_register());
- __ j(greater, &exception);
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&exception);
- __ CallRuntime(expr->rethrow_on_exception() ? Runtime::kReThrow
- : Runtime::kThrow);
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
- Immediate(Smi::FromInt(continuation.pos())));
- __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
- __ mov(ecx, esi);
- __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
- kDontSaveFPRegs);
- __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
- __ cmp(esp, ebx);
- __ j(equal, &post_runtime);
- __ push(eax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- RestoreContext();
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
-
- __ bind(&resume);
- context()->Plug(result_register());
+ // Resumable functions are not supported.
+ UNREACHABLE();
}
void FullCodeGenerator::PushOperand(MemOperand operand) {
@@ -1855,58 +1572,6 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
context()->Plug(eax);
}
-
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- for (int i = 0; i < lit->properties()->length(); i++) {
- ClassLiteral::Property* property = lit->properties()->at(i);
- Expression* value = property->value();
-
- if (property->is_static()) {
- PushOperand(Operand(esp, kPointerSize)); // constructor
- } else {
- PushOperand(Operand(esp, 0)); // prototype
- }
- EmitPropertyKey(property, lit->GetIdForProperty(i));
-
-    // The static prototype property is read-only. We handle the non-computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read-only property, we special-case it here so
-    // we do not need to do the check for every property.
- if (property->is_static() && property->is_computed_name()) {
- __ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(eax);
- }
-
- VisitForStackValue(value);
- if (NeedsHomeObject(value)) {
- EmitSetHomeObject(value, 2, property->GetSlot());
- }
-
- switch (property->kind()) {
- case ClassLiteral::Property::METHOD:
- PushOperand(Smi::FromInt(DONT_ENUM));
- PushOperand(Smi::FromInt(property->NeedsSetFunctionName()));
- CallRuntimeWithOperands(Runtime::kDefineDataPropertyInLiteral);
- break;
-
- case ClassLiteral::Property::GETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineGetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::SETTER:
- PushOperand(Smi::FromInt(DONT_ENUM));
- CallRuntimeWithOperands(Runtime::kDefineSetterPropertyUnchecked);
- break;
-
- case ClassLiteral::Property::FIELD:
- UNREACHABLE();
- break;
- }
- }
-}
-
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
PopOperand(edx);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
@@ -1940,43 +1605,6 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallStoreIC(slot, prop->key()->AsLiteral()->value());
break;
}
- case NAMED_SUPER_PROPERTY: {
- PushOperand(eax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; eax: home_object
- Register scratch = ecx;
- Register scratch2 = edx;
- __ mov(scratch, result_register()); // home_object
- __ mov(eax, MemOperand(esp, kPointerSize)); // value
- __ mov(scratch2, MemOperand(esp, 0)); // this
- __ mov(MemOperand(esp, kPointerSize), scratch2); // this
- __ mov(MemOperand(esp, 0), scratch); // home_object
-      // stack: this, home_object; eax: value
- EmitNamedSuperPropertyStore(prop);
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- PushOperand(eax);
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- Register scratch = ecx;
- Register scratch2 = edx;
- __ mov(scratch2, MemOperand(esp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; eax: key, edx: value
- __ mov(scratch, MemOperand(esp, kPointerSize)); // this
- __ mov(MemOperand(esp, 2 * kPointerSize), scratch);
- __ mov(scratch, MemOperand(esp, 0)); // home_object
- __ mov(MemOperand(esp, kPointerSize), scratch);
- __ mov(MemOperand(esp, 0), eax);
- __ mov(eax, scratch2);
- // stack: this, home_object, key; eax: value.
- EmitKeyedSuperPropertyStore(prop);
- break;
- }
case KEYED_PROPERTY: {
PushOperand(eax); // Preserve value.
VisitForStackValue(prop->obj());
@@ -1987,6 +1615,10 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
CallKeyedStoreIC(slot);
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
context()->Plug(eax);
}
@@ -2047,26 +1679,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
} else {
DCHECK(var->mode() != CONST || op == Token::INIT);
- if (var->IsLookupSlot()) {
- // Assignment to var.
- __ Push(Immediate(var->name()));
- __ Push(eax);
- __ CallRuntime(is_strict(language_mode())
- ? Runtime::kStoreLookupSlot_Strict
- : Runtime::kStoreLookupSlot_Sloppy);
- } else {
- // Assignment to var or initializing assignment to let/const in harmony
- // mode.
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, kLetBindingReInitialization);
- }
- EmitStoreToStackLocalOrContextSlot(var, location);
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ DCHECK(!var->IsLookupSlot());
+ // Assignment to var or initializing assignment to let/const in harmony
+ // mode.
+ MemOperand location = VarOperand(var, ecx);
+ if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
+ // Check for an uninitialized let binding.
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ Check(equal, kLetBindingReInitialization);
}
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2086,34 +1710,6 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
}
-void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
- // eax : value
- // stack : receiver ('this'), home_object
- DCHECK(prop != NULL);
- Literal* key = prop->key()->AsLiteral();
- DCHECK(key != NULL);
-
- PushOperand(key->value());
- PushOperand(eax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreToSuper_Strict
- : Runtime::kStoreToSuper_Sloppy);
-}
-
-
-void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
-  // Assignment to keyed property of super.
- // eax : value
- // stack : receiver ('this'), home_object, key
-
- PushOperand(eax);
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kStoreKeyedToSuper_Strict
- : Runtime::kStoreKeyedToSuper_Sloppy);
-}
-
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
// eax : value
@@ -2161,42 +1757,6 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
-void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
- SetExpressionPosition(expr);
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- Literal* key = prop->key()->AsLiteral();
- DCHECK(!key->value()->IsSmi());
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(eax);
- PushOperand(eax);
- PushOperand(Operand(esp, kPointerSize * 2));
- PushOperand(key->value());
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) {
@@ -2221,40 +1781,6 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
}
-void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
- Expression* callee = expr->expression();
- DCHECK(callee->IsProperty());
- Property* prop = callee->AsProperty();
- DCHECK(prop->IsSuperAccess());
-
- SetExpressionPosition(prop);
- // Load the function from the receiver.
- SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
- VisitForStackValue(super_ref->home_object());
- VisitForAccumulatorValue(super_ref->this_var());
- PushOperand(eax);
- PushOperand(eax);
- PushOperand(Operand(esp, kPointerSize * 2));
- VisitForStackValue(prop->key());
- // Stack here:
- // - home_object
- // - this (receiver)
- // - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
- // - home_object
- // - key
- CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
- PrepareForBailoutForId(prop->LoadId(), BailoutState::TOS_REGISTER);
-
- // Replace home_object with target function.
- __ mov(Operand(esp, kPointerSize), eax);
-
- // Stack here:
- // - target function
- // - this (receiver)
- EmitCall(expr);
-}
-
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2286,111 +1812,6 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
context()->DropAndPlug(1, eax);
}
-void FullCodeGenerator::EmitResolvePossiblyDirectEval(Call* expr) {
- int arg_count = expr->arguments()->length();
- // Push copy of the first argument or undefined if it doesn't exist.
- if (arg_count > 0) {
- __ push(Operand(esp, arg_count * kPointerSize));
- } else {
- __ push(Immediate(isolate()->factory()->undefined_value()));
- }
-
- // Push the enclosing function.
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
-
- // Push the language mode.
- __ push(Immediate(Smi::FromInt(language_mode())));
-
- // Push the start position of the scope the call resides in.
- __ push(Immediate(Smi::FromInt(scope()->start_position())));
-
- // Push the source position of the eval call.
- __ push(Immediate(Smi::FromInt(expr->position())));
-
- // Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval);
-}
-
-
-// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
-void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
- VariableProxy* callee = expr->expression()->AsVariableProxy();
- if (callee->var()->IsLookupSlot()) {
- Label slow, done;
- SetExpressionPosition(callee);
- // Generate code for loading from variables potentially shadowed by
- // eval-introduced variables.
- EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
-
- __ bind(&slow);
- // Call the runtime to find the function to call (returned in eax) and
- // the object holding it (returned in edx).
- __ Push(callee->name());
- __ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperand(eax); // Function.
- PushOperand(edx); // Receiver.
- PrepareForBailoutForId(expr->LookupId(), BailoutState::NO_REGISTERS);
-
- // If fast case code has been generated, emit code to push the function
- // and receiver and have the slow path jump around this code.
- if (done.is_linked()) {
- Label call;
- __ jmp(&call, Label::kNear);
- __ bind(&done);
- // Push function.
- __ push(eax);
- // The receiver is implicitly the global receiver. Indicate this by
- // passing the hole to the call function stub.
- __ push(Immediate(isolate()->factory()->undefined_value()));
- __ bind(&call);
- }
- } else {
- VisitForStackValue(callee);
- // refEnv.WithBaseObject()
- PushOperand(isolate()->factory()->undefined_value());
- }
-}
-
-
-void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
- // In a call to eval, we first call Runtime_ResolvePossiblyDirectEval
- // to resolve the function we need to call. Then we call the resolved
- // function using the given arguments.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
-
- PushCalleeAndWithBaseObject(expr);
-
- // Push the arguments.
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Push a copy of the function (found below the arguments) and
- // resolve eval.
- __ push(Operand(esp, (arg_count + 1) * kPointerSize));
- EmitResolvePossiblyDirectEval(expr);
-
- // Touch up the stack with the resolved function.
- __ mov(Operand(esp, (arg_count + 1) * kPointerSize), eax);
-
- PrepareForBailoutForId(expr->EvalId(), BailoutState::NO_REGISTERS);
-
- SetCallPosition(expr);
- Handle<Code> code = CodeFactory::CallIC(isolate(), ConvertReceiverMode::kAny,
- expr->tail_call_mode())
- .code();
- __ Move(edx, Immediate(SmiFromSlot(expr->CallFeedbackICSlot())));
- __ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
- __ Move(eax, Immediate(arg_count));
- __ call(code, RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->DropAndPlug(1, eax);
-}
-
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2431,47 +1852,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperCallReference* super_call_ref =
- expr->expression()->AsSuperCallReference();
- DCHECK_NOT_NULL(super_call_ref);
-
- // Push the super constructor target on the stack (may be null,
- // but the Construct builtin can deal with that properly).
- VisitForAccumulatorValue(super_call_ref->this_function_var());
- __ AssertFunction(result_register());
- __ mov(result_register(),
- FieldOperand(result_register(), HeapObject::kMapOffset));
- PushOperand(FieldOperand(result_register(), Map::kPrototypeOffset));
-
- // Push the arguments ("left-to-right") on the stack.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the construct call builtin that handles allocation and
- // constructor invocation.
- SetConstructCallPosition(expr);
-
- // Load new target into edx.
- VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mov(edx, result_register());
-
- // Load function and argument count into edi and eax.
- __ Move(eax, Immediate(arg_count));
- __ mov(edi, Operand(esp, arg_count * kPointerSize));
-
- __ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
- OperandStackDepthDecrement(arg_count + 1);
-
- RecordJSReturnSite(expr);
- RestoreContext();
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2559,28 +1939,6 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ CmpObjectType(eax, JS_REGEXP_TYPE, ebx);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2814,17 +2172,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
context()->Plug(eax);
- } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+ } else {
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
// Result of deleting non-global variables is false. 'this' is
// not really a variable, though we implement it as one. The
// subexpression does not have side effects.
context()->Plug(is_this);
- } else {
- // Non-global variable. Call the runtime to try to delete from the
- // context where the variable was introduced.
- __ Push(var->name());
- __ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(eax);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -2935,30 +2289,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
- case NAMED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForAccumulatorValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- PushOperand(result_register());
- PushOperand(MemOperand(esp, kPointerSize));
- PushOperand(result_register());
- EmitNamedSuperPropertyLoad(prop);
- break;
- }
-
- case KEYED_SUPER_PROPERTY: {
- VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
- VisitForStackValue(
- prop->obj()->AsSuperPropertyReference()->home_object());
- VisitForAccumulatorValue(prop->key());
- PushOperand(result_register());
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(MemOperand(esp, 2 * kPointerSize));
- PushOperand(result_register());
- EmitKeyedSuperPropertyLoad(prop);
- break;
- }
-
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
@@ -2969,6 +2299,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
case VARIABLE:
UNREACHABLE();
}
@@ -3002,14 +2334,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
break;
- case NAMED_SUPER_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
case KEYED_PROPERTY:
__ mov(Operand(esp, 2 * kPointerSize), eax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ mov(Operand(esp, 3 * kPointerSize), eax);
+ UNREACHABLE();
break;
}
}
@@ -3049,14 +2379,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY:
__ mov(Operand(esp, kPointerSize), eax);
break;
- case NAMED_SUPER_PROPERTY:
- __ mov(Operand(esp, 2 * kPointerSize), eax);
- break;
case KEYED_PROPERTY:
__ mov(Operand(esp, 2 * kPointerSize), eax);
break;
+ case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
- __ mov(Operand(esp, 3 * kPointerSize), eax);
+ UNREACHABLE();
break;
}
}
@@ -3115,30 +2443,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
- case NAMED_SUPER_PROPERTY: {
- EmitNamedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
- case KEYED_SUPER_PROPERTY: {
- EmitKeyedSuperPropertyStore(prop);
- PrepareForBailoutForId(expr->AssignmentId(), BailoutState::TOS_REGISTER);
- if (expr->is_postfix()) {
- if (!context()->IsEffect()) {
- context()->PlugTOS();
- }
- } else {
- context()->Plug(eax);
- }
- break;
- }
case KEYED_PROPERTY: {
PopOperand(StoreDescriptor::NameRegister());
PopOperand(StoreDescriptor::ReceiverRegister());
@@ -3154,6 +2458,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
break;
}
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNREACHABLE();
+ break;
}
}
@@ -3386,66 +2694,6 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
}
-// ----------------------------------------------------------------------------
-// Non-local control flow support.
-
-void FullCodeGenerator::EnterFinallyBlock() {
- // Store pending message while executing finally block.
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_obj));
- PushOperand(edx);
-
- ClearPendingMessage();
-}
-
-
-void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(edx));
- // Restore pending message from stack.
- PopOperand(edx);
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(edx));
- ExternalReference pending_message_obj =
- ExternalReference::address_of_pending_message_obj(isolate());
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_message_obj), edx);
-}
-
-
-void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(edx));
- __ Pop(result_register()); // Restore the accumulator.
- __ Pop(edx); // Get the token.
- for (DeferredCommand cmd : commands_) {
- Label skip;
- __ cmp(edx, Immediate(Smi::FromInt(cmd.token)));
- __ j(not_equal, &skip);
- switch (cmd.command) {
- case kReturn:
- codegen_->EmitUnwindAndReturn();
- break;
- case kThrow:
- __ Push(result_register());
- __ CallRuntime(Runtime::kReThrow);
- break;
- case kContinue:
- codegen_->EmitContinue(cmd.target);
- break;
- case kBreak:
- codegen_->EmitBreak(cmd.target);
- break;
- }
- __ bind(&skip);
- }
-}
-
#undef __
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 2d18488a78..9374986cde 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -12,6 +12,7 @@
#include "src/handles-inl.h"
#include "src/isolate.h"
#include "src/list-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 9ff16affe4..ff7f132305 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -5,6 +5,8 @@
#include "src/global-handles.h"
#include "src/api.h"
+#include "src/cancelable-task.h"
+#include "src/objects-inl.h"
#include "src/v8.h"
#include "src/vm-state-inl.h"
@@ -719,7 +721,7 @@ void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
}
}
-
+template <GlobalHandles::IterationMode mode>
void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
@@ -728,18 +730,35 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
if (node->IsPendingPhantomResetHandle()) {
- node->ResetPhantomHandle();
- ++number_of_phantom_handle_resets_;
+ if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
+ mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
+ }
} else if (node->IsPendingPhantomCallback()) {
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
+ if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
+ mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
+ }
} else {
- v->VisitPointer(node->location());
+ if (mode == IterationMode::VISIT_OTHERS ||
+ mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
+ v->VisitPointer(node->location());
+ }
}
}
}
}
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+ GlobalHandles::HANDLE_PHANTOM_NODES>(ObjectVisitor* v);
+
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+ GlobalHandles::VISIT_OTHERS>(ObjectVisitor* v);
+
+template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
+ GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(ObjectVisitor* v);
DISABLE_CFI_PERF
bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
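
The hunk above turns IterateNewSpaceWeakUnmodifiedRoots into a function template over IterationMode and explicitly instantiates it for each mode in the .cc file, so the per-mode branches are resolved at compile time rather than tested per node. A minimal standalone sketch of the same pattern, with illustrative names rather than V8's:

#include <cstdio>

enum IterationMode {
  HANDLE_PHANTOM_NODES_VISIT_OTHERS,
  VISIT_OTHERS,
  HANDLE_PHANTOM_NODES
};

template <IterationMode mode>
void IterateRoots(int* nodes, int count) {
  for (int i = 0; i < count; ++i) {
    if (mode == HANDLE_PHANTOM_NODES ||
        mode == HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
      // Phantom-handle processing would go here; the condition is constant
      // per instantiation, so dead modes compile away.
    }
    if (mode == VISIT_OTHERS || mode == HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
      std::printf("visit %d\n", nodes[i]);
    }
  }
}

// Explicit instantiations let the definition live in a .cc file, as the diff
// does for the three GlobalHandles modes.
template void IterateRoots<VISIT_OTHERS>(int*, int);
template void IterateRoots<HANDLE_PHANTOM_NODES>(int*, int);
template void IterateRoots<HANDLE_PHANTOM_NODES_VISIT_OTHERS>(int*, int);
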
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 50e5ed6969..9c4ffb4f1a 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -113,6 +113,12 @@ enum WeaknessType {
class GlobalHandles {
public:
+ enum IterationMode {
+ HANDLE_PHANTOM_NODES_VISIT_OTHERS,
+ VISIT_OTHERS,
+ HANDLE_PHANTOM_NODES
+ };
+
~GlobalHandles();
// Creates a new global handle that is alive until Destroy is called.
@@ -227,6 +233,7 @@ class GlobalHandles {
// Iterates over weak independent or unmodified handles.
// See the note above.
+ template <IterationMode mode>
void IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v);
// Identify unmodified objects that are in weak state and marks them
@@ -290,7 +297,7 @@ class GlobalHandles {
#ifdef DEBUG
void PrintStats();
void Print();
-#endif
+#endif // DEBUG
private:
explicit GlobalHandles(Isolate* isolate);
@@ -389,8 +396,6 @@ class GlobalHandles::PendingPhantomCallback {
class EternalHandles {
public:
enum SingletonHandle {
- I18N_TEMPLATE_ONE,
- I18N_TEMPLATE_TWO,
DATE_CACHE_VERSION,
NUMBER_OF_SINGLETON_HANDLES
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index f689c667b6..50b26ebf07 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -11,6 +11,7 @@
#include <ostream>
#include "src/base/build_config.h"
+#include "src/base/flags.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
@@ -314,26 +315,24 @@ inline std::ostream& operator<<(std::ostream& os, const LanguageMode& mode) {
return os;
}
-
inline bool is_sloppy(LanguageMode language_mode) {
return language_mode == SLOPPY;
}
-
inline bool is_strict(LanguageMode language_mode) {
return language_mode != SLOPPY;
}
-
inline bool is_valid_language_mode(int language_mode) {
return language_mode == SLOPPY || language_mode == STRICT;
}
-
inline LanguageMode construct_language_mode(bool strict_bit) {
return static_cast<LanguageMode>(strict_bit);
}
+enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
// This constant is used as an undefined value when passing source positions.
const int kNoSourcePosition = -1;
@@ -591,7 +590,12 @@ enum VisitMode {
};
// Flag indicating whether code is built into the VM (one of the natives files).
-enum NativesFlag { NOT_NATIVES_CODE, EXTENSION_CODE, NATIVES_CODE };
+enum NativesFlag {
+ NOT_NATIVES_CODE,
+ EXTENSION_CODE,
+ NATIVES_CODE,
+ INSPECTOR_CODE
+};
// JavaScript defines two kinds of 'nil'.
enum NilValue { kNullValue, kUndefinedValue };
@@ -603,14 +607,6 @@ enum ParseRestriction {
ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
};
-// TODO(gsathya): Move this to JSPromise once we create it.
-// This should be in sync with the constants in promise.js
-enum PromiseStatus {
- kPromisePending,
- kPromiseFulfilled,
- kPromiseRejected,
-};
-
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
@@ -788,10 +784,14 @@ enum CpuFeature {
FPR_GPR_MOV,
LWSYNC,
ISELECT,
+ VSX,
+ MODULO,
// S390
DISTINCT_OPS,
GENERAL_INSTR_EXT,
FLOATING_POINT_EXT,
+ VECTOR_FACILITY,
+ MISC_INSTR_EXT2,
NUMBER_OF_CPU_FEATURES,
@@ -891,6 +891,14 @@ enum ScopeType : uint8_t {
WITH_SCOPE // The scope introduced by with.
};
+// AllocationSiteMode controls whether allocations are tracked by an allocation
+// site.
+enum AllocationSiteMode {
+ DONT_TRACK_ALLOCATION_SITE,
+ TRACK_ALLOCATION_SITE,
+ LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
+};
+
// The mips architecture prior to revision 5 has inverted encoding for sNaN.
// The x87 FPU converts the sNaN to qNaN automatically when loading sNaN from
// memory.
@@ -1080,7 +1088,7 @@ enum FunctionKind : uint16_t {
kConciseMethod = 1 << 2,
kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
kDefaultConstructor = 1 << 3,
- kSubclassConstructor = 1 << 4,
+ kDerivedConstructor = 1 << 4,
kBaseConstructor = 1 << 5,
kGetterFunction = 1 << 6,
kSetterFunction = 1 << 7,
@@ -1088,9 +1096,9 @@ enum FunctionKind : uint16_t {
kModule = 1 << 9,
kAccessorFunction = kGetterFunction | kSetterFunction,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
- kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
+ kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
kClassConstructor =
- kBaseConstructor | kSubclassConstructor | kDefaultConstructor,
+ kBaseConstructor | kDerivedConstructor | kDefaultConstructor,
kAsyncArrowFunction = kArrowFunction | kAsyncFunction,
kAsyncConciseMethod = kAsyncFunction | kConciseMethod
};
@@ -1106,9 +1114,9 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kSetterFunction ||
kind == FunctionKind::kAccessorFunction ||
kind == FunctionKind::kDefaultBaseConstructor ||
- kind == FunctionKind::kDefaultSubclassConstructor ||
+ kind == FunctionKind::kDefaultDerivedConstructor ||
kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kSubclassConstructor ||
+ kind == FunctionKind::kDerivedConstructor ||
kind == FunctionKind::kAsyncFunction ||
kind == FunctionKind::kAsyncArrowFunction ||
kind == FunctionKind::kAsyncConciseMethod;
@@ -1172,10 +1180,9 @@ inline bool IsBaseConstructor(FunctionKind kind) {
return kind & FunctionKind::kBaseConstructor;
}
-
-inline bool IsSubclassConstructor(FunctionKind kind) {
+inline bool IsDerivedConstructor(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
- return kind & FunctionKind::kSubclassConstructor;
+ return kind & FunctionKind::kDerivedConstructor;
}
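
FunctionKind, shown above with kSubclassConstructor renamed to kDerivedConstructor, packs orthogonal properties into bits: compound kinds are bitwise ORs of base flags, and predicates like IsDerivedConstructor reduce to a mask test. A reduced sketch of the encoding (bit positions copied from the enum, the rest trimmed):

#include <cstdint>

enum FunctionKind : std::uint16_t {
  kNormalFunction = 0,
  kDefaultConstructor = 1 << 3,
  kDerivedConstructor = 1 << 4,
  kDefaultDerivedConstructor = kDefaultConstructor | kDerivedConstructor,
};

inline bool IsDerivedConstructor(FunctionKind kind) {
  // True for kDerivedConstructor and for any compound kind containing its
  // bit, e.g. kDefaultDerivedConstructor.
  return (kind & kDerivedConstructor) != 0;
}
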
@@ -1238,10 +1245,23 @@ class BinaryOperationFeedback {
};
};
+// Type feedback is encoded in such a way that we can combine the feedback
+// at different points by performing an 'OR' operation. Type feedback moves
+// to a more generic type when we combine feedback.
+// kSignedSmall -> kNumber -> kAny
+// kInternalizedString -> kString -> kAny
// TODO(epertoso): consider unifying this with BinaryOperationFeedback.
class CompareOperationFeedback {
public:
- enum { kNone = 0x00, kSignedSmall = 0x01, kNumber = 0x3, kAny = 0x7 };
+ enum {
+ kNone = 0x00,
+ kSignedSmall = 0x01,
+ kNumber = 0x3,
+ kNumberOrOddball = 0x7,
+ kInternalizedString = 0x8,
+ kString = 0x18,
+ kAny = 0x3F
+ };
};
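
The comment above describes a join-semilattice: each named feedback kind is a bit pattern that covers the kinds below it, so combining two observations is a plain bitwise OR and feedback only ever moves toward kAny. A small self-contained check of that property, reusing the constants from the enum:

#include <cassert>

enum Feedback {
  kNone = 0x00,
  kSignedSmall = 0x01,
  kNumber = 0x03,
  kInternalizedString = 0x08,
  kString = 0x18,
  kAny = 0x3F
};

int Combine(int a, int b) { return a | b; }  // join two observations
bool Covers(int general, int specific) {     // lattice order test
  return (general | specific) == general;
}

int main() {
  assert(Combine(kSignedSmall, kNumber) == kNumber);         // stays Number
  assert(Combine(kInternalizedString, kString) == kString);  // stays String
  assert(Covers(kAny, Combine(kNumber, kString)));  // mixed types end in kAny
  return 0;
}
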
// Describes how exactly a frame has been dropped from stack.
@@ -1294,6 +1314,17 @@ inline std::ostream& operator<<(std::ostream& os, IterationKind kind) {
return os;
}
+// Flags for the runtime function kDefineDataPropertyInLiteral. A property can
+// be enumerable or not, and, in the case of functions, the function name
+// can be set or not.
+enum class DataPropertyInLiteralFlag {
+ kNoFlags = 0,
+ kDontEnum = 1 << 0,
+ kSetFunctionName = 1 << 1
+};
+typedef base::Flags<DataPropertyInLiteralFlag> DataPropertyInLiteralFlags;
+DEFINE_OPERATORS_FOR_FLAGS(DataPropertyInLiteralFlags)
+
} // namespace internal
} // namespace v8
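
DataPropertyInLiteralFlags wraps the enum in base::Flags so combined flags stay type-checked instead of decaying to plain int. Without V8's helper, the same idea is a pair of operator overloads on an enum class; a minimal sketch with illustrative names:

enum class DataPropertyFlag : int {
  kNoFlags = 0,
  kDontEnum = 1 << 0,
  kSetFunctionName = 1 << 1
};

constexpr DataPropertyFlag operator|(DataPropertyFlag a, DataPropertyFlag b) {
  return static_cast<DataPropertyFlag>(static_cast<int>(a) |
                                       static_cast<int>(b));
}

constexpr bool Has(DataPropertyFlag set, DataPropertyFlag f) {
  return (static_cast<int>(set) & static_cast<int>(f)) != 0;
}

// A caller builds a combined set; a callee tests individual bits. Passing a
// raw int by mistake fails to compile, which is the point of the wrapper.
constexpr DataPropertyFlag kFlags =
    DataPropertyFlag::kDontEnum | DataPropertyFlag::kSetFunctionName;
static_assert(Has(kFlags, DataPropertyFlag::kSetFunctionName), "bit present");
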
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 2c98209a1d..8664a3ff89 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -5,6 +5,8 @@
#ifndef V8_HANDLES_H_
#define V8_HANDLES_H_
+#include <type_traits>
+
#include "include/v8.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
@@ -91,11 +93,10 @@ class Handle final : public HandleBase {
public:
V8_INLINE explicit Handle(T** location = nullptr)
: HandleBase(reinterpret_cast<Object**>(location)) {
- Object* a = nullptr;
- T* b = nullptr;
- a = b; // Fake assignment to enforce type checks.
- USE(a);
+ // Type check:
+ static_assert(std::is_base_of<Object, T>::value, "static type violation");
}
+
V8_INLINE explicit Handle(T* object) : Handle(object, object->GetIsolate()) {}
V8_INLINE Handle(T* object, Isolate* isolate) : HandleBase(object, isolate) {}
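
The Handle change above swaps the old fake-assignment trick for a static_assert over std::is_base_of, which rejects a non-Object T at compile time with a readable message instead of an obscure conversion error. The pattern in isolation (Object, Smi, and Unrelated are stand-ins):

#include <type_traits>

struct Object {};
struct Smi : Object {};
struct Unrelated {};

template <typename T>
class Handle {
 public:
  explicit Handle(T** location = nullptr) : location_(location) {
    // Rejected at compile time unless T derives from Object.
    static_assert(std::is_base_of<Object, T>::value, "static type violation");
  }

 private:
  T** location_;
};

// Handle<Smi> ok;         // compiles
// Handle<Unrelated> bad;  // error: "static type violation"
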
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index cee900048e..cf8739167e 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SYMBOLS_H_
#define INTERNALIZED_STRING_LIST(V) \
+ V(anonymous_function_string, "(anonymous function)") \
V(anonymous_string, "anonymous") \
V(apply_string, "apply") \
V(arguments_string, "arguments") \
@@ -14,6 +15,8 @@
V(Array_string, "Array") \
V(ArrayIterator_string, "Array Iterator") \
V(assign_string, "assign") \
+ V(async_string, "async") \
+ V(await_string, "await") \
V(array_to_string, "[object Array]") \
V(boolean_to_string, "[object Boolean]") \
V(date_to_string, "[object Date]") \
@@ -57,7 +60,12 @@
V(did_handle_string, "didHandle") \
V(display_name_string, "displayName") \
V(done_string, "done") \
+ V(dot_catch_string, ".catch") \
+ V(dot_for_string, ".for") \
+ V(dot_generator_object_string, ".generator_object") \
+ V(dot_iterator_string, ".iterator") \
V(dot_result_string, ".result") \
+ V(dot_switch_tag_string, ".switch_tag") \
V(dot_string, ".") \
V(exec_string, "exec") \
V(entries_string, "entries") \
@@ -71,8 +79,6 @@
V(flags_string, "flags") \
V(float32x4_string, "float32x4") \
V(Float32x4_string, "Float32x4") \
- V(for_api_string, "for_api") \
- V(for_string, "for") \
V(function_string, "function") \
V(Function_string, "Function") \
V(Generator_string, "Generator") \
@@ -80,6 +86,7 @@
V(getOwnPropertyDescriptors_string, "getOwnPropertyDescriptors") \
V(getPrototypeOf_string, "getPrototypeOf") \
V(get_string, "get") \
+ V(get_space_string, "get ") \
V(global_string, "global") \
V(has_string, "has") \
V(hour_string, "hour") \
@@ -102,6 +109,7 @@
V(keys_string, "keys") \
V(lastIndex_string, "lastIndex") \
V(length_string, "length") \
+ V(let_string, "let") \
V(line_string, "line") \
V(literal_string, "literal") \
V(Map_string, "Map") \
@@ -109,10 +117,13 @@
V(minus_infinity_string, "-Infinity") \
V(minus_zero_string, "-0") \
V(minute_string, "minute") \
+ V(Module_string, "Module") \
V(month_string, "month") \
V(multiline_string, "multiline") \
V(name_string, "name") \
+ V(native_string, "native") \
V(nan_string, "NaN") \
+ V(new_target_string, ".new.target") \
V(next_string, "next") \
V(not_equal, "not-equal") \
V(null_string, "null") \
@@ -125,9 +136,9 @@
V(ownKeys_string, "ownKeys") \
V(position_string, "position") \
V(preventExtensions_string, "preventExtensions") \
- V(private_api_string, "private_api") \
V(Promise_string, "Promise") \
V(PromiseResolveThenableJob_string, "PromiseResolveThenableJob") \
+ V(promise_string, "promise") \
V(proto_string, "__proto__") \
V(prototype_string, "prototype") \
V(Proxy_string, "Proxy") \
@@ -135,9 +146,13 @@
V(RangeError_string, "RangeError") \
V(ReferenceError_string, "ReferenceError") \
V(RegExp_string, "RegExp") \
+ V(reject_string, "reject") \
+ V(resolve_string, "resolve") \
+ V(return_string, "return") \
V(script_string, "script") \
V(second_string, "second") \
V(setPrototypeOf_string, "setPrototypeOf") \
+ V(set_space_string, "set ") \
V(set_string, "set") \
V(Set_string, "Set") \
V(source_mapping_url_string, "source_mapping_url") \
@@ -146,13 +161,17 @@
V(source_url_string, "source_url") \
V(stack_string, "stack") \
V(stackTraceLimit_string, "stackTraceLimit") \
+ V(star_default_star_string, "*default*") \
V(sticky_string, "sticky") \
V(strict_compare_ic_string, "===") \
V(string_string, "string") \
V(String_string, "String") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
+ V(symbol_species_string, "[Symbol.species]") \
V(SyntaxError_string, "SyntaxError") \
+ V(then_string, "then") \
+ V(this_function_string, ".this_function") \
V(this_string, "this") \
V(throw_string, "throw") \
V(timed_out, "timed-out") \
@@ -163,6 +182,7 @@
V(TypeError_string, "TypeError") \
V(type_string, "type") \
V(CompileError_string, "CompileError") \
+ V(LinkError_string, "LinkError") \
V(RuntimeError_string, "RuntimeError") \
V(uint16x8_string, "uint16x8") \
V(Uint16x8_string, "Uint16x8") \
@@ -173,6 +193,8 @@
V(undefined_string, "undefined") \
V(undefined_to_string, "[object Undefined]") \
V(unicode_string, "unicode") \
+ V(use_asm_string, "use asm") \
+ V(use_strict_string, "use strict") \
V(URIError_string, "URIError") \
V(valueOf_string, "valueOf") \
V(values_string, "values") \
@@ -200,7 +222,6 @@
V(frozen_symbol) \
V(hash_code_symbol) \
V(home_object_symbol) \
- V(intl_impl_object_symbol) \
V(intl_initialized_marker_symbol) \
V(intl_pattern_symbol) \
V(intl_resolved_symbol) \
@@ -213,16 +234,9 @@
V(premonomorphic_symbol) \
V(promise_async_stack_id_symbol) \
V(promise_debug_marker_symbol) \
- V(promise_deferred_reaction_symbol) \
V(promise_forwarding_handler_symbol) \
- V(promise_fulfill_reactions_symbol) \
V(promise_handled_by_symbol) \
- V(promise_handled_hint_symbol) \
- V(promise_has_handler_symbol) \
- V(promise_raw_symbol) \
- V(promise_reject_reactions_symbol) \
- V(promise_result_symbol) \
- V(promise_state_symbol) \
+ V(promise_async_id_symbol) \
V(sealed_symbol) \
V(stack_trace_symbol) \
V(strict_function_transition_symbol) \
@@ -230,6 +244,7 @@
#define PUBLIC_SYMBOL_LIST(V) \
V(iterator_symbol, Symbol.iterator) \
+ V(intl_fallback_symbol, IntlFallback) \
V(match_symbol, Symbol.match) \
V(replace_symbol, Symbol.replace) \
V(search_symbol, Symbol.search) \
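
heap-symbols.h is an X-macro list: every entry is a V(identifier, literal) pair, and each expansion site supplies its own V to stamp out enum entries, root accessors, or string tables from the single list. A minimal sketch of the technique using three of the strings added above:

#include <cstdio>

#define STRING_LIST(V)     \
  V(async_string, "async") \
  V(await_string, "await") \
  V(let_string, "let")

// Expansion 1: one enum entry per symbol.
#define AS_ENUM(name, contents) k_##name,
enum StringIndex { STRING_LIST(AS_ENUM) kStringCount };
#undef AS_ENUM

// Expansion 2: a parallel table of the literal contents.
#define AS_LITERAL(name, contents) contents,
const char* kStringContents[] = {STRING_LIST(AS_LITERAL)};
#undef AS_LITERAL

int main() {
  std::printf("%s\n", kStringContents[k_let_string]);  // prints "let"
  return 0;
}
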
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 62b848ef70..def84572b6 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -78,8 +78,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
DCHECK_EQ(heap->gc_state(), Heap::HeapState::SCAVENGE);
- for (Page* page : NewSpacePageRange(heap->new_space()->FromSpaceStart(),
- heap->new_space()->FromSpaceEnd())) {
+ for (Page* page : PageRange(heap->new_space()->FromSpaceStart(),
+ heap->new_space()->FromSpaceEnd())) {
bool empty = ProcessBuffers(page, kUpdateForwardedRemoveOthers);
CHECK(empty);
}
diff --git a/deps/v8/src/heap/embedder-tracing.cc b/deps/v8/src/heap/embedder-tracing.cc
new file mode 100644
index 0000000000..2d11724181
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing.cc
@@ -0,0 +1,72 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/embedder-tracing.h"
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+void LocalEmbedderHeapTracer::TracePrologue() {
+ if (!InUse()) return;
+
+ CHECK(cached_wrappers_to_trace_.empty());
+ num_v8_marking_deque_was_empty_ = 0;
+ remote_tracer_->TracePrologue();
+}
+
+void LocalEmbedderHeapTracer::TraceEpilogue() {
+ if (!InUse()) return;
+
+ CHECK(cached_wrappers_to_trace_.empty());
+ remote_tracer_->TraceEpilogue();
+}
+
+void LocalEmbedderHeapTracer::AbortTracing() {
+ if (!InUse()) return;
+
+ cached_wrappers_to_trace_.clear();
+ remote_tracer_->AbortTracing();
+}
+
+void LocalEmbedderHeapTracer::EnterFinalPause() {
+ if (!InUse()) return;
+
+ remote_tracer_->EnterFinalPause();
+}
+
+bool LocalEmbedderHeapTracer::Trace(
+ double deadline, EmbedderHeapTracer::AdvanceTracingActions actions) {
+ if (!InUse()) return false;
+
+ DCHECK_EQ(0, NumberOfCachedWrappersToTrace());
+ return remote_tracer_->AdvanceTracing(deadline, actions);
+}
+
+size_t LocalEmbedderHeapTracer::NumberOfWrappersToTrace() {
+ return (InUse())
+ ? cached_wrappers_to_trace_.size() +
+ remote_tracer_->NumberOfWrappersToTrace()
+ : 0;
+}
+
+void LocalEmbedderHeapTracer::RegisterWrappersWithRemoteTracer() {
+ if (!InUse()) return;
+
+ if (cached_wrappers_to_trace_.empty()) {
+ return;
+ }
+
+ remote_tracer_->RegisterV8References(cached_wrappers_to_trace_);
+ cached_wrappers_to_trace_.clear();
+}
+
+bool LocalEmbedderHeapTracer::RequiresImmediateWrapperProcessing() {
+ const size_t kTooManyWrappers = 16000;
+ return cached_wrappers_to_trace_.size() > kTooManyWrappers;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
new file mode 100644
index 0000000000..5e10d6e2e8
--- /dev/null
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -0,0 +1,67 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_EMBEDDER_TRACING_H_
+#define V8_HEAP_EMBEDDER_TRACING_H_
+
+#include "include/v8.h"
+#include "src/flags.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+
+class V8_EXPORT_PRIVATE LocalEmbedderHeapTracer final {
+ public:
+ typedef std::pair<void*, void*> WrapperInfo;
+
+ LocalEmbedderHeapTracer()
+ : remote_tracer_(nullptr), num_v8_marking_deque_was_empty_(0) {}
+
+ void SetRemoteTracer(EmbedderHeapTracer* tracer) { remote_tracer_ = tracer; }
+ bool InUse() { return remote_tracer_ != nullptr; }
+
+ void TracePrologue();
+ void TraceEpilogue();
+ void AbortTracing();
+ void EnterFinalPause();
+ bool Trace(double deadline,
+ EmbedderHeapTracer::AdvanceTracingActions actions);
+
+ size_t NumberOfWrappersToTrace();
+ size_t NumberOfCachedWrappersToTrace() {
+ return cached_wrappers_to_trace_.size();
+ }
+ void AddWrapperToTrace(WrapperInfo entry) {
+ cached_wrappers_to_trace_.push_back(entry);
+ }
+ void ClearCachedWrappersToTrace() { cached_wrappers_to_trace_.clear(); }
+ void RegisterWrappersWithRemoteTracer();
+
+  // In order to avoid running out of memory, we force tracing of wrappers if
+  // there are too many of them.
+ bool RequiresImmediateWrapperProcessing();
+
+ void NotifyV8MarkingDequeWasEmpty() { num_v8_marking_deque_was_empty_++; }
+ bool ShouldFinalizeIncrementalMarking() {
+ static const size_t kMaxIncrementalFixpointRounds = 3;
+ return !FLAG_incremental_marking_wrappers || !InUse() ||
+ NumberOfWrappersToTrace() == 0 ||
+ num_v8_marking_deque_was_empty_ > kMaxIncrementalFixpointRounds;
+ }
+
+ private:
+ typedef std::vector<WrapperInfo> WrapperCache;
+
+ EmbedderHeapTracer* remote_tracer_;
+ WrapperCache cached_wrappers_to_trace_;
+ size_t num_v8_marking_deque_was_empty_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_EMBEDDER_TRACING_H_
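
LocalEmbedderHeapTracer is the internal adapter around the public v8::EmbedderHeapTracer interface: it caches wrapper pairs found during marking and forwards them in batches through RegisterV8References. For orientation, a sketch of what a remote tracer on the embedder side might look like against the 5.7-era interface; exact signatures should be checked against include/v8.h, and the marking body here is a placeholder:

#include <utility>
#include <vector>
#include "include/v8.h"

class MyTracer : public v8::EmbedderHeapTracer {
 public:
  void RegisterV8References(
      const std::vector<std::pair<void*, void*>>& refs) override {
    // Wrapper pairs batched by LocalEmbedderHeapTracer arrive here; queue the
    // embedder-side objects for marking.
    pending_.insert(pending_.end(), refs.begin(), refs.end());
  }
  void TracePrologue() override { pending_.clear(); }
  bool AdvanceTracing(double deadline_in_ms,
                      AdvanceTracingActions actions) override {
    // Mark queued objects until the deadline; return true if work remains.
    pending_.clear();  // placeholder: pretend everything was marked
    return false;
  }
  void TraceEpilogue() override {}
  void EnterFinalPause() override {}
  void AbortTracing() override { pending_.clear(); }

 private:
  std::vector<std::pair<void*, void*>> pending_;
};
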
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 0c411f7b4c..905514c4bf 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -146,6 +146,7 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
return GCIdleTimeAction::IncrementalStep();
}
+bool GCIdleTimeHandler::Enabled() { return FLAG_incremental_marking; }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 7ce0c1a2f6..b730a7bbba 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -125,6 +125,8 @@ class V8_EXPORT_PRIVATE GCIdleTimeHandler {
GCIdleTimeAction Compute(double idle_time_in_ms,
GCIdleTimeHeapState heap_state);
+ bool Enabled();
+
void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
static size_t EstimateMarkingStepSize(double idle_time_in_ms,
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index dcd319fdae..cf881c473b 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -174,8 +174,7 @@ void GCTracer::Start(GarbageCollector collector,
current_.start_object_size = heap_->SizeOfObjects();
current_.start_memory_size = heap_->memory_allocator()->Size();
current_.start_holes_size = CountTotalHolesSize(heap_);
- current_.new_space_object_size =
- heap_->new_space()->top() - heap_->new_space()->bottom();
+ current_.new_space_object_size = heap_->new_space()->Size();
current_.incremental_marking_bytes = 0;
current_.incremental_marking_duration = 0;
@@ -510,9 +509,14 @@ void GCTracer::PrintNVP() const {
"pause=%.1f "
"mutator=%.1f "
"gc=%s "
- "reduce_memory=%d\n",
- duration, spent_in_mutator, current_.TypeName(true),
- current_.reduce_memory);
+ "reduce_memory=%d "
+ "mark=%.2f "
+ "mark.roots=%.2f "
+ "mark.old_to_new=%.2f\n",
+ duration, spent_in_mutator, "mmc", current_.reduce_memory,
+ current_.scopes[Scope::MINOR_MC_MARK],
+ current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
+ current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index ed62dee5f1..7aff1cf59d 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -79,6 +79,12 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_SWEEP_CODE) \
F(MC_SWEEP_MAP) \
F(MC_SWEEP_OLD) \
+ F(MINOR_MC_MARK) \
+ F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
+ F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
+ F(MINOR_MC_MARK_ROOTS) \
+ F(MINOR_MC_MARK_WEAK) \
F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
F(SCAVENGER_OLD_TO_NEW_POINTERS) \
F(SCAVENGER_ROOTS) \
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index 7d0d241289..4d060f8e43 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -21,6 +21,7 @@
#include "src/log.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects/scope-info.h"
#include "src/type-feedback-vector-inl.h"
namespace v8 {
@@ -698,12 +699,15 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-
-void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
+void Heap::ExternalStringTable::IterateNewSpaceStrings(ObjectVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
v->VisitPointers(start, start + new_space_strings_.length());
}
+}
+
+void Heap::ExternalStringTable::IterateAll(ObjectVisitor* v) {
+ IterateNewSpaceStrings(v);
if (!old_space_strings_.is_empty()) {
Object** start = &old_space_strings_[0];
v->VisitPointers(start, start + old_space_strings_.length());
@@ -809,9 +813,16 @@ int Heap::GetNextTemplateSerialNumber() {
void Heap::SetSerializedTemplates(FixedArray* templates) {
DCHECK_EQ(empty_fixed_array(), serialized_templates());
+ DCHECK(isolate()->serializer_enabled());
set_serialized_templates(templates);
}
+void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) {
+ DCHECK_EQ(empty_fixed_array(), serialized_global_proxy_sizes());
+ DCHECK(isolate()->serializer_enabled());
+ set_serialized_global_proxy_sizes(sizes);
+}
+
void Heap::CreateObjectStats() {
if (V8_LIKELY(FLAG_gc_stats == 0)) return;
if (!live_object_stats_) {
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 2059dae6b7..478be1f03a 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -20,6 +20,7 @@
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/code-stats.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
@@ -80,6 +81,7 @@ Heap::Heap()
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+ initial_max_old_generation_size_(max_old_generation_size_),
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
old_generation_size_configured_(false),
@@ -93,6 +95,8 @@ Heap::Heap()
survived_last_scavenge_(0),
always_allocate_scope_count_(0),
memory_pressure_level_(MemoryPressureLevel::kNone),
+ out_of_memory_callback_(nullptr),
+ out_of_memory_callback_data_(nullptr),
contexts_disposed_(0),
number_of_disposed_maps_(0),
global_ic_age_(0),
@@ -155,7 +159,7 @@ Heap::Heap()
deserialization_complete_(false),
strong_roots_list_(NULL),
heap_iterator_depth_(0),
- embedder_heap_tracer_(nullptr),
+ local_embedder_heap_tracer_(nullptr),
force_oom_(false),
delay_sweeper_tasks_for_testing_(false) {
// Allow build-time customization of the max semispace size. Building
@@ -292,6 +296,9 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
return YoungGenerationCollector();
}
+void Heap::SetGCState(HeapState state) {
+ gc_state_ = state;
+}
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
@@ -442,7 +449,6 @@ void Heap::GarbageCollectionPrologue() {
}
CheckNewSpaceExpansionCriteria();
UpdateNewSpaceAllocationCounter();
- store_buffer()->MoveAllEntriesToRememberedSet();
}
size_t Heap::SizeOfObjects() {
@@ -510,6 +516,22 @@ void Heap::MergeAllocationSitePretenuringFeedback(
}
}
+class Heap::SkipStoreBufferScope {
+ public:
+ explicit SkipStoreBufferScope(StoreBuffer* store_buffer)
+ : store_buffer_(store_buffer) {
+ store_buffer_->MoveAllEntriesToRememberedSet();
+ store_buffer_->SetMode(StoreBuffer::IN_GC);
+ }
+
+ ~SkipStoreBufferScope() {
+ DCHECK(store_buffer_->Empty());
+ store_buffer_->SetMode(StoreBuffer::NOT_IN_GC);
+ }
+
+ private:
+ StoreBuffer* store_buffer_;
+};
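
SkipStoreBufferScope is a plain RAII guard: the constructor drains the store buffer and flips it to IN_GC, the destructor checks it stayed empty and restores NOT_IN_GC, so every exit path of the collection restores the mode. The shape of the pattern in isolation:

#include <cassert>

enum StoreBufferMode { NOT_IN_GC, IN_GC };

class ScopedStoreBufferMode {
 public:
  explicit ScopedStoreBufferMode(StoreBufferMode* slot) : slot_(slot) {
    *slot_ = IN_GC;  // flip on entry (after flushing pending entries)
  }
  ~ScopedStoreBufferMode() {
    assert(*slot_ == IN_GC);
    *slot_ = NOT_IN_GC;  // restored on every exit path, early returns included
  }

 private:
  StoreBufferMode* slot_;
};
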
class Heap::PretenuringScope {
public:
@@ -861,6 +883,10 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
// Note: as weak callbacks can execute arbitrary code, we cannot
// hope that eventually there will be no weak callbacks invocations.
// Therefore stop recollecting after several attempts.
+ if (gc_reason == GarbageCollectionReason::kLastResort) {
+ InvokeOutOfMemoryCallback();
+ }
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
@@ -943,6 +969,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
VMState<GC> state(isolate_);
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
@@ -1022,6 +1049,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
(committed_memory_before > committed_memory_after + MB) ||
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
+ event.committed_memory = committed_memory_after;
if (deserialization_complete_) {
memory_reducer_->NotifyMarkCompact(event);
}
@@ -1164,7 +1192,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, Map::kSize,
- ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+ ClearRecordedSlots::kNo);
maps->Add(free_space_address);
} else {
perform_gc = true;
@@ -1195,7 +1223,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
// deserializing.
Address free_space_address = free_space->address();
CreateFillerObjectAt(free_space_address, size,
- ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+ ClearRecordedSlots::kNo);
DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
chunk.start = free_space_address;
chunk.end = free_space_address + size;
@@ -1313,6 +1341,7 @@ bool Heap::PerformGarbageCollection(
{
Heap::PretenuringScope pretenuring_scope(this);
+ Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
switch (collector) {
case MARK_COMPACTOR:
@@ -1394,6 +1423,7 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCPrologueCallback);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
if (!gc_prologue_callbacks_[i].pass_isolate) {
@@ -1415,6 +1445,7 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
+ RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCEpilogueCallback);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate) {
@@ -1433,7 +1464,8 @@ void Heap::CallGCEpilogueCallbacks(GCType gc_type,
void Heap::MarkCompact() {
PauseAllocationObserversScope pause_observers(this);
- gc_state_ = MARK_COMPACT;
+ SetGCState(MARK_COMPACT);
+
LOG(isolate_, ResourceEvent("markcompact", "begin"));
uint64_t size_of_objects_before_gc = SizeOfObjects();
@@ -1459,7 +1491,7 @@ void Heap::MinorMarkCompact() { UNREACHABLE(); }
void Heap::MarkCompactEpilogue() {
TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE);
- gc_state_ = NOT_IN_GC;
+ SetGCState(NOT_IN_GC);
isolate_->counters()->objs_since_last_full()->Set(0);
@@ -1512,21 +1544,6 @@ static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
!HeapObject::cast(*p)->map_word().IsForwardingAddress();
}
-
-static bool IsUnmodifiedHeapObject(Object** p) {
- Object* object = *p;
- if (object->IsSmi()) return false;
- HeapObject* heap_object = HeapObject::cast(object);
- if (!object->IsJSObject()) return false;
- JSObject* js_object = JSObject::cast(object);
- if (!js_object->WasConstructedFromApiFunction()) return false;
- JSFunction* constructor =
- JSFunction::cast(js_object->map()->GetConstructor());
-
- return constructor->initial_map() == heap_object->map();
-}
-
-
void PromotionQueue::Initialize() {
// The last to-space page may be used for promotion queue. On promotion
// conflict, we use the emergency stack.
@@ -1605,7 +1622,7 @@ void Heap::Scavenge() {
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
- gc_state_ = SCAVENGE;
+ SetGCState(SCAVENGE);
// Implements Cheney's copying algorithm
LOG(isolate_, ResourceEvent("scavenge", "begin"));
@@ -1615,13 +1632,6 @@ void Heap::Scavenge() {
scavenge_collector_->SelectScavengingVisitorsTable();
- if (UsingEmbedderHeapTracer()) {
- // Register found wrappers with embedder so it can add them to its marking
- // deque and correctly manage the case when v8 scavenger collects the
- // wrappers by either keeping wrappables alive, or cleaning marking deque.
- RegisterWrappersWithEmbedderHeapTracer();
- }
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_->Flip();
@@ -1701,8 +1711,10 @@ void Heap::Scavenge() {
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnscavengedHeapObject);
- isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
- &scavenge_visitor);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRoots<
+ GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
@@ -1727,9 +1739,13 @@ void Heap::Scavenge() {
IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() +
new_space_->Size() - survived_watermark);
+ // Scavenger may find new wrappers by iterating objects promoted onto a black
+ // page.
+ local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+
LOG(isolate_, ResourceEvent("scavenge", "end"));
- gc_state_ = NOT_IN_GC;
+ SetGCState(NOT_IN_GC);
}
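
Scavenge above names Cheney's copying algorithm without restating it: roots are copied to to-space, leaving forwarding pointers behind, and a scan pointer chases the allocation pointer, copying whatever the already-copied objects reference until the two meet. A toy sketch of the core loop, assuming one outgoing pointer per object and omitting the semispace flip:

// Toy Cheney scavenge: fixed-size cells, one outgoing pointer each. All
// from-space cells start with forward == nullptr.
struct Cell {
  Cell* forward;  // forwarding address once copied
  Cell* ref;      // single outgoing pointer, may be nullptr
};

const int kCells = 1024;
Cell from_space[kCells], to_space[kCells];
int free_index = 0;

Cell* Copy(Cell* obj) {
  if (obj == nullptr) return nullptr;
  if (obj->forward != nullptr) return obj->forward;  // already evacuated
  Cell* clone = &to_space[free_index++];
  *clone = *obj;
  obj->forward = clone;  // leave a forwarding pointer behind
  return clone;
}

void ToyScavenge(Cell** roots, int root_count) {
  free_index = 0;
  for (int i = 0; i < root_count; ++i) roots[i] = Copy(roots[i]);
  // Cheney scan: cells before 'scan' are fixed up; cells between 'scan' and
  // 'free_index' are copied but not yet scanned. Done when the fingers meet.
  for (int scan = 0; scan < free_index; ++scan)
    to_space[scan].ref = Copy(to_space[scan].ref);
}
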
@@ -1882,7 +1898,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
v8::ExternalResourceVisitor* visitor_;
} external_string_table_visitor(visitor);
- external_string_table_.Iterate(&external_string_table_visitor);
+ external_string_table_.IterateAll(&external_string_table_visitor);
}
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
@@ -2008,7 +2024,6 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
ArrayBufferTracker::Unregister(this, buffer);
}
-
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
@@ -2019,7 +2034,6 @@ void Heap::ConfigureInitialOldGenerationSize() {
}
}
-
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
@@ -2107,8 +2121,7 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
DCHECK(chunk->owner()->identity() == space);
#endif
- CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
- ClearBlackArea::kNo);
+ CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
return obj;
}
@@ -2256,6 +2269,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_info)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, type_feedback_vector)
ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number,
Context::NUMBER_FUNCTION_INDEX)
ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
@@ -2279,6 +2293,9 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, optimized_out);
ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, stale_register);
+ ALLOCATE_MAP(JS_PROMISE_CAPABILITY_TYPE, JSPromiseCapability::kSize,
+ js_promise_capability);
+
for (unsigned i = 0; i < arraysize(string_type_table); i++) {
const StringTypeTable& entry = string_type_table[i];
{
@@ -2344,6 +2361,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, debug_evaluate_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+ ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, eval_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
@@ -2523,10 +2541,18 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
return array;
}
-
-void Heap::CreateApiObjects() {
+bool Heap::CreateApiObjects() {
HandleScope scope(isolate());
set_message_listeners(*TemplateList::New(isolate(), 2));
+ HeapObject* obj = nullptr;
+ {
+ AllocationResult allocation = AllocateStruct(INTERCEPTOR_INFO_TYPE);
+ if (!allocation.To(&obj)) return false;
+ }
+ InterceptorInfo* info = InterceptorInfo::cast(obj);
+ info->set_flags(0);
+ set_noop_interceptor_info(info);
+ return true;
}
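
CreateApiObjects now reports failure instead of assuming the allocation succeeds, using the AllocationResult::To idiom: the result either fills an out-parameter or signals that the caller should garbage-collect and retry. A simplified stand-in for that result type (not V8's actual class):

template <typename T>
class AllocResult {
 public:
  static AllocResult Success(T* obj) { return AllocResult(obj); }
  static AllocResult RetryAfterGC() { return AllocResult(nullptr); }

  // Fills *out and returns true on success; false means the caller should
  // collect garbage and retry (or propagate the failure, as above).
  bool To(T** out) const {
    if (obj_ == nullptr) return false;
    *out = obj_;
    return true;
  }

 private:
  explicit AllocResult(T* obj) : obj_(obj) {}
  T* obj_;
};

struct Interceptor { int flags; };

bool InitializeInterceptor(AllocResult<Interceptor> allocation) {
  Interceptor* obj = nullptr;
  if (!allocation.To(&obj)) return false;  // mirror the early return above
  obj->flags = 0;
  return true;
}
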
@@ -2697,10 +2723,14 @@ void Heap::CreateInitialObjects() {
}
Handle<NameDictionary> empty_properties_dictionary =
- NameDictionary::New(isolate(), 0, TENURED);
+ NameDictionary::NewEmpty(isolate(), TENURED);
empty_properties_dictionary->SetRequiresCopyOnCapacityChange();
set_empty_properties_dictionary(*empty_properties_dictionary);
+ set_public_symbol_table(*empty_properties_dictionary);
+ set_api_symbol_table(*empty_properties_dictionary);
+ set_api_private_symbol_table(*empty_properties_dictionary);
+
set_number_string_cache(
*factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
@@ -2729,9 +2759,6 @@ void Heap::CreateInitialObjects() {
set_undefined_cell(*factory->NewCell(factory->undefined_value()));
- // The symbol registry is initialized lazily.
- set_symbol_registry(Smi::kZero);
-
// Microtask queue uses the empty fixed array as a sentinel for "empty".
// Number of queued microtasks stored in Isolate::pending_microtask_count().
set_microtask_queue(empty_fixed_array());
@@ -2779,6 +2806,7 @@ void Heap::CreateInitialObjects() {
empty_fixed_array());
empty_type_feedback_vector->set(TypeFeedbackVector::kInvocationCountIndex,
Smi::kZero);
+ empty_type_feedback_vector->set_map(type_feedback_vector_map());
set_empty_type_feedback_vector(*empty_type_feedback_vector);
// We use a canonical empty LiteralsArray for all functions that neither
@@ -2817,7 +2845,7 @@ void Heap::CreateInitialObjects() {
set_script_list(Smi::kZero);
Handle<SeededNumberDictionary> slow_element_dictionary =
- SeededNumberDictionary::New(isolate(), 0, TENURED);
+ SeededNumberDictionary::NewEmpty(isolate(), TENURED);
slow_element_dictionary->set_requires_slow_elements();
set_empty_slow_element_dictionary(*slow_element_dictionary);
@@ -2864,7 +2892,12 @@ void Heap::CreateInitialObjects() {
handle(Smi::FromInt(Isolate::kProtectorValid), isolate()));
set_array_iterator_protector(*array_iterator_cell);
+ cell = factory->NewPropertyCell();
+ cell->set_value(Smi::FromInt(Isolate::kProtectorValid));
+ set_array_buffer_neutering_protector(*cell);
+
set_serialized_templates(empty_fixed_array());
+ set_serialized_global_proxy_sizes(empty_fixed_array());
set_weak_stack_trace_list(Smi::kZero);
@@ -2878,6 +2911,42 @@ void Heap::CreateInitialObjects() {
// Initialize compilation cache.
isolate_->compilation_cache()->Clear();
+
+ // Finish creating JSPromiseCapabilityMap
+ {
+ // TODO(caitp): This initialization can be removed once PromiseCapability
+ // object is no longer used by builtins implemented in javascript.
+ Handle<Map> map = factory->js_promise_capability_map();
+ map->set_inobject_properties_or_constructor_function_index(3);
+
+ Map::EnsureDescriptorSlack(map, 3);
+
+ PropertyAttributes attrs =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+ { // promise
+ Descriptor d = Descriptor::DataField(factory->promise_string(),
+ JSPromiseCapability::kPromiseIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ { // resolve
+ Descriptor d = Descriptor::DataField(factory->resolve_string(),
+ JSPromiseCapability::kResolveIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ { // reject
+ Descriptor d = Descriptor::DataField(factory->reject_string(),
+ JSPromiseCapability::kRejectIndex,
+ attrs, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ map->set_is_extensible(false);
+ set_js_promise_capability_map(*map);
+ }
}
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
@@ -2888,7 +2957,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
case kEmptyScriptRootIndex:
- case kSymbolRegistryRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
case kMicrotaskQueueRootIndex:
@@ -2899,6 +2967,10 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kNoScriptSharedFunctionInfosRootIndex:
case kWeakStackTraceListRootIndex:
case kSerializedTemplatesRootIndex:
+ case kSerializedGlobalProxySizesRootIndex:
+ case kPublicSymbolTableRootIndex:
+ case kApiSymbolTableRootIndex:
+ case kApiPrivateSymbolTableRootIndex:
// Smi values
#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
SMI_ROOT_LIST(SMI_ENTRY)
@@ -2918,6 +2990,18 @@ bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
!InNewSpace(root(root_index));
}
+bool Heap::IsUnmodifiedHeapObject(Object** p) {
+ Object* object = *p;
+ if (object->IsSmi()) return false;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!object->IsJSObject()) return false;
+ JSObject* js_object = JSObject::cast(object);
+ if (!js_object->WasConstructedFromApiFunction()) return false;
+ JSFunction* constructor =
+ JSFunction::cast(js_object->map()->GetConstructor());
+
+ return constructor->initial_map() == heap_object->map();
+}
int Heap::FullSizeNumberStringCacheLength() {
// Compute the size of the number string cache based on the max newspace size.
@@ -3042,6 +3126,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_parameter_count(parameter_count);
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
+ instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
instance->set_source_position_table(empty_byte_array());
@@ -3050,9 +3135,9 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
return result;
}
-void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
- ClearBlackArea black_area_mode) {
- if (size == 0) return;
+HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots mode) {
+ if (size == 0) return nullptr;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
filler->set_map_no_write_barrier(
@@ -3070,20 +3155,11 @@ void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
ClearRecordedSlotRange(addr, addr + size);
}
- // If the location where the filler is created is within a black area we have
- // to clear the mark bits of the filler space.
- if (black_area_mode == ClearBlackArea::kYes &&
- incremental_marking()->black_allocation() &&
- Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
- Page* page = Page::FromAddress(addr);
- page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
- page->AddressToMarkbitIndex(addr + size));
- }
-
// At this point, we may be deserializing the heap from a snapshot; in
// that case none of the maps have been created yet and they are all NULL.
DCHECK((filler->map() == NULL && !deserialization_complete_) ||
filler->map()->IsMap());
+ return filler;
}
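Returning the filler (instead of void) lets callers post-process it; the trimming paths below use this to clear stale black mark bits, work that previously happened inside CreateFillerObjectAt under the now-removed ClearBlackArea::kYes mode. A sketch of the new caller pattern, with names taken from the RightTrimFixedArray hunk further down:

    HeapObject* filler =
        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
    if (filler != nullptr && incremental_marking()->black_allocation() &&
        Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
      // Clear the mark-bit range covering the filler (see the hunk below).
    }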
@@ -3101,8 +3177,12 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
return Page::FromAddress(address)->SweepingDone();
}
+bool Heap::IsImmovable(HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
+}
-void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
+void Heap::AdjustLiveBytes(HeapObject* object, int by) {
// As long as the inspected object is black and we are currently not iterating
// the heap using HeapIterator, we can update the live byte count. We cannot
// update while using HeapIterator because the iterator is temporarily
@@ -3111,12 +3191,9 @@ void Heap::AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode) {
lo_space()->AdjustLiveBytes(by);
} else if (!in_heap_iterator() &&
!mark_compact_collector()->sweeping_in_progress() &&
- Marking::IsBlack(ObjectMarking::MarkBitFrom(object->address()))) {
- if (mode == SEQUENTIAL_TO_SWEEPER) {
- MemoryChunk::IncrementLiveBytesFromGC(object, by);
- } else {
- MemoryChunk::IncrementLiveBytesFromMutator(object, by);
- }
+ Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
+ DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
+ MemoryChunk::IncrementLiveBytes(object, by);
}
}
@@ -3150,14 +3227,27 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Transfer the mark bits to their new location if the object is not within
// a black area.
if (!incremental_marking()->black_allocation() ||
- !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
- IncrementalMarking::TransferMark(this, old_start, new_start);
+ !Marking::IsBlack(
+ ObjectMarking::MarkBitFrom(HeapObject::FromAddress(new_start)))) {
+ IncrementalMarking::TransferMark(this, object,
+ HeapObject::FromAddress(new_start));
}
// Technically, in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to be safe
// we still do it.
CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+
+ // Clear the mark bits of the black area that now belongs to the filler.
+ // This is an optimization. The sweeper will release black fillers anyway.
+ if (incremental_marking()->black_allocation() &&
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) {
+ Page* page = Page::FromAddress(old_start);
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(old_start),
+ page->AddressToMarkbitIndex(old_start + bytes_to_trim));
+ }
+
// Initialize header of the trimmed array. Since left trimming is only
// performed on pages which are not concurrently swept creating a filler
// object does not require synchronization.
@@ -3171,7 +3261,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Maintain consistency of live bytes during incremental marking
- AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
+ AdjustLiveBytes(new_object, -bytes_to_trim);
// Remove recorded slots for the new map and length offset.
ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
@@ -3183,15 +3273,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
return new_object;
}
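A worked example of the trim arithmetic above, assuming a 64-bit build (kPointerSize == 8) and a hypothetical trim of three elements:

    // old_start                        new_start = old_start + 3 * 8 = +24
    //   |                                |
    //   [ filler: 24 bytes            ][ map | length | remaining elements ]
    //
    // The mark bit set at old_start is transferred to new_start (TransferMark
    // now takes HeapObject*s rather than raw addresses), and the mark bits
    // covering the 24-byte filler are cleared when the trimmed range lies in
    // a black allocation area.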
-
-// Force instantiation of templatized method.
-template void Heap::RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- FixedArrayBase*, int);
-template void Heap::RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- FixedArrayBase*, int);
-
-
-template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
const int len = object->length();
DCHECK_LE(elements_to_trim, len);
@@ -3235,7 +3316,18 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
// TODO(hpayer): We should shrink the large object page if the size
// of the object changed significantly.
if (!lo_space()->Contains(object)) {
- CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ HeapObject* filler =
+ CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
+ DCHECK_NOT_NULL(filler);
+ // Clear the mark bits of the black area that now belongs to the filler.
+ // This is an optimization. The sweeper will release black fillers anyway.
+ if (incremental_marking()->black_allocation() &&
+ Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) {
+ Page* page = Page::FromAddress(new_end);
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(new_end),
+ page->AddressToMarkbitIndex(new_end + bytes_to_trim));
+ }
}
// Initialize header of the trimmed array. We are storing the new length
@@ -3244,7 +3336,7 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
object->synchronized_set_length(len - elements_to_trim);
// Maintain consistency of live bytes during incremental marking
- AdjustLiveBytes(object, -bytes_to_trim, mode);
+ AdjustLiveBytes(object, -bytes_to_trim);
// Notify the heap profiler of change in object layout. The array may not be
// moved during GC, and size has to be adjusted nevertheless.
@@ -3331,18 +3423,24 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
if (!allocation.To(&result)) return allocation;
if (immovable) {
Address address = result->address();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(address);
// Code objects which should stay at a fixed address are allocated either
// in the first page of code space (objects on the first page of each space
- // are never moved) or in large object space.
- if (!code_space_->FirstPage()->Contains(address) &&
- MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
- // Discard the first code allocation, which was on a page where it could
- // be moved.
- CreateFillerObjectAt(result->address(), object_size,
- ClearRecordedSlots::kNo);
- allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
- if (!allocation.To(&result)) return allocation;
- OnAllocationEvent(result, object_size);
+ // are never moved), in large object space, or, during snapshot creation,
+ // on a page whose chunk has been marked as immovable.
+ if (!Heap::IsImmovable(result) &&
+ !code_space_->FirstPage()->Contains(address)) {
+ if (isolate()->serializer_enabled()) {
+ chunk->MarkNeverEvacuate();
+ } else {
+ // Discard the first code allocation, which was on a page where it could
+ // be moved.
+ CreateFillerObjectAt(result->address(), object_size,
+ ClearRecordedSlots::kNo);
+ allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+ if (!allocation.To(&result)) return allocation;
+ OnAllocationEvent(result, object_size);
+ }
}
}
@@ -3405,6 +3503,7 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
copy->set_source_position_table(bytecode_array->source_position_table());
copy->set_interrupt_budget(bytecode_array->interrupt_budget());
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
+ copy->set_bytecode_age(bytecode_array->bytecode_age());
bytecode_array->CopyBytecodesTo(copy);
return copy;
}
@@ -4045,9 +4144,7 @@ void Heap::MakeHeapIterable() {
CollectAllGarbage(kMakeHeapIterableMask,
GarbageCollectionReason::kMakeHeapIterable);
}
- if (mark_compact_collector()->sweeping_in_progress()) {
- mark_compact_collector()->EnsureSweepingCompleted();
- }
+ mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(IsHeapIterable());
}
@@ -4169,21 +4266,18 @@ void Heap::ReduceNewSpaceSize() {
}
}
-bool Heap::MarkingDequesAreEmpty() {
- return mark_compact_collector()->marking_deque()->IsEmpty() &&
- (!UsingEmbedderHeapTracer() ||
- (wrappers_to_trace() == 0 &&
- embedder_heap_tracer()->NumberOfWrappersToTrace() == 0));
-}
-
void Heap::FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason gc_reason) {
if (incremental_marking()->IsMarking() &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- MarkingDequesAreEmpty()))) {
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking()))) {
FinalizeIncrementalMarking(gc_reason);
- } else if (incremental_marking()->IsComplete() || MarkingDequesAreEmpty()) {
+ } else if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking())) {
CollectAllGarbage(current_gc_flags_, gc_reason);
}
}
@@ -4195,13 +4289,16 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->finalize_marking_completed() &&
- MarkingDequesAreEmpty() &&
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking() &&
gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
idle_time_in_ms))) {
FinalizeIncrementalMarking(gc_reason);
return true;
} else if (incremental_marking()->IsComplete() ||
- (MarkingDequesAreEmpty() &&
+ (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking() &&
gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
idle_time_in_ms, size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
@@ -4484,6 +4581,18 @@ void Heap::MemoryPressureNotification(MemoryPressureLevel level,
}
}
+void Heap::SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+ void* data) {
+ out_of_memory_callback_ = callback;
+ out_of_memory_callback_data_ = data;
+}
+
+void Heap::InvokeOutOfMemoryCallback() {
+ if (out_of_memory_callback_) {
+ out_of_memory_callback_(out_of_memory_callback_data_);
+ }
+}
+
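A minimal usage sketch for the new hook; the void(void*) signature is inferred from the invocation above, and the debugger-session names are hypothetical:

    // Hypothetical embedder code, for illustration only.
    static void OnV8OutOfMemory(void* data) {
      auto* session = static_cast<DebugSession*>(data);  // assumed type
      session->NotifyHeapExhausted();
    }
    // ... during setup:
    heap->SetOutOfMemoryCallback(&OnV8OutOfMemory, session);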
void Heap::CollectCodeStatistics() {
CodeStatistics::ResetCodeAndMetadataStatistics(isolate());
// We do not look for code in new space, or map space. If code
@@ -4698,10 +4807,8 @@ void Heap::Verify() {
CHECK(HasBeenSetUp());
HandleScope scope(isolate());
- if (mark_compact_collector()->sweeping_in_progress()) {
- // We have to wait here for the sweeper threads to have an iterable heap.
- mark_compact_collector()->EnsureSweepingCompleted();
- }
+ // We have to wait here for the sweeper threads to have an iterable heap.
+ mark_compact_collector()->EnsureSweepingCompleted();
VerifyPointersVisitor visitor;
IterateRoots(&visitor, VISIT_ONLY_STRONG);
@@ -4729,8 +4836,8 @@ void Heap::Verify() {
void Heap::ZapFromSpace() {
if (!new_space_->IsFromSpaceCommitted()) return;
- for (Page* page : NewSpacePageRange(new_space_->FromSpaceStart(),
- new_space_->FromSpaceEnd())) {
+ for (Page* page :
+ PageRange(new_space_->FromSpaceStart(), new_space_->FromSpaceEnd())) {
for (Address cursor = page->area_start(), limit = page->area_end();
cursor < limit; cursor += kPointerSize) {
Memory::Address_at(cursor) = kFromSpaceZapValue;
@@ -4838,7 +4945,7 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kStringTable);
if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
- external_string_table_.Iterate(v);
+ external_string_table_.IterateAll(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
@@ -4937,8 +5044,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
case VISIT_ONLY_STRONG_ROOT_LIST:
UNREACHABLE();
break;
- case VISIT_ONLY_STRONG:
case VISIT_ONLY_STRONG_FOR_SERIALIZATION:
+ break;
+ case VISIT_ONLY_STRONG:
isolate_->global_handles()->IterateStrongRoots(v);
break;
case VISIT_ALL_IN_SCAVENGE:
@@ -5052,7 +5160,7 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
// The old generation is paged and needs at least one page for each space.
int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
- max_old_generation_size_ =
+ initial_max_old_generation_size_ = max_old_generation_size_ =
Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
@@ -5307,6 +5415,13 @@ void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
}
}
+bool Heap::ShouldOptimizeForLoadTime() {
+ return isolate()->rail_mode() == PERFORMANCE_LOAD &&
+ !AllocationLimitOvershotByLargeMargin() &&
+ MonotonicallyIncreasingTimeInMs() <
+ isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
+}
+
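The predicate holds only inside a bounded window after page-load start; kMaxLoadTimeMs (7000 ms, added to heap.h below) caps how long load-time mode can suppress GC work:

    // ShouldOptimizeForLoadTime() == true  iff
    //   rail_mode == PERFORMANCE_LOAD              (page still loading)
    //   && !AllocationLimitOvershotByLargeMargin() (not badly over limit)
    //   && now < LoadStartTimeMs() + 7000          /* kMaxLoadTimeMs */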
// This predicate is called when an old generation space cannot allocate from
// the free list and is about to add a new page. Returning false will cause a
// major GC. It happens when the old generation allocation limit is reached and
@@ -5318,6 +5433,8 @@ bool Heap::ShouldExpandOldGenerationOnSlowAllocation() {
if (ShouldOptimizeForMemoryUsage()) return false;
+ if (ShouldOptimizeForLoadTime()) return true;
+
if (incremental_marking()->NeedsFinalization()) {
return !AllocationLimitOvershotByLargeMargin();
}
@@ -5352,9 +5469,13 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
- // We are close to the allocation limit.
- // Choose between the hard and the soft limits.
- if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
+ if (ShouldOptimizeForMemoryUsage()) {
+ return IncrementalMarkingLimit::kHardLimit;
+ }
+ if (ShouldOptimizeForLoadTime()) {
+ return IncrementalMarkingLimit::kNoLimit;
+ }
+ if (old_generation_space_available == 0) {
return IncrementalMarkingLimit::kHardLimit;
}
return IncrementalMarkingLimit::kSoftLimit;
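After this restructuring the limit decision is an ordered ladder rather than one combined condition; a paraphrase of the control flow above:

    // 1. old_generation_space_available > new_space capacity -> kNoLimit
    // 2. ShouldOptimizeForMemoryUsage()                      -> kHardLimit
    // 3. ShouldOptimizeForLoadTime()                         -> kNoLimit
    // 4. old_generation_space_available == 0                 -> kHardLimit
    // 5. otherwise                                           -> kSoftLimit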
@@ -5477,6 +5598,7 @@ bool Heap::SetUp() {
dead_object_stats_ = new ObjectStats(this);
}
scavenge_job_ = new ScavengeJob();
+ local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer();
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5496,7 +5618,7 @@ bool Heap::SetUp() {
bool Heap::CreateHeapObjects() {
// Create initial maps.
if (!CreateInitialMaps()) return false;
- CreateApiObjects();
+ if (!CreateApiObjects()) return false;
// Create initial objects
CreateInitialObjects();
@@ -5552,16 +5674,7 @@ void Heap::NotifyDeserializationComplete() {
void Heap::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
DCHECK_EQ(gc_state_, HeapState::NOT_IN_GC);
- embedder_heap_tracer_ = tracer;
-}
-
-void Heap::RegisterWrappersWithEmbedderHeapTracer() {
- DCHECK(UsingEmbedderHeapTracer());
- if (wrappers_to_trace_.empty()) {
- return;
- }
- embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
- wrappers_to_trace_.clear();
+ local_embedder_heap_tracer()->SetRemoteTracer(tracer);
}
void Heap::TracePossibleWrapper(JSObject* js_object) {
@@ -5571,17 +5684,12 @@ void Heap::TracePossibleWrapper(JSObject* js_object) {
js_object->GetInternalField(0) != undefined_value() &&
js_object->GetInternalField(1) != undefined_value()) {
DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
- wrappers_to_trace_.push_back(std::pair<void*, void*>(
+ local_embedder_heap_tracer()->AddWrapperToTrace(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetInternalField(0)),
reinterpret_cast<void*>(js_object->GetInternalField(1))));
}
}
-bool Heap::RequiresImmediateWrapperProcessing() {
- const size_t kTooManyWrappers = 16000;
- return wrappers_to_trace_.size() > kTooManyWrappers;
-}
-
void Heap::RegisterExternallyReferencedObject(Object** object) {
HeapObject* heap_object = HeapObject::cast(*object);
DCHECK(Contains(heap_object));
@@ -5658,6 +5766,9 @@ void Heap::TearDown() {
dead_object_stats_ = nullptr;
}
+ delete local_embedder_heap_tracer_;
+ local_embedder_heap_tracer_ = nullptr;
+
delete scavenge_job_;
scavenge_job_ = nullptr;
@@ -5803,8 +5914,6 @@ void Heap::CompactWeakFixedArrays() {
WeakFixedArray* array = WeakFixedArray::cast(prototype_users);
array->Compact<JSObject::PrototypeRegistryCompactionCallback>();
}
- } else if (o->IsScript()) {
- CompactWeakFixedArray(Script::cast(o)->shared_function_infos());
}
}
CompactWeakFixedArray(noscript_shared_function_infos());
@@ -5909,6 +6018,18 @@ void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
}
}
+bool Heap::HasRecordedSlot(HeapObject* object, Object** slot) {
+ if (InNewSpace(object)) {
+ return false;
+ }
+ Address slot_addr = reinterpret_cast<Address>(slot);
+ Page* page = Page::FromAddress(slot_addr);
+ DCHECK_EQ(page->owner()->identity(), OLD_SPACE);
+ store_buffer()->MoveAllEntriesToRememberedSet();
+ return RememberedSet<OLD_TO_NEW>::Contains(page, slot_addr) ||
+ RememberedSet<OLD_TO_OLD>::Contains(page, slot_addr);
+}
+
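The helper answers whether a slot appears in either remembered set after flushing the store buffer; new-space objects trivially have none. A plausible use is in assertions around slot bookkeeping (this call site is an assumption, not taken from the patch):

    // Hypothetical assertion; 'host' and the field choice are placeholders.
    DCHECK(heap->HasRecordedSlot(
        host, HeapObject::RawField(host, JSObject::kPropertiesOffset)));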
void Heap::ClearRecordedSlotRange(Address start, Address end) {
Page* page = Page::FromAddress(start);
if (!page->InNewSpace()) {
@@ -6330,7 +6451,7 @@ void Heap::UpdateTotalGCTime(double duration) {
}
}
-void Heap::ExternalStringTable::CleanUp() {
+void Heap::ExternalStringTable::CleanUpNewSpaceStrings() {
int last = 0;
Isolate* isolate = heap_->isolate();
for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -6346,8 +6467,12 @@ void Heap::ExternalStringTable::CleanUp() {
}
new_space_strings_.Rewind(last);
new_space_strings_.Trim();
+}
- last = 0;
+void Heap::ExternalStringTable::CleanUpAll() {
+ CleanUpNewSpaceStrings();
+ int last = 0;
+ Isolate* isolate = heap_->isolate();
for (int i = 0; i < old_space_strings_.length(); ++i) {
if (old_space_strings_[i]->IsTheHole(isolate)) {
continue;
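Splitting CleanUp() mirrors the Iterate() split in heap.h below: a scavenge only touches new-space strings, so it can restore the invariant without walking the old-space list, while a full GC cleans both. A sketch of the intended pairing (the call sites are an assumption):

    external_string_table_.IterateNewSpaceStrings(&visitor);
    external_string_table_.CleanUpNewSpaceStrings();  // after a scavenge

    external_string_table_.IterateAll(&visitor);
    external_string_table_.CleanUpAll();              // after a full GC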
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 013cd9a8fe..d8034891fc 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -14,6 +14,7 @@
#include "src/allocation.h"
#include "src/assert-scope.h"
#include "src/base/atomic-utils.h"
+#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/heap-symbols.h"
#include "src/list.h"
@@ -77,6 +78,7 @@ using v8::MemoryPressureLevel;
/* Context maps */ \
V(Map, native_context_map, NativeContextMap) \
V(Map, module_context_map, ModuleContextMap) \
+ V(Map, eval_context_map, EvalContextMap) \
V(Map, script_context_map, ScriptContextMap) \
V(Map, block_context_map, BlockContextMap) \
V(Map, catch_context_map, CatchContextMap) \
@@ -93,6 +95,7 @@ using v8::MemoryPressureLevel;
V(Map, external_map, ExternalMap) \
V(Map, bytecode_array_map, BytecodeArrayMap) \
V(Map, module_info_map, ModuleInfoMap) \
+ V(Map, type_feedback_vector_map, TypeFeedbackVectorMap) \
/* String maps */ \
V(Map, native_source_string_map, NativeSourceStringMap) \
V(Map, string_map, StringMap) \
@@ -168,6 +171,8 @@ using v8::MemoryPressureLevel;
V(PropertyCell, string_length_protector, StringLengthProtector) \
V(Cell, fast_array_iteration_protector, FastArrayIterationProtector) \
V(Cell, array_iterator_protector, ArrayIteratorProtector) \
+ V(PropertyCell, array_buffer_neutering_protector, \
+ ArrayBufferNeuteringProtector) \
/* Special numbers */ \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, hole_nan_value, HoleNanValue) \
@@ -190,7 +195,9 @@ using v8::MemoryPressureLevel;
ExperimentalExtraNativesSourceCache) \
/* Lists and dictionaries */ \
V(NameDictionary, empty_properties_dictionary, EmptyPropertiesDictionary) \
- V(Object, symbol_registry, SymbolRegistry) \
+ V(NameDictionary, public_symbol_table, PublicSymbolTable) \
+ V(NameDictionary, api_symbol_table, ApiSymbolTable) \
+ V(NameDictionary, api_private_symbol_table, ApiPrivateSymbolTable) \
V(Object, script_list, ScriptList) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(FixedArray, materialized_objects, MaterializedObjects) \
@@ -206,8 +213,10 @@ using v8::MemoryPressureLevel;
V(Object, weak_stack_trace_list, WeakStackTraceList) \
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_templates, SerializedTemplates) \
+ V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
/* Configured values */ \
V(TemplateList, message_listeners, MessageListeners) \
+ V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
V(Code, js_entry_code, JsEntryCode) \
V(Code, js_construct_entry_code, JsConstructEntryCode) \
/* Oddball maps */ \
@@ -221,7 +230,10 @@ using v8::MemoryPressureLevel;
V(Map, exception_map, ExceptionMap) \
V(Map, termination_exception_map, TerminationExceptionMap) \
V(Map, optimized_out_map, OptimizedOutMap) \
- V(Map, stale_register_map, StaleRegisterMap)
+ V(Map, stale_register_map, StaleRegisterMap) \
+ /* per-Isolate map for JSPromiseCapability. */ \
+ /* TODO(caitp): Make this a Struct */ \
+ V(Map, js_promise_capability_map, JSPromiseCapabilityMap)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -297,6 +309,7 @@ using v8::MemoryPressureLevel;
V(WithContextMap) \
V(BlockContextMap) \
V(ModuleContextMap) \
+ V(EvalContextMap) \
V(ScriptContextMap) \
V(UndefinedMap) \
V(TheHoleMap) \
@@ -325,6 +338,7 @@ class HeapObjectsFilter;
class HeapStats;
class HistogramTimer;
class Isolate;
+class LocalEmbedderHeapTracer;
class MemoryAllocator;
class MemoryReducer;
class ObjectIterator;
@@ -347,8 +361,6 @@ enum ArrayStorageAllocationMode {
enum class ClearRecordedSlots { kYes, kNo };
-enum class ClearBlackArea { kYes, kNo };
-
enum class GarbageCollectionReason {
kUnknown = 0,
kAllocationFailure = 1,
@@ -554,12 +566,6 @@ class Heap {
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- // Indicates whether live bytes adjustment is triggered
- // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
- // - or from within GC (CONCURRENT_TO_SWEEPER),
- // - or mutator code (CONCURRENT_TO_SWEEPER).
- enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
-
enum UpdateAllocationSiteMode { kGlobal, kCached };
// Taking this lock prevents the GC from entering a phase that relocates
@@ -607,7 +613,7 @@ class Heap {
static const int kMaxOldSpaceSizeMediumMemoryDevice =
256 * kPointerMultiplier;
static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
// The executable size has to be a multiple of Page::kPageSize.
// Sizes are in MB.
@@ -673,6 +679,8 @@ class Heap {
// they are in new space.
static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+ static bool IsUnmodifiedHeapObject(Object** p);
+
// Zapping is needed for verify heap, and always done in debug builds.
static inline bool ShouldZapGarbage() {
#ifdef DEBUG
@@ -739,24 +747,22 @@ class Heap {
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages. If slots could have been recorded in
// the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
- // pass ClearRecordedSlots::kNo. If the filler was created in a black area
- // we may want to clear the corresponding mark bits with ClearBlackArea::kYes,
- // which is the default. ClearBlackArea::kNo does not clear the mark bits.
- void CreateFillerObjectAt(
- Address addr, int size, ClearRecordedSlots mode,
- ClearBlackArea black_area_mode = ClearBlackArea::kYes);
+ // pass ClearRecordedSlots::kNo.
+ HeapObject* CreateFillerObjectAt(Address addr, int size,
+ ClearRecordedSlots mode);
bool CanMoveObjectStart(HeapObject* object);
+ static bool IsImmovable(HeapObject* object);
+
// Maintain consistency of live bytes during incremental marking.
- void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
+ void AdjustLiveBytes(HeapObject* object, int by);
// Trim the given array from the left. Note that this relocates the object
// start and hence is only valid if there is only a single reference to it.
FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
// Trim the given array from the right.
- template<Heap::InvocationMode mode>
void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
// Converts the given boolean condition to JavaScript boolean value.
@@ -787,6 +793,9 @@ class Heap {
Object* encountered_weak_collections() const {
return encountered_weak_collections_;
}
+ void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
+ visitor->VisitPointer(&encountered_weak_collections_);
+ }
void set_encountered_weak_cells(Object* weak_cell) {
encountered_weak_cells_ = weak_cell;
@@ -816,6 +825,7 @@ class Heap {
void PrintShortHeapStatistics();
inline HeapState gc_state() { return gc_state_; }
+ void SetGCState(HeapState state);
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
@@ -831,7 +841,7 @@ class Heap {
// Support for the API.
//
- void CreateApiObjects();
+ bool CreateApiObjects();
// Implements the corresponding V8 API function.
bool IdleNotification(double deadline_in_seconds);
@@ -841,6 +851,9 @@ class Heap {
bool is_isolate_locked);
void CheckMemoryPressure();
+ void SetOutOfMemoryCallback(v8::debug::OutOfMemoryCallback callback,
+ void* data);
+
double MonotonicallyIncreasingTimeInMs();
void RecordStats(HeapStats* stats, bool take_snapshot = false);
@@ -874,6 +887,7 @@ class Heap {
inline int GetNextTemplateSerialNumber();
inline void SetSerializedTemplates(FixedArray* templates);
+ inline void SetSerializedGlobalProxySizes(FixedArray* sizes);
// For post mortem debugging.
void RememberUnmappedPage(Address page, bool compacted);
@@ -948,6 +962,30 @@ class Heap {
return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
}
+ size_t HeapLimitForDebugging() {
+ const size_t kDebugHeapSizeFactor = 4;
+ size_t max_limit = std::numeric_limits<size_t>::max() / 4;
+ return Min(max_limit,
+ initial_max_old_generation_size_ * kDebugHeapSizeFactor);
+ }
+
+ void IncreaseHeapLimitForDebugging() {
+ max_old_generation_size_ =
+ Max(max_old_generation_size_, HeapLimitForDebugging());
+ }
+
+ void RestoreOriginalHeapLimit() {
+ // Do not set the limit lower than the live size + some slack.
+ size_t min_limit = SizeOfObjects() + SizeOfObjects() / 4;
+ max_old_generation_size_ =
+ Min(max_old_generation_size_,
+ Max(initial_max_old_generation_size_, min_limit));
+ }
+
+ bool IsHeapLimitIncreasedForDebugging() {
+ return max_old_generation_size_ == HeapLimitForDebugging();
+ }
+
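A worked example of the three helpers above, assuming an initial max old generation size of 256 MB and 300 MB of live objects when the debugger detaches:

    // HeapLimitForDebugging() = Min(SIZE_MAX / 4, 256 MB * 4)   = 1024 MB
    // IncreaseHeapLimitForDebugging(): max_old_generation_size_ = 1024 MB
    // RestoreOriginalHeapLimit():
    //   min_limit = 300 MB + 300 MB / 4                         =  375 MB
    //   Min(1024 MB, Max(256 MB, 375 MB))                       =  375 MB
    // i.e. the restored limit never drops below live size plus 25% slack.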
// ===========================================================================
// Initialization. ===========================================================
// ===========================================================================
@@ -1172,6 +1210,8 @@ class Heap {
void ClearRecordedSlot(HeapObject* object, Object** slot);
void ClearRecordedSlotRange(Address start, Address end);
+ bool HasRecordedSlot(HeapObject* object, Object** slot);
+
// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================
@@ -1203,24 +1243,13 @@ class Heap {
// Embedder heap tracer support. =============================================
// ===========================================================================
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer() {
+ return local_embedder_heap_tracer_;
+ }
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- bool UsingEmbedderHeapTracer() { return embedder_heap_tracer() != nullptr; }
-
void TracePossibleWrapper(JSObject* js_object);
-
void RegisterExternallyReferencedObject(Object** object);
- void RegisterWrappersWithEmbedderHeapTracer();
-
- // In order to avoid running out of memory we force tracing wrappers if there
- // are too many of them.
- bool RequiresImmediateWrapperProcessing();
-
- EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
-
- size_t wrappers_to_trace() { return wrappers_to_trace_.size(); }
-
// ===========================================================================
// External string table API. ================================================
// ===========================================================================
@@ -1501,6 +1530,7 @@ class Heap {
GarbageCollectionReason gc_reason);
private:
+ class SkipStoreBufferScope;
class PretenuringScope;
// External strings table is a place where all external strings are
@@ -1511,11 +1541,13 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
- inline void Iterate(ObjectVisitor* v);
+ inline void IterateAll(ObjectVisitor* v);
+ inline void IterateNewSpaceStrings(ObjectVisitor* v);
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- void CleanUp();
+ // Restores internal invariant and gets rid of collected strings. Must be
+ // called after each Iterate*() that modified the strings.
+ void CleanUpAll();
+ void CleanUpNewSpaceStrings();
// Destroys all allocated memory.
void TearDown();
@@ -1632,10 +1664,6 @@ class Heap {
return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
}
- // Checks whether both, the internal marking deque, and the embedder provided
- // one are empty. Avoid in fast path as it potentially calls through the API.
- bool MarkingDequesAreEmpty();
-
void PreprocessStackTraces();
// Checks whether a global GC is necessary
@@ -1747,6 +1775,8 @@ class Heap {
void CollectGarbageOnMemoryPressure();
+ void InvokeOutOfMemoryCallback();
+
// Attempt to over-approximate the weak closure by marking object groups and
// implicit references from global handles, but don't atomically complete
// marking. If we continue to mark incrementally, we might have marked
@@ -1840,6 +1870,14 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
+ // For some webpages RAIL mode never switches away from PERFORMANCE_LOAD.
+ // This constant caps how long load RAIL mode can affect GC. The value is
+ // somewhat arbitrary; it was chosen as the largest load time observed in
+ // V8 browsing benchmarks.
+ static const int kMaxLoadTimeMs = 7000;
+
+ bool ShouldOptimizeForLoadTime();
+
// Decrease the allocation limit if the new limit based on the given
// parameters is lower than the current limit.
void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
@@ -2128,6 +2166,7 @@ class Heap {
size_t max_semi_space_size_;
size_t initial_semispace_size_;
size_t max_old_generation_size_;
+ size_t initial_max_old_generation_size_;
size_t initial_old_generation_size_;
bool old_generation_size_configured_;
size_t max_executable_size_;
@@ -2148,6 +2187,9 @@ class Heap {
// and reset by a mark-compact garbage collection.
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+ v8::debug::OutOfMemoryCallback out_of_memory_callback_;
+ void* out_of_memory_callback_data_;
+
// For keeping track of context disposals.
int contexts_disposed_;
@@ -2338,8 +2380,7 @@ class Heap {
// The depth of HeapIterator nestings.
int heap_iterator_depth_;
- EmbedderHeapTracer* embedder_heap_tracer_;
- std::vector<std::pair<void*, void*>> wrappers_to_trace_;
+ LocalEmbedderHeapTracer* local_embedder_heap_tracer_;
// Used for testing purposes.
bool force_oom_;
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 4b1d7712a7..b0418686bf 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -32,6 +32,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
+ trace_wrappers_toggle_(false),
request_type_(NONE),
new_generation_observer_(*this, kAllocatedThreshold),
old_generation_observer_(*this, kAllocatedThreshold) {}
@@ -129,27 +130,27 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
HeapObject* heap_obj = HeapObject::cast(obj);
MarkBit mark_bit = ObjectMarking::MarkBitFrom(HeapObject::cast(obj));
if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
+ MemoryChunk::IncrementLiveBytes(heap_obj, -heap_obj->Size());
}
Marking::AnyToGrey(mark_bit);
}
}
-void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
- Address new_start) {
+void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
+ HeapObject* to) {
// This is only used when resizing an object.
- DCHECK(MemoryChunk::FromAddress(old_start) ==
- MemoryChunk::FromAddress(new_start));
+ DCHECK(MemoryChunk::FromAddress(from->address()) ==
+ MemoryChunk::FromAddress(to->address()));
if (!heap->incremental_marking()->IsMarking()) return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
- if (old_start == new_start) return;
+ if (from == to) return;
- MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(new_start);
- MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(old_start);
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to);
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from);
#ifdef DEBUG
Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
@@ -161,8 +162,7 @@ void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
return;
} else if (Marking::IsGrey(old_mark_bit)) {
Marking::GreyToWhite(old_mark_bit);
- heap->incremental_marking()->WhiteToGreyAndPush(
- HeapObject::FromAddress(new_start), new_mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(to, new_mark_bit);
heap->incremental_marking()->RestartIfNotMarking();
}
@@ -268,7 +268,7 @@ class IncrementalMarkingMarkingVisitor
MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
if (Marking::IsWhite(mark_bit)) {
Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
+ MemoryChunk::IncrementLiveBytes(heap_object, heap_object->Size());
return true;
}
return false;
@@ -524,10 +524,10 @@ void IncrementalMarking::StartMarking() {
state_ = MARKING;
- if (heap_->UsingEmbedderHeapTracer()) {
+ {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue();
+ heap_->local_embedder_heap_tracer()->TracePrologue();
}
RecordWriteStub::Mode mode = is_compacting_
@@ -603,7 +603,7 @@ void IncrementalMarking::MarkObjectGroups() {
TRACE_GC(heap_->tracer(),
GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);
- DCHECK(!heap_->UsingEmbedderHeapTracer());
+ DCHECK(!heap_->local_embedder_heap_tracer()->InUse());
DCHECK(!finalize_marking_completed_);
DCHECK(IsMarking());
@@ -736,7 +736,8 @@ void IncrementalMarking::FinalizeIncrementally() {
// 4) Remove weak cell with live values from the list of weak cells, they
// do not need processing during GC.
MarkRoots();
- if (!heap_->UsingEmbedderHeapTracer()) {
+ if (!heap_->local_embedder_heap_tracer()->InUse() &&
+ FLAG_object_grouping_in_incremental_finalization) {
MarkObjectGroups();
}
if (incremental_marking_finalization_rounds_ == 0) {
@@ -750,7 +751,8 @@ void IncrementalMarking::FinalizeIncrementally() {
abs(old_marking_deque_top -
heap_->mark_compact_collector()->marking_deque()->top());
- marking_progress += static_cast<int>(heap_->wrappers_to_trace());
+ marking_progress += static_cast<int>(
+ heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
@@ -806,8 +808,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
- if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
- continue;
+ if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest))) continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
DCHECK(new_top != marking_deque->bottom());
@@ -864,7 +865,7 @@ void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
if (Marking::IsBlack(mark_bit)) return;
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj, size);
+ MemoryChunk::IncrementLiveBytes(obj, size);
}
intptr_t IncrementalMarking::ProcessMarkingDeque(
@@ -890,6 +891,11 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(
VisitObject(map, obj, size);
bytes_processed += size - unscanned_bytes_of_large_object_;
}
+ // Report all found wrappers to the embedder. This is necessary as the
+ // embedder could potentially invalidate wrappers as soon as V8 is done
+ // with its incremental marking processing. Any cached wrappers could
+ // result in broken pointers at this point.
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
return bytes_processed;
}
@@ -933,7 +939,7 @@ void IncrementalMarking::Hurry() {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(cache);
if (Marking::IsGrey(mark_bit)) {
Marking::GreyToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
+ MemoryChunk::IncrementLiveBytes(cache, cache->Size());
}
}
context = Context::cast(context)->next_context_link();
@@ -1026,15 +1032,40 @@ void IncrementalMarking::Epilogue() {
double IncrementalMarking::AdvanceIncrementalMarking(
double deadline_in_ms, CompletionAction completion_action,
ForceCompletionAction force_completion, StepOrigin step_origin) {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
+ DCHECK_EQ(
+ 0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double remaining_time_in_ms = 0.0;
intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
kStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+ const bool incremental_wrapper_tracing =
+ state_ == MARKING && FLAG_incremental_marking_wrappers &&
+ heap_->local_embedder_heap_tracer()->InUse();
do {
- Step(step_size_in_bytes, completion_action, force_completion, step_origin);
+ if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
+ const double wrapper_deadline =
+ heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
+ if (!heap_->local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking()) {
+ heap_->local_embedder_heap_tracer()->Trace(
+ wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::
+ DO_NOT_FORCE_COMPLETION));
+ }
+ } else {
+ Step(step_size_in_bytes, completion_action, force_completion,
+ step_origin);
+ }
+ trace_wrappers_toggle_ = !trace_wrappers_toggle_;
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
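Within one deadline, trace_wrappers_toggle_ alternates the per-iteration work between V8's own marking and embedder wrapper tracing, so neither side can starve the other. The resulting schedule, roughly:

    // iteration 0: Step(...)                                  (V8 marking)
    // iteration 1: local_embedder_heap_tracer()->Trace(...)   (wrappers)
    // iteration 2: Step(...)
    // ...until the deadline, completion, or an empty deque plus a tracer
    // that reports it is ready to finalize.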
@@ -1109,6 +1140,10 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed = bytes_to_process;
} else {
+ HistogramTimerScope incremental_marking_scope(
+ heap_->isolate()->counters()->gc_incremental_marking());
+ TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
FORCE_COMPLETION, StepOrigin::kV8);
}
@@ -1120,10 +1155,6 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
CompletionAction action,
ForceCompletionAction completion,
StepOrigin step_origin) {
- HistogramTimerScope incremental_marking_scope(
- heap_->isolate()->counters()->gc_incremental_marking());
- TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
- TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1133,41 +1164,26 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
size_t bytes_processed = 0;
if (state_ == MARKING) {
- const bool incremental_wrapper_tracing =
- FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
- const bool process_wrappers =
- incremental_wrapper_tracing &&
- (heap_->RequiresImmediateWrapperProcessing() ||
- heap_->mark_compact_collector()->marking_deque()->IsEmpty());
- bool wrapper_work_left = incremental_wrapper_tracing;
- if (!process_wrappers) {
- bytes_processed = ProcessMarkingDeque(bytes_to_process);
- if (step_origin == StepOrigin::kTask) {
- bytes_marked_ahead_of_schedule_ += bytes_processed;
- }
- } else {
- const double wrapper_deadline =
- heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
- TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
- heap_->RegisterWrappersWithEmbedderHeapTracer();
- wrapper_work_left = heap_->embedder_heap_tracer()->AdvanceTracing(
- wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::
- DO_NOT_FORCE_COMPLETION));
+ bytes_processed = ProcessMarkingDeque(bytes_to_process);
+ if (step_origin == StepOrigin::kTask) {
+ bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (heap_->mark_compact_collector()->marking_deque()->IsEmpty() &&
- !wrapper_work_left) {
- if (completion == FORCE_COMPLETION ||
- IsIdleMarkingDelayCounterLimitReached()) {
- if (!finalize_marking_completed_) {
- FinalizeMarking(action);
+ if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
+ if (heap_->local_embedder_heap_tracer()
+ ->ShouldFinalizeIncrementalMarking()) {
+ if (completion == FORCE_COMPLETION ||
+ IsIdleMarkingDelayCounterLimitReached()) {
+ if (!finalize_marking_completed_) {
+ FinalizeMarking(action);
+ } else {
+ MarkingComplete(action);
+ }
} else {
- MarkingComplete(action);
+ IncrementIdleMarkingDelayCounter();
}
} else {
- IncrementIdleMarkingDelayCounter();
+ heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
}
}
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 7ce0ae2379..5464f129a7 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -22,7 +22,7 @@ class PagedSpace;
enum class StepOrigin { kV8, kTask };
-class IncrementalMarking {
+class V8_EXPORT_PRIVATE IncrementalMarking {
public:
enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
@@ -151,8 +151,7 @@ class IncrementalMarking {
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
- V8_EXPORT_PRIVATE void RecordWriteSlow(HeapObject* obj, Object** slot,
- Object* value);
+ void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
@@ -184,7 +183,7 @@ class IncrementalMarking {
static void MarkBlack(HeapObject* object, int size);
- static void TransferMark(Heap* heap, Address old_start, Address new_start);
+ static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
// Returns true if the color transfer requires live bytes updating.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
@@ -298,6 +297,7 @@ class IncrementalMarking {
bool was_activated_;
bool black_allocation_;
bool finalize_marking_completed_;
+ bool trace_wrappers_toggle_;
GCRequestType request_type_;
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 784a76f8bd..1973753b0c 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -15,7 +15,7 @@ namespace internal {
void MarkCompactCollector::PushBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (marking_deque()->Push(obj)) {
- MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
} else {
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
Marking::BlackToGrey(mark_bit);
@@ -26,7 +26,7 @@ void MarkCompactCollector::PushBlack(HeapObject* obj) {
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(obj)));
if (!marking_deque()->Unshift(obj)) {
- MemoryChunk::IncrementLiveBytesFromGC(obj, -obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, -obj->Size());
MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
Marking::BlackToGrey(mark_bit);
}
@@ -47,7 +47,7 @@ void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
DCHECK(Marking::IsWhite(mark_bit));
DCHECK(ObjectMarking::MarkBitFrom(obj) == mark_bit);
Marking::WhiteToBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
+ MemoryChunk::IncrementLiveBytes(obj, obj->Size());
}
@@ -195,12 +195,13 @@ HeapObject* LiveObjectIterator<T>::Next() {
object = black_object;
}
} else if ((T == kGreyObjects || T == kAllLiveObjects)) {
+ map = base::NoBarrierAtomicValue<Map*>::FromAddress(addr)->Value();
object = HeapObject::FromAddress(addr);
}
// We found a live object.
if (object != nullptr) {
- if (map != nullptr && map == heap()->one_pointer_filler_map()) {
+ if (map == heap()->one_pointer_filler_map()) {
// Black areas together with slack tracking may result in black one
// word filler objects. We filter these objects out in the iterator.
object = nullptr;
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 88e6983035..c931f520b7 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -53,7 +53,6 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
#ifdef DEBUG
state_(IDLE),
#endif
- marking_parity_(ODD_MARKING_PARITY),
was_marked_incrementally_(false),
evacuation_(false),
compacting_(false),
@@ -105,7 +104,9 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
Address next_object_must_be_here_or_later = bottom;
for (Address current = bottom; current < top;) {
object = HeapObject::FromAddress(current);
- if (MarkCompactCollector::IsMarked(object)) {
+ // One-word fillers at the end of a black area can be grey.
+ if (MarkCompactCollector::IsMarked(object) &&
+ object->map() != heap->one_pointer_filler_map()) {
CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
@@ -133,7 +134,7 @@ static void VerifyMarking(NewSpace* space) {
// page->area_start() as start of range on all pages.
CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
- NewSpacePageRange range(space->bottom(), end);
+ PageRange range(space->bottom(), end);
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address limit = it != range.end() ? page->area_end() : end;
@@ -197,7 +198,7 @@ static void VerifyEvacuation(Page* page) {
static void VerifyEvacuation(NewSpace* space) {
VerifyEvacuationVisitor visitor;
- NewSpacePageRange range(space->bottom(), space->top());
+ PageRange range(space->bottom(), space->top());
for (auto it = range.begin(); it != range.end();) {
Page* page = *(it++);
Address current = page->area_start();
@@ -322,7 +323,6 @@ void MarkCompactCollector::CollectGarbage() {
Finish();
}
-
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
for (Page* p : *space) {
@@ -333,7 +333,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
+ for (Page* p : PageRange(space->bottom(), space->top())) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -354,7 +354,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
}
}
-
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
HeapObjectIterator code_iterator(heap()->code_space());
for (HeapObject* obj = code_iterator.Next(); obj != NULL;
@@ -779,10 +778,8 @@ void MarkCompactCollector::Prepare() {
DCHECK(!FLAG_never_compact || !FLAG_always_compact);
- if (sweeping_in_progress()) {
- // Instead of waiting we could also abort the sweeper threads here.
- EnsureSweepingCompleted();
- }
+ // Instead of waiting we could also abort the sweeper threads here.
+ EnsureSweepingCompleted();
if (heap()->incremental_marking()->IsSweeping()) {
heap()->incremental_marking()->Stop();
@@ -801,22 +798,14 @@ void MarkCompactCollector::Prepare() {
AbortWeakCells();
AbortTransitionArrays();
AbortCompaction();
- if (heap_->UsingEmbedderHeapTracer()) {
- heap_->embedder_heap_tracer()->AbortTracing();
- }
+ heap_->local_embedder_heap_tracer()->AbortTracing();
marking_deque()->Clear();
was_marked_incrementally_ = false;
}
if (!was_marked_incrementally_) {
- if (heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
- heap_->embedder_heap_tracer()->TracePrologue();
- }
- }
-
- if (heap_->UsingEmbedderHeapTracer()) {
- heap_->embedder_heap_tracer()->EnterFinalPause();
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
+ heap_->local_embedder_heap_tracer()->TracePrologue();
}
// Don't start compaction if we are in the middle of incremental
@@ -874,13 +863,6 @@ void MarkCompactCollector::Finish() {
}
heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
-
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- DCHECK(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
}
@@ -914,6 +896,8 @@ void MarkCompactCollector::Finish() {
void CodeFlusher::ProcessJSFunctionCandidates() {
Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+ Code* interpreter_entry_trampoline =
+ isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
Object* undefined = isolate_->heap()->undefined_value();
JSFunction* candidate = jsfunction_candidates_head_;
@@ -936,8 +920,13 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
if (!shared->OptimizedCodeMapIsCleared()) {
shared->ClearOptimizedCodeMap();
}
- shared->set_code(lazy_compile);
- candidate->set_code(lazy_compile);
+ if (shared->HasBytecodeArray()) {
+ shared->set_code(interpreter_entry_trampoline);
+ candidate->set_code(interpreter_entry_trampoline);
+ } else {
+ shared->set_code(lazy_compile);
+ candidate->set_code(lazy_compile);
+ }
} else {
DCHECK(Marking::IsBlack(code_mark));
candidate->set_code(code);
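This hunk and the matching one in ProcessSharedFunctionInfoCandidates below distinguish bytecode-backed functions: flushing must not detach them from their still-live bytecode, so they are pointed at the interpreter entry trampoline instead of the lazy-compile stub. The shared decision, restated:

    Code* target = shared->HasBytecodeArray()
                       ? isolate_->builtins()->builtin(
                             Builtins::kInterpreterEntryTrampoline)
                       : isolate_->builtins()->builtin(Builtins::kCompileLazy);
    shared->set_code(target);
    candidate->set_code(target);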
@@ -964,7 +953,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
-
+ Code* interpreter_entry_trampoline =
+ isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
SharedFunctionInfo* next_candidate;
while (candidate != NULL) {
@@ -983,7 +973,11 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
if (!candidate->OptimizedCodeMapIsCleared()) {
candidate->ClearOptimizedCodeMap();
}
- candidate->set_code(lazy_compile);
+ if (candidate->HasBytecodeArray()) {
+ candidate->set_code(interpreter_entry_trampoline);
+ } else {
+ candidate->set_code(lazy_compile);
+ }
}
Object** code_slot =
@@ -1083,6 +1077,39 @@ void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
}
}
+class StaticYoungGenerationMarkingVisitor
+ : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
+ public:
+ static void Initialize(Heap* heap) {
+ StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
+ }
+
+ inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
+ Object* target = *p;
+ if (heap->InNewSpace(target)) {
+ if (MarkRecursively(heap, HeapObject::cast(target))) return;
+ PushOnMarkingDeque(heap, target);
+ }
+ }
+
+ protected:
+ inline static void PushOnMarkingDeque(Heap* heap, Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
+ heap->mark_compact_collector()->MarkObject(object, mark_bit);
+ }
+
+ inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
+ StackLimitCheck check(heap->isolate());
+ if (check.HasOverflowed()) return false;
+
+ MarkBit mark = ObjectMarking::MarkBitFrom(object);
+ if (Marking::IsBlackOrGrey(mark)) return true;
+ heap->mark_compact_collector()->SetMark(object, mark);
+ IterateBody(object->map(), object);
+ return true;
+ }
+};
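The new visitor marks the young generation by direct recursion on the C++ stack, with the marking deque as an escape hatch: StackLimitCheck aborts the recursion near the stack limit and the object is pushed for iterative draining instead. The control flow, restated:

    // VisitPointer(p):
    //   if (*p is in new space)
    //     if (!MarkRecursively(heap, *p))   // stack limit was hit
    //       PushOnMarkingDeque(heap, *p)    // drained later, iteratively
    // MarkRecursively(obj):
    //   returns false on stack overflow; otherwise ensures obj is marked
    //   and recurses into its body via IterateBody.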
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
@@ -1336,11 +1363,12 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Visitor class for marking heap roots.
+template <MarkCompactMode mode>
class RootMarkingVisitor : public ObjectVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
@@ -1362,6 +1390,10 @@ class RootMarkingVisitor : public ObjectVisitor {
HeapObject* object = HeapObject::cast(*p);
+ if (mode == MarkCompactMode::YOUNG_GENERATION &&
+ !collector_->heap()->InNewSpace(object))
+ return;
+
MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) return;
@@ -1369,14 +1401,21 @@ class RootMarkingVisitor : public ObjectVisitor {
// Mark the object.
collector_->SetMark(object, mark_bit);
- // Mark the map pointer and body, and push them on the marking stack.
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- collector_->MarkObject(map, map_mark);
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ switch (mode) {
+ case MarkCompactMode::FULL: {
+ // Mark the map pointer and body, and push them on the marking stack.
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
+ collector_->MarkObject(map, map_mark);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
+ } break;
+ case MarkCompactMode::YOUNG_GENERATION:
+ StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+ break;
+ }
// Mark all the objects reachable from the map and body. May leave
// overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ collector_->EmptyMarkingDeque<mode>();
}
MarkCompactCollector* collector_;
@@ -1921,7 +1960,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
- for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
+ for (Page* page : PageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
if (marking_deque()->IsFull()) return;
}
@@ -1946,8 +1985,8 @@ bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
return Marking::IsWhite(mark);
}
-
-void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkStringTable(
+ RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
MarkBit string_table_mark = ObjectMarking::MarkBitFrom(string_table);
@@ -1957,7 +1996,7 @@ void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
}
// Explicitly mark the prefix.
string_table->IteratePrefix(visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
@@ -1966,8 +2005,8 @@ void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
SetMark(site, mark_bit);
}
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::MarkRoots(
+ RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
// Mark the heap roots including global variables, stack variables,
// etc., and all objects reachable from them.
heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
@@ -1977,8 +2016,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ RefillMarkingDeque<MarkCompactMode::FULL>();
+ EmptyMarkingDeque<MarkCompactMode::FULL>();
}
}
@@ -2018,6 +2057,7 @@ void MarkCompactCollector::MarkImplicitRefGroups(
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
+template <MarkCompactMode mode>
void MarkCompactCollector::EmptyMarkingDeque() {
while (!marking_deque()->IsEmpty()) {
HeapObject* object = marking_deque()->Pop();
@@ -2028,10 +2068,17 @@ void MarkCompactCollector::EmptyMarkingDeque() {
DCHECK(!Marking::IsWhite(ObjectMarking::MarkBitFrom(object)));
Map* map = object->map();
- MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
- MarkObject(map, map_mark);
-
- MarkCompactMarkingVisitor::IterateBody(map, object);
+ switch (mode) {
+ case MarkCompactMode::FULL: {
+ MarkBit map_mark = ObjectMarking::MarkBitFrom(map);
+ MarkObject(map, map_mark);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
+ } break;
+ case MarkCompactMode::YOUNG_GENERATION: {
+ DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
+ StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+ } break;
+ }
}
}
@@ -2041,6 +2088,7 @@ void MarkCompactCollector::EmptyMarkingDeque() {
// before sweeping completes. If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
+template <MarkCompactMode mode>
void MarkCompactCollector::RefillMarkingDeque() {
isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
DCHECK(marking_deque()->overflowed());
@@ -2048,18 +2096,17 @@ void MarkCompactCollector::RefillMarkingDeque() {
DiscoverGreyObjectsInNewSpace();
if (marking_deque()->IsFull()) return;
- DiscoverGreyObjectsInSpace(heap()->old_space());
- if (marking_deque()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->code_space());
- if (marking_deque()->IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap()->map_space());
- if (marking_deque()->IsFull()) return;
-
- LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(&lo_it);
- if (marking_deque()->IsFull()) return;
+ if (mode == MarkCompactMode::FULL) {
+ DiscoverGreyObjectsInSpace(heap()->old_space());
+ if (marking_deque()->IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap()->code_space());
+ if (marking_deque()->IsFull()) return;
+ DiscoverGreyObjectsInSpace(heap()->map_space());
+ if (marking_deque()->IsFull()) return;
+ LargeObjectIterator lo_it(heap()->lo_space());
+ DiscoverGreyObjectsWithIterator(&lo_it);
+ if (marking_deque()->IsFull()) return;
+ }
marking_deque()->ClearOverflowed();
}
@@ -2069,12 +2116,14 @@ void MarkCompactCollector::RefillMarkingDeque() {
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
+template <MarkCompactMode mode>
void MarkCompactCollector::ProcessMarkingDeque() {
- EmptyMarkingDeque();
+ EmptyMarkingDeque<mode>();
while (marking_deque()->overflowed()) {
- RefillMarkingDeque();
- EmptyMarkingDeque();
+ RefillMarkingDeque<mode>();
+ EmptyMarkingDeque<mode>();
}
+ DCHECK(marking_deque()->IsEmpty());
}
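
The template keeps one control skeleton for both collection modes: drain the deque, and as long as an overflow was recorded, rescan for grey objects and drain again until a fixpoint. Roughly, with BoundedDeque as a toy stand-in for the marking deque:

    #include <deque>

    struct BoundedDeque {
      std::deque<int> items;
      bool overflowed = false;
      bool IsEmpty() const { return items.empty(); }
    };

    void Drain(BoundedDeque* d) {   // cf. EmptyMarkingDeque
      while (!d->IsEmpty()) d->items.pop_front();
    }

    void Refill(BoundedDeque* d) {  // cf. RefillMarkingDeque
      // A real refill rescans the heap for grey objects; here we just clear
      // the flag to keep the sketch self-contained.
      d->overflowed = false;
    }

    void ProcessAll(BoundedDeque* d) {  // cf. ProcessMarkingDeque
      Drain(d);
      while (d->overflowed) {
        Refill(d);
        Drain(d);
      }
      // Postcondition mirrored by the DCHECK above: the deque is empty.
    }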
// Mark all objects reachable (transitively) from objects on the marking
@@ -2084,23 +2133,33 @@ void MarkCompactCollector::ProcessEphemeralMarking(
DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
bool work_to_do = true;
while (work_to_do) {
- if (heap_->UsingEmbedderHeapTracer()) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
- heap_->RegisterWrappersWithEmbedderHeapTracer();
- heap_->embedder_heap_tracer()->AdvanceTracing(
- 0, EmbedderHeapTracer::AdvanceTracingActions(
- EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
- }
if (!only_process_harmony_weak_collections) {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
- isolate()->global_handles()->IterateObjectGroups(
- visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+ if (heap_->local_embedder_heap_tracer()->InUse()) {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
+ heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+ heap_->local_embedder_heap_tracer()->Trace(
+ 0,
+ EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
+ } else {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
+ isolate()->global_handles()->IterateObjectGroups(
+ visitor, &IsUnmarkedHeapObjectWithHeap);
+ MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
+ }
+ } else {
+ // TODO(mlippautz): We currently do not trace through blink when
+ // discovering new objects reachable from weak roots (that have been made
+ // strong). This is a limitation of not having a separate handle type
+ // that doesn't require zapping before this phase. See crbug.com/668060.
+ heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
}
ProcessWeakCollections();
work_to_do = !marking_deque()->IsEmpty();
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
+ CHECK(marking_deque()->IsEmpty());
+ CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
@@ -2114,7 +2173,7 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
if (!code->CanDeoptAt(it.frame()->pc())) {
Code::BodyDescriptor::IterateBody(code, visitor);
}
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
return;
}
}
@@ -2154,6 +2213,7 @@ void MarkingDeque::StartUsing() {
void MarkingDeque::StopUsing() {
base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!in_use_) return;
DCHECK(IsEmpty());
DCHECK(!overflowed_);
top_ = bottom_ = mask_ = 0;
@@ -2267,6 +2327,95 @@ void MarkCompactCollector::RecordObjectStats() {
}
}
+SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
+ Heap* heap, Address slot_address) {
+ Object* object = *reinterpret_cast<Object**>(slot_address);
+ if (heap->InNewSpace(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in ToSpace.
+ DCHECK(heap->InToSpace(object));
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(heap_object);
+ if (Marking::IsBlackOrGrey(mark_bit)) {
+ return KEEP_SLOT;
+ }
+ heap->mark_compact_collector()->SetMark(heap_object, mark_bit);
+ StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
+ heap_object);
+ return KEEP_SLOT;
+ }
+ return REMOVE_SLOT;
+}
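
CheckAndMarkObject is thus both a marker and a filter for the OLD_TO_NEW remembered set: KEEP_SLOT retains slots that still point into new space, REMOVE_SLOT drops stale ones. A hedged sketch of that callback contract, with std::set standing in for V8's slot sets:

    #include <cstdint>
    #include <iterator>
    #include <set>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    // Visit every recorded slot; the callback decides, per slot, whether it
    // survives the pass (mirroring RememberedSet<OLD_TO_NEW>::Iterate).
    template <typename Callback>
    void IterateAndFilter(std::set<uintptr_t>* slots, Callback callback) {
      for (auto it = slots->begin(); it != slots->end();) {
        it = (callback(*it) == REMOVE_SLOT) ? slots->erase(it) : std::next(it);
      }
    }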
+
+static bool IsUnmarkedObject(Heap* heap, Object** p) {
+ DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
+ return heap->InNewSpace(*p) &&
+ !Marking::IsBlack(ObjectMarking::MarkBitFrom(HeapObject::cast(*p)));
+}
+
+void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
+
+ PostponeInterruptsScope postpone(isolate());
+
+ StaticYoungGenerationMarkingVisitor::Initialize(heap());
+ RootMarkingVisitor<MarkCompactMode::YOUNG_GENERATION> root_visitor(heap());
+
+ marking_deque()->StartUsing();
+
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &Heap::IsUnmodifiedHeapObject);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
+ RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
+ return CheckAndMarkObject(heap(), addr);
+ });
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ heap(), [this](SlotType type, Address host_addr, Address addr) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate(), type, addr, [this](Object** addr) {
+ return CheckAndMarkObject(heap(),
+ reinterpret_cast<Address>(addr));
+ });
+ });
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
+ heap()->VisitEncounteredWeakCollections(&root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ if (is_code_flushing_enabled()) {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_MARK_CODE_FLUSH_CANDIDATES);
+ code_flusher()->IteratePointersToFromSpace(&root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
+ isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
+ &IsUnmarkedObject);
+ isolate()
+ ->global_handles()
+ ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
+ &root_visitor);
+ ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
+ }
+
+ marking_deque()->StopUsing();
+}
+
void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
// The recursive GC marker detects when it is nearing stack overflow,
@@ -2291,12 +2440,14 @@ void MarkCompactCollector::MarkLiveObjects() {
marking_deque()->StartUsing();
+ heap_->local_embedder_heap_tracer()->EnterFinalPause();
+
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
PrepareForCodeFlushing();
}
- RootMarkingVisitor root_visitor(heap());
+ RootMarkingVisitor<MarkCompactMode::FULL> root_visitor(heap());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
@@ -2328,7 +2479,7 @@ void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
heap()->isolate()->global_handles()->IdentifyWeakHandles(
&IsUnmarkedHeapObject);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Then we mark the objects.
@@ -2336,7 +2487,7 @@ void MarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
- ProcessMarkingDeque();
+ ProcessMarkingDeque<MarkCompactMode::FULL>();
}
// Repeat Harmony weak maps marking to mark unmarked objects reachable from
@@ -2347,9 +2498,9 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
ProcessEphemeralMarking(&root_visitor, true);
- if (heap_->UsingEmbedderHeapTracer()) {
+ {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
- heap()->embedder_heap_tracer()->TraceEpilogue();
+ heap()->local_embedder_heap_tracer()->TraceEpilogue();
}
}
}
@@ -2371,8 +2522,8 @@ void MarkCompactCollector::ClearNonLiveReferences() {
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
ExternalStringTableCleaner external_visitor(heap(), nullptr);
- heap()->external_string_table_.Iterate(&external_visitor);
- heap()->external_string_table_.CleanUp();
+ heap()->external_string_table_.IterateAll(&external_visitor);
+ heap()->external_string_table_.CleanUpAll();
}
{
@@ -2578,8 +2729,8 @@ bool MarkCompactCollector::CompactTransitionArray(
// array disappeared during GC.
int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- transitions, trim * TransitionArray::kTransitionSize);
+ heap_->RightTrimFixedArray(transitions,
+ trim * TransitionArray::kTransitionSize);
transitions->SetNumberOfTransitions(transition_index);
}
return descriptors_owner_died;
@@ -2597,8 +2748,8 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
if (to_trim > 0) {
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors, to_trim * DescriptorArray::kDescriptorSize);
+ heap_->RightTrimFixedArray(descriptors,
+ to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
@@ -2629,13 +2780,11 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- descriptors->GetEnumCache(), to_trim);
+ heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
- to_trim);
+ heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
}
@@ -2890,7 +3039,7 @@ static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
// Append the list of new space pages to be processed.
- for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+ for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
newspace_evacuation_candidates_.Add(p);
}
new_space->Flip();
@@ -3676,7 +3825,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
Address space_end = heap->new_space()->top();
- for (Page* page : NewSpacePageRange(space_start, space_end)) {
+ for (Page* page : PageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index de182073ea..9952b7953d 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -17,6 +17,8 @@
namespace v8 {
namespace internal {
+enum class MarkCompactMode { FULL, YOUNG_GENERATION };
+
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
@@ -29,17 +31,15 @@ typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
+template <MarkCompactMode mode>
class RootMarkingVisitor;
class ObjectMarking : public AllStatic {
public:
- INLINE(static MarkBit MarkBitFrom(Address addr)) {
- MemoryChunk* p = MemoryChunk::FromAddress(addr);
- return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
- }
-
- INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
- return MarkBitFrom(reinterpret_cast<Address>(obj));
+ V8_INLINE static MarkBit MarkBitFrom(HeapObject* obj) {
+ const Address address = obj->address();
+ MemoryChunk* p = MemoryChunk::FromAddress(address);
+ return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(address));
}
static Marking::ObjectColor Color(HeapObject* obj) {
@@ -416,6 +416,9 @@ class MarkCompactCollector {
static void Initialize();
+ static SlotCallbackResult CheckAndMarkObject(Heap* heap,
+ Address slot_address);
+
void SetUp();
void TearDown();
@@ -435,12 +438,6 @@ class MarkCompactCollector {
void AbortCompaction();
-#ifdef DEBUG
- // Checks whether performing mark-compact collection.
- bool in_use() { return state_ > PREPARE_GC; }
- bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
@@ -458,15 +455,6 @@ class MarkCompactCollector {
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-#ifdef VERIFY_HEAP
- void VerifyValidStoreAndSlotsBufferEntries();
- void VerifyMarkbitsAreClean();
- static void VerifyMarkbitsAreClean(PagedSpace* space);
- static void VerifyMarkbitsAreClean(NewSpace* space);
- void VerifyWeakEmbeddedObjectsInCode();
- void VerifyOmittedMapChecks();
-#endif
-
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
return Page::FromAddress(reinterpret_cast<Address>(host))
->ShouldSkipEvacuationSlotRecording();
@@ -493,8 +481,6 @@ class MarkCompactCollector {
bool is_compacting() const { return compacting_; }
- MarkingParity marking_parity() { return marking_parity_; }
-
// Ensures that sweeping is finished.
//
// Note: Can only be called safely from main thread.
@@ -525,6 +511,21 @@ class MarkCompactCollector {
Sweeper& sweeper() { return sweeper_; }
+#ifdef DEBUG
+  // Checks whether a mark-compact collection is in progress.
+ bool in_use() { return state_ > PREPARE_GC; }
+ bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+#endif
+
+#ifdef VERIFY_HEAP
+ void VerifyValidStoreAndSlotsBufferEntries();
+ void VerifyMarkbitsAreClean();
+ static void VerifyMarkbitsAreClean(PagedSpace* space);
+ static void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyWeakEmbeddedObjectsInCode();
+ void VerifyOmittedMapChecks();
+#endif
+
private:
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor;
@@ -564,8 +565,10 @@ class MarkCompactCollector {
friend class MarkCompactMarkingVisitor;
friend class MarkingVisitor;
friend class RecordMigratedSlotVisitor;
+ template <MarkCompactMode mode>
friend class RootMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
+ friend class StaticYoungGenerationMarkingVisitor;
// Mark code objects that are active on the stack to prevent them
// from being flushed.
@@ -575,6 +578,8 @@ class MarkCompactCollector {
// Marking operations for objects reachable from roots.
void MarkLiveObjects();
+ // Mark the young generation.
+ void MarkLiveObjectsInYoungGeneration();
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
@@ -593,14 +598,15 @@ class MarkCompactCollector {
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
// Mark the heap roots and all objects reachable from them.
- void MarkRoots(RootMarkingVisitor* visitor);
+ void MarkRoots(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
// Mark the string table specially. References to internalized strings from
// the string table are weak.
- void MarkStringTable(RootMarkingVisitor* visitor);
+ void MarkStringTable(RootMarkingVisitor<MarkCompactMode::FULL>* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
+ template <MarkCompactMode mode>
void ProcessMarkingDeque();
// Mark objects reachable (transitively) from objects in the marking stack
@@ -624,11 +630,13 @@ class MarkCompactCollector {
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
// overflow flag will be set.
+ template <MarkCompactMode mode>
void EmptyMarkingDeque();
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
// flag on the marking stack.
+ template <MarkCompactMode mode>
void RefillMarkingDeque();
// Helper methods for refilling the marking stack by discovering grey objects
@@ -733,8 +741,6 @@ class MarkCompactCollector {
CollectorState state_;
#endif
- MarkingParity marking_parity_;
-
bool was_marked_incrementally_;
bool evacuation_;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 2aed4c714a..3645547ef5 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -17,6 +17,8 @@ const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
+const double MemoryReducer::kCommittedMemoryFactor = 1.1;
+const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
: CancelableTask(memory_reducer->heap()->isolate()),
@@ -47,6 +49,7 @@ void MemoryReducer::TimerTask::RunInternal() {
event.can_start_incremental_gc =
heap->incremental_marking()->IsStopped() &&
(heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
+ event.committed_memory = heap->CommittedOldGenerationMemory();
memory_reducer_->NotifyTimer(event);
}
@@ -128,17 +131,30 @@ bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
MemoryReducer::State MemoryReducer::Step(const State& state,
const Event& event) {
if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
- return State(kDone, 0, 0, state.last_gc_time_ms);
+ return State(kDone, 0, 0, state.last_gc_time_ms, 0);
}
switch (state.action) {
case kDone:
if (event.type == kTimer) {
return state;
+ } else if (event.type == kMarkCompact) {
+ if (event.committed_memory <
+ Max(static_cast<size_t>(state.committed_memory_at_last_run *
+ kCommittedMemoryFactor),
+ state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
+ return state;
+ } else {
+ return State(kWait, 0, event.time_ms + kLongDelayMs,
+ event.type == kMarkCompact ? event.time_ms
+ : state.last_gc_time_ms,
+ 0);
+ }
} else {
- DCHECK(event.type == kPossibleGarbage || event.type == kMarkCompact);
+ DCHECK_EQ(kPossibleGarbage, event.type);
return State(
kWait, 0, event.time_ms + kLongDelayMs,
- event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms);
+ event.type == kMarkCompact ? event.time_ms : state.last_gc_time_ms,
+ 0);
}
case kWait:
switch (event.type) {
@@ -146,23 +162,24 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
return state;
case kTimer:
if (state.started_gcs >= kMaxNumberOfGCs) {
- return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms);
+ return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
+ event.committed_memory);
} else if (event.can_start_incremental_gc &&
(event.should_start_incremental_gc ||
WatchdogGC(state, event))) {
if (state.next_gc_start_ms <= event.time_ms) {
return State(kRun, state.started_gcs + 1, 0.0,
- state.last_gc_time_ms);
+ state.last_gc_time_ms, 0);
} else {
return state;
}
} else {
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- state.last_gc_time_ms);
+ state.last_gc_time_ms, 0);
}
case kMarkCompact:
return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
- event.time_ms);
+ event.time_ms, 0);
}
case kRun:
if (event.type != kMarkCompact) {
@@ -171,14 +188,15 @@ MemoryReducer::State MemoryReducer::Step(const State& state,
if (state.started_gcs < kMaxNumberOfGCs &&
(event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
- event.time_ms);
+ event.time_ms, 0);
} else {
- return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms);
+ return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
+ event.committed_memory);
}
}
}
UNREACHABLE();
- return State(kDone, 0, 0, 0.0); // Make the compiler happy.
+ return State(kDone, 0, 0, 0.0, 0); // Make the compiler happy.
}
@@ -192,7 +210,7 @@ void MemoryReducer::ScheduleTimer(double time_ms, double delay_ms) {
isolate, timer_task, (delay_ms + kSlackMs) / 1000.0);
}
-void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0); }
+void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0, 0.0, 0); }
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/memory-reducer.h b/deps/v8/src/heap/memory-reducer.h
index 0421987a3c..0f0ad6eaa0 100644
--- a/deps/v8/src/heap/memory-reducer.h
+++ b/deps/v8/src/heap/memory-reducer.h
@@ -86,15 +86,17 @@ class V8_EXPORT_PRIVATE MemoryReducer {
struct State {
State(Action action, int started_gcs, double next_gc_start_ms,
- double last_gc_time_ms)
+ double last_gc_time_ms, size_t committed_memory_at_last_run)
: action(action),
started_gcs(started_gcs),
next_gc_start_ms(next_gc_start_ms),
- last_gc_time_ms(last_gc_time_ms) {}
+ last_gc_time_ms(last_gc_time_ms),
+ committed_memory_at_last_run(committed_memory_at_last_run) {}
Action action;
int started_gcs;
double next_gc_start_ms;
double last_gc_time_ms;
+ size_t committed_memory_at_last_run;
};
enum EventType { kTimer, kMarkCompact, kPossibleGarbage };
@@ -102,6 +104,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
struct Event {
EventType type;
double time_ms;
+ size_t committed_memory;
bool next_gc_likely_to_collect_more;
bool should_start_incremental_gc;
bool can_start_incremental_gc;
@@ -109,7 +112,7 @@ class V8_EXPORT_PRIVATE MemoryReducer {
explicit MemoryReducer(Heap* heap)
: heap_(heap),
- state_(kDone, 0, 0.0, 0.0),
+ state_(kDone, 0, 0.0, 0.0, 0),
js_calls_counter_(0),
js_calls_sample_time_ms_(0.0) {}
// Callbacks.
@@ -126,6 +129,12 @@ class V8_EXPORT_PRIVATE MemoryReducer {
static const int kShortDelayMs;
static const int kWatchdogDelayMs;
static const int kMaxNumberOfGCs;
+ // The committed memory has to increase by at least this factor since the
+ // last run in order to trigger a new run after mark-compact.
+ static const double kCommittedMemoryFactor;
+ // The committed memory has to increase by at least this amount since the
+ // last run in order to trigger a new run after mark-compact.
+ static const size_t kCommittedMemoryDelta;
Heap* heap() { return heap_; }
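
Both constants feed a single predicate in Step(): after a mark-compact in the kDone state, a new reducer run starts only once committed memory exceeds max(last * kCommittedMemoryFactor, last + kCommittedMemoryDelta). A standalone sketch of that check (sizes in bytes):

    #include <algorithm>
    #include <cstddef>

    bool ShouldStartNewRun(size_t committed_now, size_t committed_at_last_run) {
      const double kCommittedMemoryFactor = 1.1;
      const size_t kCommittedMemoryDelta = 10 * 1024 * 1024;  // 10 MB
      const size_t threshold = std::max(
          static_cast<size_t>(committed_at_last_run * kCommittedMemoryFactor),
          committed_at_last_run + kCommittedMemoryDelta);
      return committed_now >= threshold;
    }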
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index ef5f65734e..9f534a20e4 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -441,10 +441,8 @@ void ObjectStatsCollector::RecordJSCollectionDetails(JSObject* obj) {
}
void ObjectStatsCollector::RecordScriptDetails(Script* obj) {
- Object* infos = WeakFixedArray::cast(obj->shared_function_infos());
- if (infos->IsWeakFixedArray())
- RecordFixedArrayHelper(obj, WeakFixedArray::cast(infos),
- SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
+ FixedArray* infos = FixedArray::cast(obj->shared_function_infos());
+ RecordFixedArrayHelper(obj, infos, SHARED_FUNCTION_INFOS_SUB_TYPE, 0);
}
void ObjectStatsCollector::RecordMapDetails(Map* map_obj) {
@@ -546,13 +544,6 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
if (!feedback_metadata->is_empty()) {
RecordFixedArrayHelper(sfi, feedback_metadata,
TYPE_FEEDBACK_METADATA_SUB_TYPE, 0);
- Object* names =
- feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex);
- if (!names->IsSmi()) {
- UnseededNumberDictionary* names = UnseededNumberDictionary::cast(
- feedback_metadata->get(TypeFeedbackMetadata::kNamesTableIndex));
- RecordHashTableHelper(sfi, names, TYPE_FEEDBACK_METADATA_SUB_TYPE);
- }
}
if (!sfi->OptimizedCodeMapIsCleared()) {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index f3502568d6..d86406bf5f 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -60,7 +60,6 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
int>::Visit);
table_.Register(kVisitByteArray, &VisitByteArray);
- table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(
kVisitSharedFunctionInfo,
@@ -103,19 +102,11 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
kVisitStructGeneric>();
-}
-template <typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
- Map* map, HeapObject* object) {
- VisitPointers(
- map->GetHeap(), object,
- HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
- HeapObject::RawField(object, BytecodeArray::kFrameSizeOffset));
- return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
+ table_.Register(kVisitBytecodeArray, &UnreachableVisitor);
+ table_.Register(kVisitSharedFunctionInfo, &UnreachableVisitor);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
@@ -157,10 +148,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(
- kVisitBytecodeArray,
- &FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
@@ -286,7 +274,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
StaticVisitor::MarkObject(heap, target);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
Heap* heap, RelocInfo* rinfo) {
@@ -298,6 +285,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
StaticVisitor::MarkObject(heap, target);
}
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
+ Map* map, HeapObject* object) {
+ FixedBodyVisitor<StaticVisitor, BytecodeArray::MarkingBodyDescriptor,
+ void>::Visit(map, object);
+ BytecodeArray::cast(object)->MakeOlder();
+}
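
MakeOlder is the marking-time half of bytecode aging: each full GC ages a BytecodeArray, so only arrays that stay untouched across several cycles become candidates for flushing. A toy sketch of that idea; the threshold and the reset point are assumptions for illustration, not V8's actual values:

    struct AgedBytecode {
      static constexpr int kIsOldAge = 6;  // assumed threshold
      int age = 0;
      void MakeOlder() { if (age < kIsOldAge) ++age; }  // during marking
      void MarkUsed() { age = 0; }                      // assumed: on execution
      bool IsOld() const { return age >= kIsOldAge; }   // flush candidate
    };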
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
@@ -421,7 +415,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
Heap* heap = map->GetHeap();
Code* code = Code::cast(object);
if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
- code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+ code->MakeOlder();
}
CodeBodyVisitor::Visit(map, object);
}
@@ -435,12 +429,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
- if (FLAG_flush_optimized_code_cache) {
- if (!shared->OptimizedCodeMapIsCleared()) {
- // Always flush the optimized code map if requested by flag.
- shared->ClearOptimizedCodeMap();
- }
- }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
@@ -600,8 +588,8 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // The function must not be a builtin.
- if (shared_info->IsBuiltin()) {
+ // The function must be user code.
+ if (!shared_info->IsUserJavaScript()) {
return false;
}
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index d4aa8b2f00..146aa58675 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -107,7 +107,6 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_ARGUMENTS_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
@@ -159,6 +158,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE:
case JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_BOUND_FUNCTION_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 633c277eb0..e35e47c3aa 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
+#include "src/heap/embedder-tracing.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
@@ -267,12 +268,17 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
// Although we are using the JSFunction body descriptor which does not
// visit the code entry, compiler wants it to be accessible.
// See JSFunction::BodyDescriptorImpl.
- INLINE(static void VisitCodeEntry(Heap* heap, HeapObject* object,
- Address entry_address)) {
+ inline static void VisitCodeEntry(Heap* heap, HeapObject* object,
+ Address entry_address) {
UNREACHABLE();
}
private:
+ inline static int UnreachableVisitor(Map* map, HeapObject* object) {
+ UNREACHABLE();
+ return 0;
+ }
+
INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
@@ -300,8 +306,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return FreeSpace::cast(object)->size();
}
- INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
-
class DataObjectVisitor {
public:
template <int object_size>
@@ -372,6 +376,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
protected:
INLINE(static void VisitMap(Map* map, HeapObject* object));
INLINE(static void VisitCode(Map* map, HeapObject* object));
+ INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
@@ -420,7 +425,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
private:
INLINE(static void TracePossibleWrapper(HeapObject* object)) {
- if (object->GetHeap()->UsingEmbedderHeapTracer()) {
+ if (object->GetHeap()->local_embedder_heap_tracer()->InUse()) {
DCHECK(object->IsJSObject());
object->GetHeap()->TracePossibleWrapper(JSObject::cast(object));
}
diff --git a/deps/v8/src/heap/remembered-set.h b/deps/v8/src/heap/remembered-set.h
index a625b13dbf..cf17a46821 100644
--- a/deps/v8/src/heap/remembered-set.h
+++ b/deps/v8/src/heap/remembered-set.h
@@ -17,7 +17,7 @@ enum PointerDirection { OLD_TO_OLD, OLD_TO_NEW };
// TODO(ulan): Investigate performance of de-templatizing this class.
template <PointerDirection direction>
-class RememberedSet {
+class RememberedSet : public AllStatic {
public:
// Given a page and a slot in that page, this function adds the slot to the
// remembered set.
@@ -31,6 +31,19 @@ class RememberedSet {
slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
}
+ // Given a page and a slot in that page, this function returns true if
+ // the remembered set contains the slot.
+ static bool Contains(MemoryChunk* chunk, Address slot_addr) {
+ DCHECK(chunk->Contains(slot_addr));
+ SlotSet* slot_set = GetSlotSet(chunk);
+ if (slot_set == nullptr) {
+ return false;
+ }
+ uintptr_t offset = slot_addr - chunk->address();
+ return slot_set[offset / Page::kPageSize].Contains(offset %
+ Page::kPageSize);
+ }
+
// Given a page and a slot in that page, this function removes the slot from
// the remembered set.
// If the slot was never added, then the function does nothing.
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index cad0e8af25..f2722e81de 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -139,7 +139,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (marks_handling == TRANSFER_MARKS) {
if (IncrementalMarking::TransferColor(source, target, size)) {
- MemoryChunk::IncrementLiveBytesFromGC(target, size);
+ MemoryChunk::IncrementLiveBytes(target, size);
}
}
}
diff --git a/deps/v8/src/heap/slot-set.h b/deps/v8/src/heap/slot-set.h
index da61052b8a..7612199c3c 100644
--- a/deps/v8/src/heap/slot-set.h
+++ b/deps/v8/src/heap/slot-set.h
@@ -66,6 +66,18 @@ class SlotSet : public Malloced {
}
// The slot offset specifies a slot at address page_start_ + slot_offset.
+ // Returns true if the set contains the slot.
+ bool Contains(int slot_offset) {
+ int bucket_index, cell_index, bit_index;
+ SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
+ base::AtomicValue<uint32_t>* current_bucket = bucket[bucket_index].Value();
+ if (current_bucket == nullptr) {
+ return false;
+ }
+ return (current_bucket[cell_index].Value() & (1u << bit_index)) != 0;
+ }
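
Contains follows the same offset decomposition as Insert and Remove: the slot offset is reduced to a bucket, a 32-bit cell within the bucket, and a bit within the cell. A hypothetical sketch of that math; the concrete sizes below are assumptions for illustration, not V8's exact constants:

    #include <cstdint>

    constexpr int kPointerSizeLog2 = 3;   // 8-byte slots (64-bit build assumed)
    constexpr int kBitsPerCell = 32;
    constexpr int kCellsPerBucket = 32;   // assumed bucket width

    void SlotToIndices(int slot_offset, int* bucket, int* cell, int* bit) {
      const int slot = slot_offset >> kPointerSizeLog2;  // one bit per slot
      *bucket = slot / (kCellsPerBucket * kBitsPerCell);
      const int in_bucket = slot % (kCellsPerBucket * kBitsPerCell);
      *cell = in_bucket / kBitsPerCell;
      *bit = in_bucket % kBitsPerCell;
    }

    // Contains() then reduces to a nullptr check plus one masked load:
    //   (cells[cell] & (1u << bit)) != 0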
+
+ // The slot offset specifies a slot at address page_start_ + slot_offset.
void Remove(int slot_offset) {
int bucket_index, cell_index, bit_index;
SlotToIndices(slot_offset, &bucket_index, &cell_index, &bit_index);
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index f3f9215f3d..2079a80a0b 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -28,10 +28,14 @@ PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
return tmp;
}
-NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
- : range_(Page::FromAddress(start),
- Page::FromAllocationAreaAddress(limit)->next_page()) {
- SemiSpace::AssertValidRange(start, limit);
+PageRange::PageRange(Address start, Address limit)
+ : begin_(Page::FromAddress(start)),
+ end_(Page::FromAllocationAreaAddress(limit)->next_page()) {
+#ifdef DEBUG
+ if (begin_->InNewSpace()) {
+ SemiSpace::AssertValidRange(start, limit);
+ }
+#endif // DEBUG
}
// -----------------------------------------------------------------------------
@@ -221,7 +225,7 @@ void Page::InitializeFreeListCategories() {
}
}
-void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
+void MemoryChunk::IncrementLiveBytes(HeapObject* object, int by) {
MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}
@@ -244,18 +248,8 @@ void MemoryChunk::IncrementLiveBytes(int by) {
DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}
-void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
- static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
- }
- chunk->IncrementLiveBytes(by);
-}
-
bool PagedSpace::Contains(Address addr) {
- Page* p = Page::FromAddress(addr);
- if (!Page::IsValid(p)) return false;
- return p->owner() == this;
+ return MemoryChunk::FromAnyPointerAddress(heap(), addr)->owner() == this;
}
bool PagedSpace::Contains(Object* o) {
@@ -288,7 +282,7 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
uintptr_t offset = addr - chunk->address();
if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
- chunk = heap->lo_space()->FindPage(addr);
+ chunk = heap->lo_space()->FindPageThreadSafe(addr);
}
return chunk;
}
@@ -436,11 +430,10 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
if (object == NULL) {
object = SlowAllocateRaw(size_in_bytes);
}
- if (object != NULL) {
- if (heap()->incremental_marking()->black_allocation()) {
- Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
- MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
- }
+ if (object != NULL && heap()->incremental_marking()->black_allocation()) {
+ Address start = object->address();
+ Address end = object->address() + size_in_bytes;
+ Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
}
}
@@ -479,12 +472,19 @@ AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
if (object == NULL) {
object = SlowAllocateRaw(allocation_size);
}
- if (object != NULL && filler_size != 0) {
- object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
- alignment);
- // Filler objects are initialized, so mark only the aligned object memory
- // as uninitialized.
- allocation_size = size_in_bytes;
+ if (object != NULL) {
+ if (heap()->incremental_marking()->black_allocation()) {
+ Address start = object->address();
+ Address end = object->address() + allocation_size;
+ Page::FromAllocationAreaAddress(start)->CreateBlackArea(start, end);
+ }
+ if (filler_size != 0) {
+ object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+ alignment);
+ // Filler objects are initialized, so mark only the aligned object
+ // memory as uninitialized.
+ allocation_size = size_in_bytes;
+ }
}
}
@@ -596,6 +596,17 @@ LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
FATAL("Code page is too large.");
}
heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+ MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());
+
+ // Initialize the owner field for each contained page (except the first, which
+ // is initialized by MemoryChunk::Initialize).
+ for (Address addr = chunk->address() + Page::kPageSize + Page::kOwnerOffset;
+ addr < chunk->area_end(); addr += Page::kPageSize) {
+ // Clear out kPageHeaderTag.
+ Memory::Address_at(addr) = 0;
+ }
+
return static_cast<LargePage*>(chunk);
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index e0e6d12fda..8d98520d43 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -335,7 +335,7 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
private:
// v8::Task overrides.
void Run() override {
- unmapper_->PerformFreeMemoryOnQueuedChunks();
+ unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
unmapper_->pending_unmapping_tasks_semaphore_.Signal();
}
@@ -350,7 +350,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
concurrent_unmapping_tasks_active_++;
} else {
- PerformFreeMemoryOnQueuedChunks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
}
}
@@ -364,6 +364,7 @@ bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
return waited;
}
+template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
MemoryChunk* chunk = nullptr;
// Regular chunks.
@@ -372,6 +373,14 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
}
+ if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+    // The previous loop uncommitted any pages marked as pooled and added them
+    // to the pooled list. For kReleasePooled those pages must additionally be
+    // freed.
+ while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
+ allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+ }
+ }
// Non-regular chunks.
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
allocator_->PerformFreeMemory(chunk);
@@ -382,7 +391,10 @@ void MemoryAllocator::Unmapper::TearDown() {
WaitUntilCompleted();
ReconsiderDelayedChunks();
CHECK(delayed_regular_chunks_.empty());
- PerformFreeMemoryOnQueuedChunks();
+ PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+ for (int i = 0; i < kNumberOfChunkQueues; i++) {
+ DCHECK(chunks_[i].empty());
+ }
}
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
@@ -833,6 +845,16 @@ size_t Page::ShrinkToHighWaterMark() {
return unused;
}
+void Page::CreateBlackArea(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_NE(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ markbits()->SetRange(AddressToMarkbitIndex(start),
+ AddressToMarkbitIndex(end));
+ IncrementLiveBytes(static_cast<int>(end - start));
+}
+
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
@@ -899,6 +921,11 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
+ case kAlreadyPooled:
+ // Pooled pages cannot be touched anymore as their memory is uncommitted.
+ FreeMemory(chunk->address(), static_cast<size_t>(MemoryChunk::kPageSize),
+ Executability::NOT_EXECUTABLE);
+ break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
@@ -909,13 +936,14 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
break;
- default:
- UNREACHABLE();
}
}
template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
+template void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(
+ MemoryChunk* chunk);
+
template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
MemoryChunk* chunk);
@@ -1287,25 +1315,6 @@ bool PagedSpace::ContainsSlow(Address addr) {
return false;
}
-
-Object* PagedSpace::FindObject(Address addr) {
- // Note: this function can only be called on iterable spaces.
- DCHECK(!heap()->mark_compact_collector()->in_use());
-
- if (!Contains(addr)) return Smi::kZero; // Signaling not found.
-
- Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p);
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- Address cur = obj->address();
- Address next = cur + obj->Size();
- if ((cur <= addr) && (addr < next)) return obj;
- }
-
- UNREACHABLE();
- return Smi::kZero;
-}
-
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1361,10 +1370,7 @@ void PagedSpace::SetAllocationInfo(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != nullptr && top != limit &&
heap()->incremental_marking()->black_allocation()) {
- Page* page = Page::FromAllocationAreaAddress(top);
- page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
- page->AddressToMarkbitIndex(limit));
- page->IncrementLiveBytes(static_cast<int>(limit - top));
+ Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
}
}
@@ -1373,10 +1379,8 @@ void PagedSpace::MarkAllocationInfoBlack() {
Address current_top = top();
Address current_limit = limit();
if (current_top != nullptr && current_top != current_limit) {
- Page* page = Page::FromAllocationAreaAddress(current_top);
- page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
- page->AddressToMarkbitIndex(current_limit));
- page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
+ Page::FromAllocationAreaAddress(current_top)
+ ->CreateBlackArea(current_top, current_limit);
}
}
@@ -2095,7 +2099,7 @@ void SemiSpace::set_age_mark(Address mark) {
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
age_mark_ = mark;
// Mark all pages up to the one containing mark.
- for (Page* p : NewSpacePageRange(space_start(), mark)) {
+ for (Page* p : PageRange(space_start(), mark)) {
p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
}
}
@@ -2616,7 +2620,7 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
// Memory in the linear allocation area is counted as allocated. We may free
// a little of this again immediately - see below.
- owner_->Allocate(static_cast<int>(new_node_size));
+ owner_->AccountAllocatedBytes(new_node_size);
if (owner_->heap()->inline_allocation_disabled()) {
// Keep the linear allocation area empty if requested to do so, just
@@ -2806,7 +2810,6 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
}
}
-
HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -2820,7 +2823,6 @@ HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
return nullptr;
}
-
HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -2877,9 +2879,7 @@ void PagedSpace::ReportStatistics() {
", available: %" V8PRIdPTR ", %%%d\n",
Capacity(), Waste(), Available(), pct);
- if (heap()->mark_compact_collector()->sweeping_in_progress()) {
- heap()->mark_compact_collector()->EnsureSweepingCompleted();
- }
+ heap()->mark_compact_collector()->EnsureSweepingCompleted();
ClearHistograms(heap()->isolate());
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
@@ -2994,7 +2994,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
InsertChunkMapEntries(page);
HeapObject* object = page->GetObject();
- MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
if (Heap::ShouldZapGarbage()) {
// Make the object consistent so the heap can be verified in OldSpaceStep.
@@ -3010,7 +3009,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
if (heap()->incremental_marking()->black_allocation()) {
Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
- MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
+ MemoryChunk::IncrementLiveBytes(object, object_size);
}
return object;
}
@@ -3033,6 +3032,10 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Smi::kZero; // Signaling not found.
}
+LargePage* LargeObjectSpace::FindPageThreadSafe(Address a) {
+ base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
+ return FindPage(a);
+}
LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
@@ -3069,6 +3072,9 @@ void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
uintptr_t start = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = (reinterpret_cast<uintptr_t>(page) + (page->size() - 1)) /
MemoryChunk::kAlignment;
+  // There may be concurrent access to the chunk map, so we have to take the
+  // lock here.
+ base::LockGuard<base::Mutex> guard(&chunk_map_mutex_);
for (uintptr_t key = start; key <= limit; key++) {
base::HashMap::Entry* entry = chunk_map_.InsertNew(
reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index f5701adc69..48551fa264 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -317,8 +317,11 @@ class MemoryChunk {
static const intptr_t kAlignmentMask = kAlignment - 1;
static const intptr_t kSizeOffset = 0;
-
- static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+ static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
+ static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
+ static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
+ static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
+ static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
static const size_t kMinHeaderSize =
kSizeOffset + kSizetSize // size_t size
@@ -367,8 +370,7 @@ class MemoryChunk {
static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
- static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
- static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
+ static inline void IncrementLiveBytes(HeapObject* object, int by);
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
@@ -553,10 +555,11 @@ class MemoryChunk {
void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
- if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
- kPageHeaderTag) {
- return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
- kPageHeaderTag);
+ intptr_t owner_value = base::NoBarrierAtomicValue<intptr_t>::FromAddress(
+ const_cast<Address*>(&owner_))
+ ->Value();
+ if ((owner_value & kPageHeaderTagMask) == kPageHeaderTag) {
+ return reinterpret_cast<Space*>(owner_value - kPageHeaderTag);
} else {
return nullptr;
}
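
The rewritten accessor makes the racy read explicit (a no-barrier atomic load) while keeping the existing tagging trick: a Space pointer is stored with kPageHeaderTag in its low bits, so a reader can distinguish a real owner from arbitrary data. A sketch of the tag scheme, with illustrative constant values:

    #include <cstdint>

    constexpr intptr_t kPageHeaderTag = 3;      // illustrative value
    constexpr intptr_t kPageHeaderTagMask = 3;  // low alignment bits

    inline intptr_t TagOwner(void* space) {
      return reinterpret_cast<intptr_t>(space) + kPageHeaderTag;
    }

    inline void* UntagOwner(intptr_t raw) {
      if ((raw & kPageHeaderTagMask) != kPageHeaderTag) return nullptr;
      return reinterpret_cast<void*>(raw - kPageHeaderTag);
    }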
@@ -769,6 +772,8 @@ class Page : public MemoryChunk {
size_t ShrinkToHighWaterMark();
+ void CreateBlackArea(Address start, Address end);
+
#ifdef DEBUG
void Print();
#endif // DEBUG
@@ -1092,7 +1097,7 @@ class SkipList {
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
-class MemoryAllocator {
+class V8_EXPORT_PRIVATE MemoryAllocator {
public:
// Unmapper takes care of concurrently unmapping and uncommitting memory
// chunks.
@@ -1144,6 +1149,11 @@ class MemoryAllocator {
kNumberOfChunkQueues,
};
+ enum class FreeMode {
+ kUncommitPooled,
+ kReleasePooled,
+ };
+
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
base::LockGuard<base::Mutex> guard(&mutex_);
@@ -1165,6 +1175,7 @@ class MemoryAllocator {
}
void ReconsiderDelayedChunks();
+ template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks();
base::Mutex mutex_;
@@ -1187,6 +1198,7 @@ class MemoryAllocator {
enum FreeMode {
kFull,
+ kAlreadyPooled,
kPreFreeAndQueue,
kPooledAndQueue,
};
@@ -1376,6 +1388,15 @@ class MemoryAllocator {
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
+ size_t size, PagedSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
+extern template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
+ size_t size, SemiSpace* owner, Executability executable);
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
@@ -1419,6 +1440,8 @@ class PageRange {
typedef PageIterator iterator;
PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
+ inline PageRange(Address start, Address limit);
+
iterator begin() { return iterator(begin_); }
iterator end() { return iterator(end_); }
@@ -1641,7 +1664,7 @@ class AllocationStats BASE_EMBEDDED {
// words in size.
// At least 16384 words (huge): This list is for objects of 2048 words or
// larger. Empty pages are also added to this list.
-class FreeList {
+class V8_EXPORT_PRIVATE FreeList {
public:
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
@@ -1878,18 +1901,7 @@ class LocalAllocationBuffer {
AllocationInfo allocation_info_;
};
-class NewSpacePageRange {
- public:
- typedef PageRange::iterator iterator;
- inline NewSpacePageRange(Address start, Address limit);
- iterator begin() { return range_.begin(); }
- iterator end() { return range_.end(); }
-
- private:
- PageRange range_;
-};
-
-class PagedSpace : public Space {
+class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
public:
typedef PageIterator iterator;
@@ -1915,12 +1927,6 @@ class PagedSpace : public Space {
inline bool Contains(Object* o);
bool ContainsSlow(Address addr);
- // Given an address occupied by a live object, return that object if it is
- // in this space, or a Smi if it is not. The implementation iterates over
- // objects in the page containing the address, the cost is linear in the
- // number of objects in the page. It may be slow.
- Object* FindObject(Address addr);
-
// During boot the free_space_map is created, and afterwards we may need
// to write it into the free list nodes that were already created.
void RepairFreeListsAfterDeserialization();
@@ -2034,7 +2040,9 @@ class PagedSpace : public Space {
void MarkAllocationInfoBlack();
- void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
+ void AccountAllocatedBytes(size_t bytes) {
+ accounting_stats_.AllocateBytes(bytes);
+ }
void IncreaseCapacity(size_t bytes);
@@ -2820,6 +2828,9 @@ class LargeObjectSpace : public Space {
   // The function iterates through all objects in this space, so it may be slow.
Object* FindObject(Address a);
+  // Takes chunk_map_mutex_ and then calls FindPage.
+ LargePage* FindPageThreadSafe(Address a);
+
// Finds a large object page containing the given address, returns NULL
// if such a page doesn't exist.
LargePage* FindPage(Address a);
@@ -2870,6 +2881,9 @@ class LargeObjectSpace : public Space {
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
+  // chunk_map_mutex_ must be held whenever the chunk map is accessed
+  // concurrently.
+ base::Mutex chunk_map_mutex_;
// Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
base::HashMap chunk_map_;
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 974b85e1c8..94a8ca81b7 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -16,13 +16,19 @@ namespace v8 {
namespace internal {
StoreBuffer::StoreBuffer(Heap* heap)
- : heap_(heap), top_(nullptr), current_(0), virtual_memory_(nullptr) {
+ : heap_(heap),
+ top_(nullptr),
+ current_(0),
+ mode_(NOT_IN_GC),
+ virtual_memory_(nullptr) {
for (int i = 0; i < kStoreBuffers; i++) {
start_[i] = nullptr;
limit_[i] = nullptr;
lazy_top_[i] = nullptr;
}
task_running_ = false;
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
}
void StoreBuffer::SetUp() {
@@ -85,7 +91,7 @@ void StoreBuffer::FlipStoreBuffers() {
current_ = other;
top_ = start_[current_];
- if (!task_running_) {
+ if (!task_running_ && FLAG_concurrent_sweeping) {
task_running_ = true;
Task* task = new Task(heap_->isolate(), this);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
@@ -137,29 +143,5 @@ void StoreBuffer::ConcurrentlyProcessStoreBuffer() {
task_running_ = false;
}
-void StoreBuffer::DeleteEntry(Address start, Address end) {
- // Deletions coming from the GC are directly deleted from the remembered
- // set. Deletions coming from the runtime are added to the store buffer
- // to allow concurrent processing.
- if (heap_->gc_state() == Heap::NOT_IN_GC) {
- if (top_ + sizeof(Address) * 2 > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = MarkDeletionAddress(start);
- top_++;
- *top_ = end;
- top_++;
- } else {
- // In GC the store buffer has to be empty at any time.
- DCHECK(Empty());
- Page* page = Page::FromAddress(start);
- if (end) {
- RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
- SlotSet::PREFREE_EMPTY_BUCKETS);
- } else {
- RememberedSet<OLD_TO_NEW>::Remove(page, start);
- }
- }
-}
} // namespace internal
} // namespace v8
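
Beyond the mode bookkeeping, this file picks up two behaviors: the constructor seeds the new callbacks with the runtime variants, matching the initial NOT_IN_GC mode, and FlipStoreBuffers now only schedules the background drain when concurrent sweeping is enabled. A standalone sketch of that schedule-once gate, with std::thread standing in for V8's platform task runner:

    #include <atomic>
    #include <thread>

    std::atomic<bool> task_running{false};
    bool concurrent_sweeping_enabled = true;  // stand-in for FLAG_concurrent_sweeping

    void MaybeStartProcessingTask() {
      // At most one drain task in flight, and none when the flag is off.
      if (!task_running.load() && concurrent_sweeping_enabled) {
        task_running = true;
        std::thread([] {
          // ... drain the filled buffer into the remembered set ...
          task_running = false;
        }).detach();
      }
    }
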
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 09faf4dcbd..be46cb3242 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -24,7 +24,9 @@ namespace internal {
// slots are moved to the remembered set.
class StoreBuffer {
public:
- static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
+ enum StoreBufferMode { IN_GC, NOT_IN_GC };
+
+ static const int kStoreBufferSize = 1 << (11 + kPointerSizeLog2);
static const int kStoreBufferMask = kStoreBufferSize - 1;
static const int kStoreBuffers = 2;
static const intptr_t kDeletionTag = 1;
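
Worked out, the constant change shrinks each buffer by 8x: on a 64-bit target (kPointerSizeLog2 == 3) the old size was 1 << (14 + 3) = 128 KB and the new one is 1 << (11 + 3) = 16 KB, i.e. 32 KB total for kStoreBuffers == 2:

    static_assert((1 << (14 + 3)) == 128 * 1024, "old per-buffer size on x64");
    static_assert((1 << (11 + 3)) == 16 * 1024, "new per-buffer size on x64");
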
@@ -63,22 +65,77 @@ class StoreBuffer {
// If we only want to delete a single slot, end should be set to null, which
// will be written into the second field. When processing the store buffer,
// the more efficient Remove method will be called in this case.
- void DeleteEntry(Address start, Address end = nullptr);
+ void DeleteEntry(Address start, Address end = nullptr) {
+ // Deletions coming from the GC are directly deleted from the remembered
+ // set. Deletions coming from the runtime are added to the store buffer
+ // to allow concurrent processing.
+ deletion_callback(this, start, end);
+ }
+
+ static void DeleteDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address start, Address end) {
+ // During GC the store buffer has to be empty at all times.
+ DCHECK(store_buffer->Empty());
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ Page* page = Page::FromAddress(start);
+ if (end) {
+ RememberedSet<OLD_TO_NEW>::RemoveRange(page, start, end,
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ } else {
+ RememberedSet<OLD_TO_NEW>::Remove(page, start);
+ }
+ }
+
+ static void DeleteDuringRuntime(StoreBuffer* store_buffer, Address start,
+ Address end) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertDeletionIntoStoreBuffer(start, end);
+ }
+
+ void InsertDeletionIntoStoreBuffer(Address start, Address end) {
+ if (top_ + sizeof(Address) * 2 > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = MarkDeletionAddress(start);
+ top_++;
+ *top_ = end;
+ top_++;
+ }
+
+ static void InsertDuringGarbageCollection(StoreBuffer* store_buffer,
+ Address slot) {
+ DCHECK(store_buffer->mode() != StoreBuffer::NOT_IN_GC);
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ }
+
+ static void InsertDuringRuntime(StoreBuffer* store_buffer, Address slot) {
+ DCHECK(store_buffer->mode() == StoreBuffer::NOT_IN_GC);
+ store_buffer->InsertIntoStoreBuffer(slot);
+ }
+
+ void InsertIntoStoreBuffer(Address slot) {
+ if (top_ + sizeof(Address) > limit_[current_]) {
+ StoreBufferOverflow(heap_->isolate());
+ }
+ *top_ = slot;
+ top_++;
+ }
void InsertEntry(Address slot) {
// Insertions coming from the GC are directly inserted into the remembered
// set. Insertions coming from the runtime are added to the store buffer to
// allow concurrent processing.
- if (heap_->gc_state() == Heap::NOT_IN_GC) {
- if (top_ + sizeof(Address) > limit_[current_]) {
- StoreBufferOverflow(heap_->isolate());
- }
- *top_ = slot;
- top_++;
+ insertion_callback(this, slot);
+ }
+
+ void SetMode(StoreBufferMode mode) {
+ mode_ = mode;
+ if (mode == NOT_IN_GC) {
+ insertion_callback = &InsertDuringRuntime;
+ deletion_callback = &DeleteDuringRuntime;
} else {
- // In GC the store buffer has to be empty at any time.
- DCHECK(Empty());
- RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ insertion_callback = &InsertDuringGarbageCollection;
+ deletion_callback = &DeleteDuringGarbageCollection;
}
}
@@ -95,6 +152,8 @@ class StoreBuffer {
return top_ == start_[current_];
}
+ Heap* heap() { return heap_; }
+
private:
// There are two store buffers. If one store buffer fills up, the main thread
// publishes the top pointer of the store buffer that needs processing in its
@@ -119,6 +178,8 @@ class StoreBuffer {
DISALLOW_COPY_AND_ASSIGN(Task);
};
+ StoreBufferMode mode() const { return mode_; }
+
void FlipStoreBuffers();
Heap* heap_;
@@ -142,7 +203,17 @@ class StoreBuffer {
// Points to the current buffer in use.
int current_;
+ // During GC, entries are directly added to the remembered set without
+ // going through the store buffer. This is signaled by a special
+ // IN_GC mode.
+ StoreBufferMode mode_;
+
base::VirtualMemory* virtual_memory_;
+
+ // Callbacks are more efficient than re-reading the GC state on every
+ // store buffer operation.
+ std::function<void(StoreBuffer*, Address)> insertion_callback;
+ std::function<void(StoreBuffer*, Address, Address)> deletion_callback;
};
} // namespace internal
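
The header now dispatches every insertion and deletion through a std::function that SetMode swaps between runtime and GC variants, so the hot path pays one indirect call instead of re-reading the GC state. A standalone model of the scheme (not V8 API; ModelBuffer and its members are illustrative names):

    #include <cassert>
    #include <functional>

    struct ModelBuffer {
      enum Mode { IN_GC, NOT_IN_GC };

      static void InsertDuringRuntime(ModelBuffer* b, int slot) {
        assert(b->mode == NOT_IN_GC);
        // Real code: append the slot to the store buffer for later processing.
      }
      static void InsertDuringGC(ModelBuffer* b, int slot) {
        assert(b->mode == IN_GC);
        // Real code: write the slot straight into the remembered set.
      }

      void SetMode(Mode m) {
        mode = m;
        insert = (m == NOT_IN_GC) ? &ModelBuffer::InsertDuringRuntime
                                  : &ModelBuffer::InsertDuringGC;
      }
      void InsertEntry(int slot) { insert(this, slot); }  // one indirect call

      Mode mode = NOT_IN_GC;
      std::function<void(ModelBuffer*, int)> insert =
          &ModelBuffer::InsertDuringRuntime;
    };

Presumably the heap flips the mode around a collection (SetMode(IN_GC) on entry, SetMode(NOT_IN_GC) on exit); the caller side is not visible in this diff.
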
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 58b8a8dc5c..d2245ef34a 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -10,6 +10,7 @@
#include "src/api.h"
#include "src/factory.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
#include "unicode/coll.h"
@@ -224,23 +225,6 @@ void SetResolvedDateSettings(Isolate* isolate,
}
-template<int internal_fields, EternalHandles::SingletonHandle field>
-Handle<ObjectTemplateInfo> GetEternal(Isolate* isolate) {
- if (isolate->eternal_handles()->Exists(field)) {
- return Handle<ObjectTemplateInfo>::cast(
- isolate->eternal_handles()->GetSingleton(field));
- }
- v8::Local<v8::ObjectTemplate> raw_template =
- v8::ObjectTemplate::New(reinterpret_cast<v8::Isolate*>(isolate));
- raw_template->SetInternalFieldCount(internal_fields);
- return Handle<ObjectTemplateInfo>::cast(
- isolate->eternal_handles()->CreateSingleton(
- isolate,
- *v8::Utils::OpenHandle(*raw_template),
- field));
-}
-
-
icu::DecimalFormat* CreateICUNumberFormat(
Isolate* isolate,
const icu::Locale& icu_locale,
@@ -702,18 +686,6 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
// static
-Handle<ObjectTemplateInfo> I18N::GetTemplate(Isolate* isolate) {
- return GetEternal<1, i::EternalHandles::I18N_TEMPLATE_ONE>(isolate);
-}
-
-
-// static
-Handle<ObjectTemplateInfo> I18N::GetTemplate2(Isolate* isolate) {
- return GetEternal<2, i::EternalHandles::I18N_TEMPLATE_TWO>(isolate);
-}
-
-
-// static
icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
Isolate* isolate,
Handle<String> locale,
@@ -759,16 +731,7 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Isolate* isolate,
Handle<JSObject> obj) {
- Handle<String> key =
- isolate->factory()->NewStringFromStaticChars("dateFormat");
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- return reinterpret_cast<icu::SimpleDateFormat*>(
- obj->GetInternalField(0));
- }
-
- return NULL;
+ return reinterpret_cast<icu::SimpleDateFormat*>(obj->GetInternalField(0));
}
void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -823,15 +786,7 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Isolate* isolate,
Handle<JSObject> obj) {
- Handle<String> key =
- isolate->factory()->NewStringFromStaticChars("numberFormat");
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
- }
-
- return NULL;
+ return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
@@ -883,14 +838,7 @@ icu::Collator* Collator::InitializeCollator(
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
- Handle<String> key = isolate->factory()->NewStringFromStaticChars("collator");
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
- }
-
- return NULL;
+ return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
@@ -898,11 +846,8 @@ void Collator::DeleteCollator(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-
-icu::BreakIterator* BreakIterator::InitializeBreakIterator(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
+icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
@@ -942,21 +887,12 @@ icu::BreakIterator* BreakIterator::InitializeBreakIterator(
return break_iterator;
}
-
-icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
- Handle<JSObject> obj) {
- Handle<String> key =
- isolate->factory()->NewStringFromStaticChars("breakIterator");
- Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.IsJust());
- if (maybe.FromJust()) {
- return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
- }
-
- return NULL;
+icu::BreakIterator* V8BreakIterator::UnpackBreakIterator(Isolate* isolate,
+ Handle<JSObject> obj) {
+ return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
-void BreakIterator::DeleteBreakIterator(
+void V8BreakIterator::DeleteBreakIterator(
const v8::WeakCallbackInfo<void>& data) {
delete reinterpret_cast<icu::BreakIterator*>(data.GetInternalField(0));
delete reinterpret_cast<icu::UnicodeString*>(data.GetInternalField(1));
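
All four Unpack* helpers collapse to a single GetInternalField(0) read; the per-call HasOwnProperty tag check is gone because the i18n wrapper objects now come from dedicated maps with a fixed layout (see the new kSize constants in i18n.h below), so only objects of the right shape can reach these call sites. The delete callbacks keep the existing ownership pattern: the ICU object dies when its wrapper is collected. A hypothetical embedder-side analogue of that pattern using the public API (the real code goes through internal GlobalHandles):

    #include <v8.h>
    #include <unicode/smpdtfmt.h>

    void DeleteOnGC(const v8::WeakCallbackInfo<void>& data) {
      delete static_cast<icu::SimpleDateFormat*>(data.GetInternalField(0));
    }

    void MakeWeak(v8::Isolate* isolate, v8::Local<v8::Object> wrapper,
                  v8::Persistent<v8::Object>* holder) {
      holder->Reset(isolate, wrapper);
      holder->SetWeak<void>(nullptr, DeleteOnGC,
                            v8::WeakCallbackType::kInternalFields);
    }
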
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/i18n.h
index 2a4c208601..a87ac97663 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/i18n.h
@@ -7,6 +7,7 @@
#define V8_I18N_H_
#include "src/handles.h"
+#include "src/objects.h"
#include "unicode/uversion.h"
namespace U_ICU_NAMESPACE {
@@ -19,22 +20,6 @@ class SimpleDateFormat;
namespace v8 {
namespace internal {
-// Forward declarations.
-class ObjectTemplateInfo;
-
-class I18N {
- public:
- // Creates an ObjectTemplate with one internal field.
- static Handle<ObjectTemplateInfo> GetTemplate(Isolate* isolate);
-
- // Creates an ObjectTemplate with two internal fields.
- static Handle<ObjectTemplateInfo> GetTemplate2(Isolate* isolate);
-
- private:
- I18N();
-};
-
-
class DateFormat {
public:
// Create a formatter for the specified locale and options. Returns the
@@ -53,6 +38,10 @@ class DateFormat {
// holds the pointer gets garbage collected.
static void DeleteDateFormat(const v8::WeakCallbackInfo<void>& data);
+ // Layout description.
+ static const int kSimpleDateFormat = JSObject::kHeaderSize;
+ static const int kSize = kSimpleDateFormat + kPointerSize;
+
private:
DateFormat();
};
@@ -76,6 +65,10 @@ class NumberFormat {
// holds the pointer gets garbage collected.
static void DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data);
+ // Layout description.
+ static const int kDecimalFormat = JSObject::kHeaderSize;
+ static const int kSize = kDecimalFormat + kPointerSize;
+
private:
NumberFormat();
};
@@ -98,11 +91,15 @@ class Collator {
// the pointer gets garbage collected.
static void DeleteCollator(const v8::WeakCallbackInfo<void>& data);
+ // Layout description.
+ static const int kCollator = JSObject::kHeaderSize;
+ static const int kSize = kCollator + kPointerSize;
+
private:
Collator();
};
-class BreakIterator {
+class V8BreakIterator {
public:
// Create a BreakIterator for the specified locale and options. Returns the
// resolved settings for the locale / options.
@@ -120,8 +117,13 @@ class BreakIterator {
// holds the pointer gets garbage collected.
static void DeleteBreakIterator(const v8::WeakCallbackInfo<void>& data);
+ // Layout description.
+ static const int kBreakIterator = JSObject::kHeaderSize;
+ static const int kUnicodeString = kBreakIterator + kPointerSize;
+ static const int kSize = kUnicodeString + kPointerSize;
+
private:
- BreakIterator();
+ V8BreakIterator();
};
} // namespace internal
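
The new layout constants pin down where each ICU pointer lives, which is what makes the unchecked GetInternalField(0) reads in i18n.cc safe. A sanity sketch that follows directly from the definitions above:

    static_assert(DateFormat::kSimpleDateFormat == JSObject::kHeaderSize,
                  "ICU pointer is the first slot after the JSObject header");
    static_assert(DateFormat::kSize ==
                      DateFormat::kSimpleDateFormat + kPointerSize,
                  "one pointer-sized internal field");
    static_assert(V8BreakIterator::kSize ==
                      JSObject::kHeaderSize + 2 * kPointerSize,
                  "break iterator carries two fields: iterator and text");
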
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index d4de79ef59..021177478d 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -201,13 +201,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
return Memory::uint32_at(pc_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Memory::Address_at(pc_) = address;
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Memory::uint32_at(pc_) = size;
}
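
Both accessors read (and unchecked_update_wasm_size rewrites) a 32-bit payload in place at pc_. A generic sketch of the idiom, with the icache flush that the real code handles elsewhere omitted:

    #include <cstdint>
    #include <cstring>

    inline uint32_t ReadImm32(const uint8_t* pc) {
      uint32_t v;
      std::memcpy(&v, pc, sizeof(v));  // unaligned-safe load
      return v;
    }

    inline void PatchImm32(uint8_t* pc, uint32_t v) {
      std::memcpy(pc, &v, sizeof(v));  // unaligned-safe store
    }
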
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 79f4125354..ddee696162 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -1434,9 +1434,6 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 9b2c51e99b..6afd1c4945 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -34,17 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -1225,9 +1214,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(esi);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(esi);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -1631,7 +1622,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
CEntryStub(isolate, 1, kSaveFPRegs).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2030,40 +2020,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- DCHECK(!scratch.is(dest));
- DCHECK(!scratch.is(src));
- DCHECK(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ shl(count, 1);
- }
-
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ inc(src);
- __ inc(dest);
- __ dec(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2679,67 +2635,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ jmp(done);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- DCHECK(!elements.is(r0));
- DCHECK(!elements.is(r1));
- DCHECK(!name.is(r0));
- DCHECK(!name.is(r1));
-
- __ AssertName(name);
-
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
- __ shr(r0, Name::kHashShift);
- if (i > 0) {
- __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, r1);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements,
- r0,
- times_4,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
- POSITIVE_LOOKUP);
- __ push(name);
- __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
- __ shr(r0, Name::kHashShift);
- __ push(r0);
- __ CallStub(&stub);
-
- __ test(r1, r1);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3016,329 +2911,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-// value is on the stack already.
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector,
- Register slot, Register feedback,
- bool is_polymorphic, Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
- Label pop_and_miss;
-
- __ push(receiver);
- // Value, vector and slot are passed on the stack, so no need to save/restore
- // them.
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
- __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
- // A named keyed store might have a 2 element array, all other cases can count
- // on an array with at least 2 {map, handler} pairs, so they can go right
- // into polymorphic array handling.
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &start_polymorphic);
-
- // found, now call handler.
- Register handler = feedback;
- DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
- __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- // Polymorphic, we have to loop from 2 to N
- __ bind(&start_polymorphic);
- __ push(key);
- Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(2)));
-
- if (!is_polymorphic) {
- // If is_polymorphic is false, we may only have a two element array.
- // Check against length now in that case.
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(greater_equal, &pop_and_miss);
- }
-
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ pop(key);
- __ pop(receiver);
- __ jmp(handler);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(2)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ bind(&pop_and_miss);
- __ pop(key);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector,
- Register slot, Register weak_cell,
- Label* miss) {
- // The store ic value is on the stack.
- DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-
- // feedback initially contains the feedback array
- Label compare_smi_map;
-
- // Move the weak map into the weak_cell register.
- Register ic_map = weak_cell;
- __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &compare_smi_map);
- __ cmp(ic_map, FieldOperand(receiver, 0));
- __ j(not_equal, miss);
- __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // jump to the handler.
- __ jmp(weak_cell);
-
- // In microbenchmarks, it made sense to unroll this code so that the call to
- // the handler is duplicated for a HeapObject receiver and a Smi receiver.
- __ bind(&compare_smi_map);
- __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, miss);
- __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // jump to the handler.
- __ jmp(weak_cell);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
- Register receiver, Register key,
- Register vector, Register slot,
- Register feedback, Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label transition_call;
- Label pop_and_miss;
-
- __ push(receiver);
- // Value, vector and slot are passed on the stack, so no need to save/restore
- // them.
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
-
- // Polymorphic, we have to loop from 0 to N - 1
- __ push(key);
- // Current stack layout:
- // - esp[0] -- key
- // - esp[4] -- receiver
- // - esp[8] -- return address
- // - esp[12] -- vector
- // - esp[16] -- slot
- // - esp[20] -- value
- //
- // Required stack layout for handler call (see StoreWithVectorDescriptor):
- // - esp[0] -- return address
- // - esp[4] -- vector
- // - esp[8] -- slot
- // - esp[12] -- value
- // - receiver, key, handler in registers.
- Register counter = key;
- __ mov(counter, Immediate(Smi::kZero));
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &transition_call);
- __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ pop(key);
- __ pop(receiver);
- __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ jmp(feedback);
-
- __ bind(&transition_call);
- // Current stack layout:
- // - esp[0] -- key
- // - esp[4] -- receiver
- // - esp[8] -- return address
- // - esp[12] -- vector
- // - esp[16] -- slot
- // - esp[20] -- value
- //
- // Required stack layout for handler call (see StoreTransitionDescriptor):
- // - esp[0] -- return address
- // - esp[4] -- vector
- // - esp[8] -- slot
- // - esp[12] -- value
- // - receiver, key, map, handler in registers.
- __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-
- __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(cached_map, &pop_and_miss);
- DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
-
- // Call store transition handler using StoreTransitionDescriptor calling
- // convention.
- __ pop(key);
- __ pop(receiver);
- // Ensure that the transition handler we are going to call has the same
- // number of stack arguments which means that we don't have to adapt them
- // before the call.
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kValue ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kValue);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kSlot ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kSlot);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kVector ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kVector);
- __ jmp(feedback);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(3)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ bind(&pop_and_miss);
- __ pop(key);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
- Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
- Label miss;
-
- if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
- // Current stack layout:
- // - esp[8] -- value
- // - esp[4] -- slot
- // - esp[0] -- return address
- STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- if (in_frame) {
- __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
- // If the vector is not on the stack, then insert the vector beneath
- // return address in order to prepare for calling handler with
- // StoreWithVector calling convention.
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
- __ RecordComment("]");
- } else {
- __ mov(vector, Operand(esp, 1 * kPointerSize));
- }
- __ mov(slot, Operand(esp, 2 * kPointerSize));
- }
-
- Register scratch = value;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
- &miss);
-
- __ bind(&not_array);
- Label try_poly_name;
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
-
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, scratch);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(ebx);
CallICStub stub(isolate(), state());
@@ -3692,136 +3264,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : target
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
- __ AssertReceiver(edx);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(ecx, &new_object);
- __ CmpObjectType(ecx, MAP_TYPE, ebx);
- __ j(not_equal, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
- __ lea(ebx, Operand(ebx, times_pointer_size, 0));
- __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
-
- // ----------- S t a t e -------------
- // -- eax : result (tagged)
- // -- ebx : result fields (untagged)
- // -- edi : result end (untagged)
- // -- ecx : initial map
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ test(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(not_zero, &slack_tracking, Label::kNear);
- {
- // Initialize all in-object fields with undefined.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(ebx, edi, edx);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ sub(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Initialize the in-object fields with undefined.
- __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
- __ neg(edx);
- __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(ebx, edx, edi);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
- __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
- __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(ebx, edx, edi);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ test(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(zero, &finalize, Label::kNear);
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(eax);
- __ Push(ecx);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(eax);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ebx);
- __ Push(ecx);
- __ Push(ebx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(ecx);
- }
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
- __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ dec(edi);
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edi : function
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index c1878f0207..649e2ccf16 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -16,16 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm,
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using the rep movs instruction.
- // Copies ecx characters from esi to edi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one byte strings and returns result in eax.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
@@ -68,14 +58,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register r0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 18e53641e6..ccd159e299 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -485,309 +485,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch = edi;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- DCHECK(target_map.is(ebx));
-
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(ebx);
- __ push(esi);
-
- __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- // edx: receiver
- // edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
- __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
- REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
-
- // eax: destination FixedDoubleArray
- // edi: number of elements
- // edx: receiver
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
- __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ mov(ebx, eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
- // Prepare for conversion loop.
- ExternalReference canonical_the_hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- XMMRegister the_hole_nan = xmm1;
- __ movsd(the_hole_nan,
- Operand::StaticVariable(canonical_the_hole_nan_reference));
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
-
- // Restore registers before jumping into runtime.
- __ pop(esi);
- __ pop(ebx);
- __ pop(eax);
- __ jmp(fail);
-
- // Convert and copy elements
- // esi: source FixedArray
- __ bind(&loop);
- __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
- // ebx: current element from source
- // edi: index of current element
- __ JumpIfNotSmi(ebx, &convert_hole);
-
- // Normal smi, convert it to double and store.
- __ SmiUntag(ebx);
- __ Cvtsi2sd(xmm0, ebx);
- __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- xmm0);
- __ jmp(&entry);
-
- // Found hole, store hole_nan_as_double instead.
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, kObjectFoundInSmiOnlyArray);
- }
-
- __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
- the_hole_nan);
-
- __ bind(&entry);
- __ sub(edi, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- // Restore registers.
- __ pop(esi);
- __ pop(ebx);
- __ pop(eax);
-
- __ bind(&only_change_map);
- // eax: value
- // ebx: target map
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- DCHECK(target_map.is(ebx));
-
- Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(esi);
- __ push(eax);
- __ push(edx);
- __ push(ebx);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
- // Allocate new FixedArray.
- // ebx: length of source FixedDoubleArray (smi-tagged)
- __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
-
- // eax: destination FixedArray
- // ebx: number of elements
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ jmp(&initialization_loop_entry, Label::kNear);
- __ bind(&initialization_loop);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
- __ bind(&initialization_loop_entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &initialization_loop);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ jmp(&entry);
-
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ bind(&only_change_map);
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&success);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(ebx);
- __ pop(edx);
- __ pop(eax);
- __ pop(esi);
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- // edi: source FixedDoubleArray
- // eax: destination FixedArray
- __ bind(&loop);
- // ebx: index of current element (smi-tagged)
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
- // edx: new heap number
- __ movsd(xmm0,
- FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
- __ mov(esi, ebx);
- __ RecordWriteArray(eax,
- edx,
- esi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
-
- __ bind(&entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(edx);
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx,
- HeapObject::kMapOffset,
- ebx,
- edi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ RecordWriteField(edx,
- JSObject::kElementsOffset,
- eax,
- edi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Restore registers.
- __ pop(eax);
- __ pop(esi);
-
- __ bind(&success);
-}
-
-
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Factory* factory,
Register string,
@@ -919,32 +616,24 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
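
The merged GetCodeAge decodes the rel32 call that PatchPlatformCodeAge plants at the start of aged code: opcode 0xE8 followed by a displacement relative to the end of the 5-byte instruction, which is what Assembler::kCallTargetAddressOffset accounts for in the real code. A self-contained sketch of that decode:

    #include <cstdint>
    #include <cstring>

    inline const uint8_t* DecodeCallTarget(const uint8_t* call_pc) {
      int32_t rel;
      std::memcpy(&rel, call_pc + 1, sizeof(rel));  // skip the 0xE8 opcode byte
      return call_pc + 1 + sizeof(int32_t) + rel;   // next-instruction pc + rel
    }
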
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 390f3a76a8..7410a46a61 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -164,8 +164,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Right trim the relocation info to free up remaining space.
const int delta = reloc_info->length() - new_reloc_length;
if (delta > 0) {
- isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- reloc_info, delta);
+ isolate->heap()->RightTrimFixedArray(reloc_info, delta);
}
}
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 8ce78720de..cef6449ca0 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -64,16 +64,11 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx};
+ // SharedFunctionInfo, vector, slot index.
+ Register registers[] = {ebx, ecx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 2fa9d0eda5..0c7c2203f0 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -810,67 +810,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- Register scratch1,
- XMMRegister scratch2,
- Label* fail,
- int elements_offset) {
- Label smi_value, done;
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNaN.
- Move(scratch2, 1.0);
- mulsd(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- jmp(&done, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- mov(scratch1, maybe_number);
- SmiUntag(scratch1);
- Cvtsi2sd(scratch2, scratch1);
- bind(&done);
- movsd(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
- scratch2);
-}
-
-
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -1654,139 +1593,6 @@ void MacroAssembler::AllocateHeapNumber(Register result,
mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
}
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
- REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
- DCHECK(kCharSize == 1);
- add(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
- REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->one_byte_string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, int length,
- Register scratch1, Register scratch2,
- Label* gc_required) {
- DCHECK(length > 0);
-
- // Allocate one-byte string in new space.
- Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
- gc_required, NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->one_byte_string_map()));
- mov(FieldOperand(result, String::kLengthOffset),
- Immediate(Smi::FromInt(length)));
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_one_byte_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_one_byte_string_map()));
-}
-
-
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch,
Label* gc_required) {
@@ -2131,16 +1937,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
+ j(equal, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2157,7 +1961,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2171,7 +1975,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
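The rewritten check is a cheap byte test: debug_hook_on_function_call_address points at a flag the debugger sets, and the generated prologue only drops into the runtime when it is non-zero. A minimal C++ sketch of the emitted control flow; only Runtime::kDebugOnFunctionCall comes from the patch, the flag pointer and save/restore helpers are illustrative stand-ins:

// Sketch of the fast path the assembler emits above (hypothetical glue).
void CheckDebugHookSketch(const uint8_t* debug_hook_active) {
  if (*debug_hook_active == 0) return;   // skip_hook: the common case
  SaveArgumentRegisters();               // expected/actual counts, fun, target
  CallRuntimeDebugOnFunctionCall();      // Runtime::kDebugOnFunctionCall
  RestoreArgumentRegisters();
}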
@@ -2185,8 +1989,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(edi));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -2291,28 +2095,6 @@ void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- mov(scratch, NativeContextOperand());
- cmp(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- mov(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the native context from the current context.
mov(function, NativeContextOperand());
@@ -2759,19 +2541,6 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
psllq(dst, HeapNumber::kMantissaBits);
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
- Register instance_type, Register scratch, Label* failure) {
- if (!scratch.is(instance_type)) {
- mov(scratch, instance_type);
- }
- and_(scratch,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
- j(not_equal, failure);
-}
-
-
void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
Register object2,
Register scratch1,
@@ -3162,43 +2931,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // |current| walks the map/prototype chain starting from |object|.
- mov(current, object);
- mov(current, FieldOperand(current, HeapObject::kMapOffset));
- mov(current, FieldOperand(current, Map::kPrototypeOffset));
- cmp(current, Immediate(factory->null_value()));
- j(equal, &end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- mov(current, FieldOperand(current, HeapObject::kMapOffset));
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- CmpInstanceType(current, JS_OBJECT_TYPE);
- j(below, found);
- mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
- j(equal, found);
- mov(current, FieldOperand(current, Map::kPrototypeOffset));
- cmp(current, Immediate(factory->null_value()));
- j(not_equal, &loop_again);
-
- bind(&end);
-}
-
-
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(eax));
DCHECK(!dividend.is(edx));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e8ff59d41b..50ff068551 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -260,16 +260,6 @@ class MacroAssembler: public Assembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -344,9 +334,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -391,24 +382,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number, Register elements,
- Register key, Register scratch1,
- XMMRegister scratch2, Label* fail,
- int offset = 0);
-
// Compare an object's map with the specified map.
void CompareMap(Register obj, Handle<Map> map);
@@ -640,31 +613,6 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Label* gc_required, MutableMode mode = IMMUTABLE);
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, int length, Register scratch1,
- Register scratch2, Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
- void AllocateOneByteConsString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
@@ -889,13 +837,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
- // Check whether the instance type represents a flat one-byte string. Jump to
- // the label if not. If the instance type can be scratched specify same
- // register for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialOneByte(
- Register instance_type, Register scratch,
- Label* on_not_flat_one_byte_string);
-
// Checks if both objects are sequential one-byte strings, and jumps to label
// if either is not.
void JumpIfNotBothSequentialOneByteStrings(
@@ -943,20 +884,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- j(equal, memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
private:
bool generating_stub_;
bool has_frame_;
diff --git a/deps/v8/src/ic/accessor-assembler-impl.h b/deps/v8/src/ic/accessor-assembler-impl.h
new file mode 100644
index 0000000000..1699b5c855
--- /dev/null
+++ b/deps/v8/src/ic/accessor-assembler-impl.h
@@ -0,0 +1,203 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
+#define V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CodeAssemblerState;
+}
+
+using compiler::Node;
+
+#define ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(V) \
+ V(LoadIC) \
+ V(LoadField) \
+ V(LoadICTrampoline) \
+ V(KeyedLoadICTF) \
+ V(KeyedLoadICTrampolineTF) \
+ V(KeyedLoadICMegamorphic) \
+ V(StoreIC) \
+ V(StoreICTrampoline)
+// The other IC entry points need custom handling because of additional
+// parameters like "typeof_mode" or "language_mode".
+
+class AccessorAssemblerImpl : public CodeStubAssembler {
+ public:
+ explicit AccessorAssemblerImpl(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+#define DECLARE_PUBLIC_METHOD(Name) void Generate##Name();
+
+ ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(DECLARE_PUBLIC_METHOD)
+#undef DECLARE_PUBLIC_METHOD
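Each V(Name) entry in ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE is expanded once by DECLARE_PUBLIC_METHOD, so the block above is shorthand for one Generate method per stub. Expanded, the declarations read:

// Expansion of the X-macro above.
void GenerateLoadIC();
void GenerateLoadField();
void GenerateLoadICTrampoline();
void GenerateKeyedLoadICTF();
void GenerateKeyedLoadICTrampolineTF();
void GenerateKeyedLoadICMegamorphic();
void GenerateStoreIC();
void GenerateStoreICTrampoline();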
+
+ void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+
+ void GenerateLoadGlobalIC(TypeofMode typeof_mode);
+ void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
+
+ void GenerateKeyedStoreICTF(LanguageMode language_mode);
+ void GenerateKeyedStoreICTrampolineTF(LanguageMode language_mode);
+
+ void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+
+ Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
+ return StubCachePrimaryOffset(name, map);
+ }
+ Node* StubCacheSecondaryOffsetForTesting(Node* name, Node* map) {
+ return StubCacheSecondaryOffset(name, map);
+ }
+
+ protected:
+ struct LoadICParameters {
+ LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
+ Node* vector)
+ : context(context),
+ receiver(receiver),
+ name(name),
+ slot(slot),
+ vector(vector) {}
+
+ Node* context;
+ Node* receiver;
+ Node* name;
+ Node* slot;
+ Node* vector;
+ };
+
+ struct StoreICParameters : public LoadICParameters {
+ StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
+ Node* slot, Node* vector)
+ : LoadICParameters(context, receiver, name, slot, vector),
+ value(value) {}
+ Node* value;
+ };
+
+ enum ElementSupport { kOnlyProperties, kSupportElements };
+ void HandleStoreICHandlerCase(
+ const StoreICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements = kOnlyProperties);
+
+ private:
+ // Stub generation entry points.
+
+ void LoadIC(const LoadICParameters* p);
+ void LoadICProtoArray(const LoadICParameters* p, Node* handler,
+ bool throw_reference_error_if_nonexistent);
+ void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+ void KeyedLoadIC(const LoadICParameters* p);
+ void KeyedLoadICGeneric(const LoadICParameters* p);
+ void StoreIC(const StoreICParameters* p);
+ void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+
+ // IC dispatcher behavior.
+
+ // Checks monomorphic case. Returns {feedback} entry of the vector.
+ Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+ void HandlePolymorphicCase(Node* receiver_map, Node* feedback,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss, int unroll_count);
+ void HandleKeyedStorePolymorphicCase(Node* receiver_map, Node* feedback,
+ Label* if_handler, Variable* var_handler,
+ Label* if_transition_handler,
+ Variable* var_transition_map_cell,
+ Label* if_miss);
+
+ // LoadIC implementation.
+
+ void HandleLoadICHandlerCase(
+ const LoadICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements = kOnlyProperties);
+
+ void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+ Node* smi_handler, Label* miss,
+ ElementSupport support_elements);
+
+ void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
+ Variable* var_holder,
+ Variable* var_smi_handler,
+ Label* if_smi_handler, Label* miss,
+ bool throw_reference_error_if_nonexistent);
+
+ Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
+ Node* handler_length, Node* handler_flags,
+ Label* miss,
+ bool throw_reference_error_if_nonexistent);
+
+ // LoadGlobalIC implementation.
+
+ void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
+ Label* miss,
+ bool throw_reference_error_if_nonexistent);
+
+ // StoreIC implementation.
+
+ void HandleStoreICElementHandlerCase(const StoreICParameters* p,
+ Node* handler, Label* miss);
+
+ void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
+ Label* miss);
+ // If |transition| is nullptr, a normal field store is generated;
+ // otherwise a transitioning store is generated.
+ void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
+ Node* value, Node* transition, Label* miss);
+ // If |transition| is nullptr, a normal field store is generated;
+ // otherwise a transitioning store is generated.
+ void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
+ Representation representation, Node* value,
+ Node* transition, Label* miss);
+
+ // Low-level helpers.
+
+ Node* PrepareValueForStore(Node* handler_word, Node* holder,
+ Representation representation, Node* transition,
+ Node* value, Label* bailout);
+
+ // Extends properties backing store by JSObject::kFieldsAdded elements.
+ void ExtendPropertiesBackingStore(Node* object);
+
+ void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
+ Representation representation, Node* value,
+ bool transition_to_field);
+
+ void EmitFastElementsBoundsCheck(Node* object, Node* elements,
+ Node* intptr_index,
+ Node* is_jsarray_condition, Label* miss);
+ void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
+ Node* key, Node* is_jsarray_condition, Label* if_hole,
+ Label* rebox_double, Variable* var_double_value,
+ Label* unimplemented_elements_kind, Label* out_of_bounds,
+ Label* miss);
+ void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
+ void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
+
+ // Stub cache access helpers.
+
+ // This enum is used here as a replacement for StubCache::Table to avoid
+ // including the stub cache header.
+ enum StubCacheTable : int;
+
+ Node* StubCachePrimaryOffset(Node* name, Node* map);
+ Node* StubCacheSecondaryOffset(Node* name, Node* seed);
+
+ void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+ Node* entry_offset, Node* name, Node* map,
+ Label* if_handler, Variable* var_handler,
+ Label* if_miss);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SRC_IC_ACCESSOR_ASSEMBLER_IMPL_H_
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
new file mode 100644
index 0000000000..9c795c1325
--- /dev/null
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -0,0 +1,1933 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/accessor-assembler.h"
+#include "src/ic/accessor-assembler-impl.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/ic/handler-configuration.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::CodeAssemblerState;
+
+//////////////////// Private helpers.
+
+Node* AccessorAssemblerImpl::TryMonomorphicCase(Node* slot, Node* vector,
+ Node* receiver_map,
+ Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss) {
+ Comment("TryMonomorphicCase");
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+ // TODO(ishell): add helper class that hides offset computations for a series
+ // of loads.
+ int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+ // Adding |header_size| with a separate IntPtrAdd rather than passing it
+ // into ElementOffsetFromIndex() allows it to be folded into a single
+ // [base, index, offset] indirect memory access on x64.
+ Node* offset =
+ ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+ Node* feedback = Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size)));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+ if_miss);
+
+ Node* handler =
+ Load(MachineType::AnyTagged(), vector,
+ IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
+
+ var_handler->Bind(handler);
+ Goto(if_handler);
+ return feedback;
+}
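TryMonomorphicCase reads the feedback vector as (weak map cell, handler) pairs keyed by the slot index, so the whole fast path is two dependent loads and one compare. A scalar sketch, with WeakCellValue standing in for the unchecked weak-cell load:

// Monomorphic probe sketch; vector layout as used above.
Object* TryMonomorphic(Object** vector, int slot, Map* receiver_map) {
  Object* feedback = vector[slot];              // weak cell holding a map
  if (WeakCellValue(feedback) != receiver_map) return nullptr;  // miss
  return vector[slot + 1];                      // cached handler
}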
+
+void AccessorAssemblerImpl::HandlePolymorphicCase(
+ Node* receiver_map, Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_miss, int unroll_count) {
+ Comment("HandlePolymorphicCase");
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+ // Iterate {feedback} array.
+ const int kEntrySize = 2;
+
+ for (int i = 0; i < unroll_count; i++) {
+ Label next_entry(this);
+ Node* cached_map =
+ LoadWeakCellValue(LoadFixedArrayElement(feedback, i * kEntrySize));
+ GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler = LoadFixedArrayElement(feedback, i * kEntrySize + 1);
+ var_handler->Bind(handler);
+ Goto(if_handler);
+
+ Bind(&next_entry);
+ }
+
+ // Loop from {unroll_count}*kEntrySize to {length}.
+ Node* init = IntPtrConstant(unroll_count * kEntrySize);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), init, length,
+ [this, receiver_map, feedback, if_handler, var_handler](Node* index) {
+ Node* cached_map =
+ LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+
+ Label next_entry(this);
+ GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ // Found, now call handler.
+ Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+ var_handler->Bind(handler);
+ Goto(if_handler);
+
+ Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
+}
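Polymorphic feedback is a flat array of (map weak cell, handler) pairs; the first unroll_count entries get straight-line checks and the remainder is scanned with BuildFastLoop. Stripped of the unrolling, and with the same WeakCellValue stand-in as before, the search is simply:

// Linear handler search sketch, assuming kEntrySize == 2 as above.
Object* FindHandler(Object** feedback, int length, Map* receiver_map) {
  for (int i = 0; i < length; i += 2) {
    if (WeakCellValue(feedback[i]) == receiver_map) return feedback[i + 1];
  }
  return nullptr;  // no entry matched: miss
}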
+
+void AccessorAssemblerImpl::HandleKeyedStorePolymorphicCase(
+ Node* receiver_map, Node* feedback, Label* if_handler,
+ Variable* var_handler, Label* if_transition_handler,
+ Variable* var_transition_map_cell, Label* if_miss) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
+
+ const int kEntrySize = 3;
+
+ Node* init = IntPtrConstant(0);
+ Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+ BuildFastLoop(MachineType::PointerRepresentation(), init, length,
+ [this, receiver_map, feedback, if_handler, var_handler,
+ if_transition_handler, var_transition_map_cell](Node* index) {
+ Node* cached_map =
+ LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+ Label next_entry(this);
+ GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+ Node* maybe_transition_map_cell =
+ LoadFixedArrayElement(feedback, index, kPointerSize);
+
+ var_handler->Bind(
+ LoadFixedArrayElement(feedback, index, 2 * kPointerSize));
+ GotoIf(WordEqual(maybe_transition_map_cell,
+ LoadRoot(Heap::kUndefinedValueRootIndex)),
+ if_handler);
+ var_transition_map_cell->Bind(maybe_transition_map_cell);
+ Goto(if_transition_handler);
+
+ Bind(&next_entry);
+ },
+ kEntrySize, IndexAdvanceMode::kPost);
+ // The loop falls through if no handler was found.
+ Goto(if_miss);
+}
+
+void AccessorAssemblerImpl::HandleLoadICHandlerCase(
+ const LoadICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements) {
+ Comment("have_handler");
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ var_holder.Bind(p->receiver);
+ Variable var_smi_handler(this, MachineRepresentation::kTagged);
+ var_smi_handler.Bind(handler);
+
+ Variable* vars[] = {&var_holder, &var_smi_handler};
+ Label if_smi_handler(this, 2, vars);
+ Label try_proto_handler(this), call_handler(this);
+
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
+ // for the encoding format.
+ Bind(&if_smi_handler);
+ {
+ HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+ miss, support_elements);
+ }
+
+ Bind(&try_proto_handler);
+ {
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+ HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss, false);
+ }
+
+ Bind(&call_handler);
+ {
+ typedef LoadWithVectorDescriptor Descriptor;
+ TailCallStub(Descriptor(isolate()), handler, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+}
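A load handler is dispatched on its representation: Smis encode the action in bit fields, Code objects are tail-called directly, and everything else is treated as a proto handler. The helper names below are sketches of the branches above, not real entry points:

// Three-way handler dispatch, mirroring the TaggedIsSmi/IsCodeMap checks.
if (IsSmi(handler)) {
  HandleSmiEncodedLoad(handler);   // field/constant/element load
} else if (IsCode(handler)) {
  TailCall(handler);               // compiled handler stub
} else {
  HandleProtoHandler(handler);     // FixedArray or Tuple3 handler
}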
+
+void AccessorAssemblerImpl::HandleLoadICSmiHandlerCase(
+ const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+ ElementSupport support_elements) {
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ Node* handler_word = SmiUntag(smi_handler);
+ Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+ if (support_elements == kSupportElements) {
+ Label property(this);
+ GotoUnless(
+ WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+ &property);
+
+ Comment("element_load");
+ Node* intptr_index = TryToIntptr(p->name, miss);
+ Node* elements = LoadElements(holder);
+ Node* is_jsarray_condition =
+ IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+ Node* elements_kind =
+ DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
+ Label if_hole(this), unimplemented_elements_kind(this);
+ Label* out_of_bounds = miss;
+ EmitElementLoad(holder, elements, elements_kind, intptr_index,
+ is_jsarray_condition, &if_hole, &rebox_double,
+ &var_double_value, &unimplemented_elements_kind,
+ out_of_bounds, miss);
+
+ Bind(&unimplemented_elements_kind);
+ {
+ // Smi handlers should only be installed for supported elements kinds.
+ // Crash if we get here.
+ DebugBreak();
+ Goto(miss);
+ }
+
+ Bind(&if_hole);
+ {
+ Comment("convert hole");
+ GotoUnless(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+ Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+ DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+ GotoUnless(
+ WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+ SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+ miss);
+ Return(UndefinedConstant());
+ }
+
+ Bind(&property);
+ Comment("property_load");
+ }
+
+ Label constant(this), field(this);
+ Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+ &field, &constant);
+
+ Bind(&field);
+ {
+ Comment("field_load");
+ Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+
+ Label inobject(this), out_of_object(this);
+ Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+ &out_of_object);
+
+ Bind(&inobject);
+ {
+ Label is_double(this);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(LoadObjectField(holder, offset));
+
+ Bind(&is_double);
+ if (FLAG_unbox_double_fields) {
+ var_double_value.Bind(
+ LoadObjectField(holder, offset, MachineType::Float64()));
+ } else {
+ Node* mutable_heap_number = LoadObjectField(holder, offset);
+ var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+ }
+ Goto(&rebox_double);
+ }
+
+ Bind(&out_of_object);
+ {
+ Label is_double(this);
+ Node* properties = LoadProperties(holder);
+ Node* value = LoadObjectField(properties, offset);
+ GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+ Return(value);
+
+ Bind(&is_double);
+ var_double_value.Bind(LoadHeapNumberValue(value));
+ Goto(&rebox_double);
+ }
+
+ Bind(&rebox_double);
+ Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ }
+
+ Bind(&constant);
+ {
+ Comment("constant_load");
+ Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+ Node* descriptor =
+ DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+ CSA_ASSERT(this,
+ UintPtrLessThan(descriptor,
+ LoadAndUntagFixedArrayBaseLength(descriptors)));
+ Node* value = LoadFixedArrayElement(descriptors, descriptor);
+
+ Label if_accessor_info(this);
+ GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+ &if_accessor_info);
+ Return(value);
+
+ Bind(&if_accessor_info);
+ Callable callable = CodeFactory::ApiGetter(isolate());
+ TailCallStub(callable, p->context, p->receiver, holder, value);
+ }
+}
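The Smi handler packs everything the load needs into bit fields: KindBits select field, constant or element handling, and for fields IsInobjectBits, IsDoubleBits and FieldOffsetBits locate the value. A hedged decode sketch; the shifts and widths below are placeholders, the real ones come from LoadHandler's BitField declarations:

// Placeholder bit-field decode of an untagged smi handler word.
intptr_t word = SmiUntag(smi_handler);
int  kind      = word & 0x3;        // LoadHandler::KindBits (width made up)
bool inobject  = (word >> 2) & 1;   // LoadHandler::IsInobjectBits
bool is_double = (word >> 3) & 1;   // LoadHandler::IsDoubleBits
intptr_t field_offset = word >> 4;  // LoadHandler::FieldOffsetBits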
+
+void AccessorAssemblerImpl::HandleLoadICProtoHandlerCase(
+ const LoadICParameters* p, Node* handler, Variable* var_holder,
+ Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
+ bool throw_reference_error_if_nonexistent) {
+ DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+ DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+ LoadHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+ LoadHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have the validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Label check_prototypes(this);
+ GotoUnless(
+ IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+ &check_prototypes);
+ {
+ CSA_ASSERT(this, Word32BinaryNot(
+ HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+ // We have a dictionary receiver, do a negative lookup check.
+ NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+ Goto(&check_prototypes);
+ }
+
+ Bind(&check_prototypes);
+ Node* maybe_holder_cell =
+ LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+ Label array_handler(this), tuple_handler(this);
+ Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+ Bind(&tuple_handler);
+ {
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ if (throw_reference_error_if_nonexistent) {
+ TailCallRuntime(Runtime::kThrowReferenceError, p->context, p->name);
+ } else {
+ Return(UndefinedConstant());
+ }
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // both the receiver map check and the validity cell check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+ var_holder->Bind(holder);
+ var_smi_handler->Bind(smi_handler);
+ Goto(if_smi_handler);
+ }
+
+ Bind(&array_handler);
+ {
+ typedef LoadICProtoArrayDescriptor Descriptor;
+ LoadICProtoArrayStub stub(isolate(), throw_reference_error_if_nonexistent);
+ Node* target = HeapConstant(stub.GetCode());
+ TailCallStub(Descriptor(isolate()), target, p->context, p->receiver,
+ p->name, p->slot, p->vector, handler);
+ }
+}
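The validity cell is the cheap guard for everything prototype-dependent: as long as no map on the chain has changed, the cell still holds Map::kPrototypeChainValid; invalidation writes some other value into the shared cell and thereby kills every handler that depends on it. With CellValue standing in for the Cell::kValueOffset load, the check reduces to:

// Validity-cell guard sketch; Smi zero means no guard was installed.
bool ChainStillValid(Object* validity_cell) {
  if (validity_cell == Smi::kZero) return true;  // nothing to check
  return CellValue(validity_cell) ==
         Smi::FromInt(Map::kPrototypeChainValid);
}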
+
+Node* AccessorAssemblerImpl::EmitLoadICProtoArrayCheck(
+ const LoadICParameters* p, Node* handler, Node* handler_length,
+ Node* handler_flags, Label* miss,
+ bool throw_reference_error_if_nonexistent) {
+ Variable start_index(this, MachineType::PointerRepresentation());
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+ Label can_access(this);
+ GotoUnless(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+ &can_access);
+ {
+ // Skip the handler's first prototype slot; it holds a weak cell with the
+ // expected native context rather than a prototype cell.
+ start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+ int offset =
+ FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+ Node* expected_native_context =
+ LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+ CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+ Node* native_context = LoadNativeContext(p->context);
+ GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+ // If the receiver is not a JSGlobalProxy then we miss.
+ GotoUnless(IsJSGlobalProxy(p->receiver), miss);
+ // For JSGlobalProxy receiver try to compare security tokens of current
+ // and expected native contexts.
+ Node* expected_token = LoadContextElement(expected_native_context,
+ Context::SECURITY_TOKEN_INDEX);
+ Node* current_token =
+ LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+ Branch(WordEqual(expected_token, current_token), &can_access, miss);
+ }
+ Bind(&can_access);
+
+ BuildFastLoop(
+ MachineType::PointerRepresentation(), start_index.value(), handler_length,
+ [this, p, handler, miss](Node* current) {
+ Node* prototype_cell = LoadFixedArrayElement(handler, current);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_holder_cell =
+ LoadFixedArrayElement(handler, LoadHandler::kHolderCellIndex);
+ Label load_existent(this);
+ GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+ // This is a handler for a load of a non-existent value.
+ if (throw_reference_error_if_nonexistent) {
+ TailCallRuntime(Runtime::kThrowReferenceError, p->context, p->name);
+ } else {
+ Return(UndefinedConstant());
+ }
+
+ Bind(&load_existent);
+ Node* holder = LoadWeakCellValue(maybe_holder_cell);
+ // The |holder| is guaranteed to be alive at this point since we passed
+ // the receiver map check, the validity cell check and the prototype chain
+ // check.
+ CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+ return holder;
+}
+
+void AccessorAssemblerImpl::HandleLoadGlobalICHandlerCase(
+ const LoadICParameters* pp, Node* handler, Label* miss,
+ bool throw_reference_error_if_nonexistent) {
+ LoadICParameters p = *pp;
+ DCHECK_NULL(p.receiver);
+ Node* native_context = LoadNativeContext(p.context);
+ p.receiver = LoadContextElement(native_context, Context::EXTENSION_INDEX);
+
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ Variable var_smi_handler(this, MachineRepresentation::kTagged);
+ Label if_smi_handler(this);
+ HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
+ &if_smi_handler, miss,
+ throw_reference_error_if_nonexistent);
+ Bind(&if_smi_handler);
+ HandleLoadICSmiHandlerCase(&p, var_holder.value(), var_smi_handler.value(),
+ miss, kOnlyProperties);
+}
+
+void AccessorAssemblerImpl::HandleStoreICHandlerCase(
+ const StoreICParameters* p, Node* handler, Label* miss,
+ ElementSupport support_elements) {
+ Label if_smi_handler(this), if_nonsmi_handler(this);
+ Label if_proto_handler(this), if_element_handler(this), call_handler(this);
+
+ Branch(TaggedIsSmi(handler), &if_smi_handler, &if_nonsmi_handler);
+
+ // |handler| is a Smi, encoding what to do. See SmiHandler methods
+ // for the encoding format.
+ Bind(&if_smi_handler);
+ {
+ Node* holder = p->receiver;
+ Node* handler_word = SmiUntag(handler);
+
+ // Handle non-transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+ }
+
+ Bind(&if_nonsmi_handler);
+ {
+ Node* handler_map = LoadMap(handler);
+ if (support_elements == kSupportElements) {
+ GotoIf(IsTuple2Map(handler_map), &if_element_handler);
+ }
+ Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
+ }
+
+ if (support_elements == kSupportElements) {
+ Bind(&if_element_handler);
+ { HandleStoreICElementHandlerCase(p, handler, miss); }
+ }
+
+ Bind(&if_proto_handler);
+ {
+ HandleStoreICProtoHandler(p, handler, miss);
+ }
+
+ // |handler| is a heap object. Must be code, call it.
+ Bind(&call_handler);
+ {
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+ }
+}
+
+void AccessorAssemblerImpl::HandleStoreICElementHandlerCase(
+ const StoreICParameters* p, Node* handler, Label* miss) {
+ Comment("HandleStoreICElementHandlerCase");
+ Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+
+ Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
+ CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+ StoreWithVectorDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+ p->value, p->slot, p->vector);
+}
+
+void AccessorAssemblerImpl::HandleStoreICProtoHandler(
+ const StoreICParameters* p, Node* handler, Label* miss) {
+ // IC dispatchers rely on these assumptions to be held.
+ STATIC_ASSERT(FixedArray::kLengthOffset ==
+ StoreHandler::kTransitionCellOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+ StoreHandler::kSmiHandlerOffset);
+ DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+ StoreHandler::kValidityCellOffset);
+
+ // Both FixedArray and Tuple3 handlers have the validity cell at the same offset.
+ Label validity_cell_check_done(this);
+ Node* validity_cell =
+ LoadObjectField(handler, StoreHandler::kValidityCellOffset);
+ GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+ &validity_cell_check_done);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(WordNotEqual(cell_value,
+ SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+ miss);
+ Goto(&validity_cell_check_done);
+
+ Bind(&validity_cell_check_done);
+ Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+ CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+ Node* maybe_transition_cell =
+ LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+ Label array_handler(this), tuple_handler(this);
+ Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+ Variable var_transition(this, MachineRepresentation::kTagged);
+ Label if_transition(this), if_transition_to_constant(this);
+ Bind(&tuple_handler);
+ {
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&array_handler);
+ {
+ Node* length = SmiUntag(maybe_transition_cell);
+ BuildFastLoop(MachineType::PointerRepresentation(),
+ IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+ [this, p, handler, miss](Node* current) {
+ Node* prototype_cell =
+ LoadFixedArrayElement(handler, current);
+ CheckPrototype(prototype_cell, p->name, miss);
+ },
+ 1, IndexAdvanceMode::kPost);
+
+ Node* maybe_transition_cell =
+ LoadFixedArrayElement(handler, StoreHandler::kTransitionCellIndex);
+ Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+ var_transition.Bind(transition);
+ Goto(&if_transition);
+ }
+
+ Bind(&if_transition);
+ {
+ Node* holder = p->receiver;
+ Node* transition = var_transition.value();
+ Node* handler_word = SmiUntag(smi_handler);
+
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ GotoIf(WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant)),
+ &if_transition_to_constant);
+
+ // Handle transitioning field stores.
+ HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+ miss);
+
+ Bind(&if_transition_to_constant);
+ {
+ // Check that constant matches value.
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors = LoadMapDescriptors(transition);
+ Node* constant =
+ LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+ GotoIf(WordNotEqual(p->value, constant), miss);
+
+ StoreMap(p->receiver, transition);
+ Return(p->value);
+ }
+ }
+}
+
+void AccessorAssemblerImpl::HandleStoreICSmiHandlerCase(Node* handler_word,
+ Node* holder,
+ Node* value,
+ Node* transition,
+ Label* miss) {
+ Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+ Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+ if (transition) {
+ CSA_ASSERT(
+ this,
+ Word32Or(
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToField)),
+ WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kTransitionToConstant))));
+ } else {
+ CSA_ASSERT(this, WordEqual(handler_kind,
+ IntPtrConstant(StoreHandler::kStoreField)));
+ }
+#endif
+
+ Node* field_representation =
+ DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+ Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+ if_tagged_field(this);
+
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+ &if_tagged_field);
+ GotoIf(WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kHeapObject)),
+ &if_heap_object_field);
+ GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+ &if_double_field);
+ CSA_ASSERT(this, WordEqual(field_representation,
+ IntPtrConstant(StoreHandler::kSmi)));
+ Goto(&if_smi_field);
+
+ Bind(&if_tagged_field);
+ {
+ Comment("store tagged field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+ value, transition, miss);
+ }
+
+ Bind(&if_double_field);
+ {
+ Comment("store double field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+ value, transition, miss);
+ }
+
+ Bind(&if_heap_object_field);
+ {
+ Comment("store heap object field");
+ HandleStoreFieldAndReturn(handler_word, holder,
+ Representation::HeapObject(), value, transition,
+ miss);
+ }
+
+ Bind(&if_smi_field);
+ {
+ Comment("store smi field");
+ HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+ value, transition, miss);
+ }
+}
+
+void AccessorAssemblerImpl::HandleStoreFieldAndReturn(
+ Node* handler_word, Node* holder, Representation representation,
+ Node* value, Node* transition, Label* miss) {
+ bool transition_to_field = transition != nullptr;
+ Node* prepared_value = PrepareValueForStore(
+ handler_word, holder, representation, transition, value, miss);
+
+ Label if_inobject(this), if_out_of_object(this);
+ Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+ &if_out_of_object);
+
+ Bind(&if_inobject);
+ {
+ StoreNamedField(handler_word, holder, true, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreMap(holder, transition);
+ }
+ Return(value);
+ }
+
+ Bind(&if_out_of_object);
+ {
+ if (transition_to_field) {
+ Label storage_extended(this);
+ GotoUnless(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+ &storage_extended);
+ Comment("[ Extend storage");
+ ExtendPropertiesBackingStore(holder);
+ Comment("] Extend storage");
+ Goto(&storage_extended);
+
+ Bind(&storage_extended);
+ }
+
+ StoreNamedField(handler_word, holder, false, representation, prepared_value,
+ transition_to_field);
+ if (transition_to_field) {
+ StoreMap(holder, transition);
+ }
+ Return(value);
+ }
+}
+
+Node* AccessorAssemblerImpl::PrepareValueForStore(Node* handler_word,
+ Node* holder,
+ Representation representation,
+ Node* transition, Node* value,
+ Label* bailout) {
+ if (representation.IsDouble()) {
+ value = TryTaggedToFloat64(value, bailout);
+
+ } else if (representation.IsHeapObject()) {
+ GotoIf(TaggedIsSmi(value), bailout);
+ Node* value_index_in_descriptor =
+ DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+ Node* descriptors =
+ LoadMapDescriptors(transition ? transition : LoadMap(holder));
+ Node* maybe_field_type =
+ LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+
+ Label done(this);
+ GotoIf(TaggedIsSmi(maybe_field_type), &done);
+ // Check that value type matches the field type.
+ {
+ Node* field_type = LoadWeakCellValue(maybe_field_type, bailout);
+ Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
+ }
+ Bind(&done);
+
+ } else if (representation.IsSmi()) {
+ GotoUnless(TaggedIsSmi(value), bailout);
+
+ } else {
+ DCHECK(representation.IsTagged());
+ }
+ return value;
+}
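PrepareValueForStore is three representation guards in a row: doubles are unboxed (bailing out on non-numbers), heap-object values are checked against the field type recorded in the descriptor array, and Smi fields reject anything that is not a Smi. In outline, with Bailout() and the field-type helpers as stand-ins for the |miss| label and the descriptor loads:

// Representation guards, sketched in scalar C++.
if (rep.IsDouble()) {
  value = ToFloat64OrBailout(value);             // unbox or bail
} else if (rep.IsHeapObject()) {
  if (IsSmi(value)) Bailout();
  Object* field_type = FieldTypeFor(holder, handler_word);  // descriptors
  if (!IsAny(field_type) && MapOf(value) != field_type) Bailout();
} else if (rep.IsSmi()) {
  if (!IsSmi(value)) Bailout();
}  // Tagged stores accept any value.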
+
+void AccessorAssemblerImpl::ExtendPropertiesBackingStore(Node* object) {
+ Node* properties = LoadProperties(object);
+ Node* length = LoadFixedArrayBaseLength(properties);
+
+ ParameterMode mode = OptimalParameterMode();
+ length = TaggedToParameter(length, mode);
+
+ Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
+ Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+ // Grow properties array.
+ ElementsKind kind = FAST_ELEMENTS;
+ DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
+ FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+ // The size of a new properties backing store is guaranteed to be small
+ // enough that the new backing store will be allocated in new space.
+ CSA_ASSERT(this,
+ UintPtrOrSmiLessThan(
+ new_capacity,
+ IntPtrOrSmiConstant(
+ kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
+ mode));
+
+ Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+
+ FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
+ Heap::kUndefinedValueRootIndex, mode);
+
+ // |new_properties| is guaranteed to be in new space, so we can skip
+ // the write barrier.
+ CopyFixedArrayElements(kind, properties, new_properties, length,
+ SKIP_WRITE_BARRIER, mode);
+
+ StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+}
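Growing the out-of-object property store is a fixed-size bump of JSObject::kFieldsAdded slots: allocate the larger array, copy the old values, pad with undefined, and swap it in. Because the capacity stays below kMaxNumberOfDescriptors + kFieldsAdded, the allocation always lands in new space, which is what lets the copy skip the write barrier. Roughly, with the helpers standing in for the CSA calls above:

// Growth sketch.
int new_capacity = old_length + JSObject::kFieldsAdded;
Object** new_store = AllocateFixedArrayInNewSpace(new_capacity);
CopyNoWriteBarrier(new_store, old_store, old_length);
FillWithUndefined(new_store, old_length, new_capacity);
object->set_properties(new_store);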
+
+void AccessorAssemblerImpl::StoreNamedField(Node* handler_word, Node* object,
+ bool is_inobject,
+ Representation representation,
+ Node* value,
+ bool transition_to_field) {
+ bool store_value_as_double = representation.IsDouble();
+ Node* property_storage = object;
+ if (!is_inobject) {
+ property_storage = LoadProperties(object);
+ }
+
+ Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ if (representation.IsDouble()) {
+ if (!FLAG_unbox_double_fields || !is_inobject) {
+ if (transition_to_field) {
+ Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
+ // Store the new mutable heap number into the object.
+ value = heap_number;
+ store_value_as_double = false;
+ } else {
+ // Load the heap number.
+ property_storage = LoadObjectField(property_storage, offset);
+ // Store the double value into it.
+ offset = IntPtrConstant(HeapNumber::kValueOffset);
+ }
+ }
+ }
+
+ if (store_value_as_double) {
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
+ MachineRepresentation::kFloat64);
+ } else if (representation.IsSmi()) {
+ StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+ } else {
+ StoreObjectField(property_storage, offset, value);
+ }
+}
+
+void AccessorAssemblerImpl::EmitFastElementsBoundsCheck(
+ Node* object, Node* elements, Node* intptr_index,
+ Node* is_jsarray_condition, Label* miss) {
+ Variable var_length(this, MachineType::PointerRepresentation());
+ Comment("Fast elements bounds check");
+ Label if_array(this), length_loaded(this, &var_length);
+ GotoIf(is_jsarray_condition, &if_array);
+ {
+ var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
+ Goto(&length_loaded);
+ }
+ Bind(&if_array);
+ {
+ var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
+ Goto(&length_loaded);
+ }
+ Bind(&length_loaded);
+ GotoUnless(UintPtrLessThan(intptr_index, var_length.value()), miss);
+}
+
+void AccessorAssemblerImpl::EmitElementLoad(
+ Node* object, Node* elements, Node* elements_kind, Node* intptr_index,
+ Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
+ Variable* var_double_value, Label* unimplemented_elements_kind,
+ Label* out_of_bounds, Label* miss) {
+ Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
+ if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
+ if_dictionary(this);
+ GotoIf(
+ Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+ &if_nonfast);
+
+ EmitFastElementsBoundsCheck(object, elements, intptr_index,
+ is_jsarray_condition, out_of_bounds);
+ int32_t kinds[] = {// Handled by if_fast_packed.
+ FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+ // Handled by if_fast_holey.
+ FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+ // Handled by if_fast_double.
+ FAST_DOUBLE_ELEMENTS,
+ // Handled by if_fast_holey_double.
+ FAST_HOLEY_DOUBLE_ELEMENTS};
+ Label* labels[] = {// FAST_{SMI,}_ELEMENTS
+ &if_fast_packed, &if_fast_packed,
+ // FAST_HOLEY_{SMI,}_ELEMENTS
+ &if_fast_holey, &if_fast_holey,
+ // FAST_DOUBLE_ELEMENTS
+ &if_fast_double,
+ // FAST_HOLEY_DOUBLE_ELEMENTS
+ &if_fast_holey_double};
+ Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
+ arraysize(kinds));
+
+ Bind(&if_fast_packed);
+ {
+ Comment("fast packed elements");
+ Return(LoadFixedArrayElement(elements, intptr_index));
+ }
+
+ Bind(&if_fast_holey);
+ {
+ Comment("fast holey elements");
+ Node* element = LoadFixedArrayElement(elements, intptr_index);
+ GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+ Return(element);
+ }
+
+ Bind(&if_fast_double);
+ {
+ Comment("packed double elements");
+ var_double_value->Bind(LoadFixedDoubleArrayElement(elements, intptr_index,
+ MachineType::Float64()));
+ Goto(rebox_double);
+ }
+
+ Bind(&if_fast_holey_double);
+ {
+ Comment("holey double elements");
+ Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
+ MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, if_hole);
+ var_double_value->Bind(value);
+ Goto(rebox_double);
+ }
+
+ Bind(&if_nonfast);
+ {
+ STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+ GotoIf(Int32GreaterThanOrEqual(
+ elements_kind,
+ Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+ &if_typed_array);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+ &if_dictionary);
+ Goto(unimplemented_elements_kind);
+ }
+
+ Bind(&if_dictionary);
+ {
+ Comment("dictionary elements");
+ GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+ Variable var_entry(this, MachineType::PointerRepresentation());
+ Label if_found(this);
+ NumberDictionaryLookup<SeededNumberDictionary>(
+ elements, intptr_index, &if_found, &var_entry, if_hole);
+ Bind(&if_found);
+ // Check that the value is a data property.
+ Node* details_index = EntryToIndex<SeededNumberDictionary>(
+ var_entry.value(), SeededNumberDictionary::kEntryDetailsIndex);
+ Node* details = SmiToWord32(LoadFixedArrayElement(elements, details_index));
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+ // TODO(jkummerow): Support accessors without missing?
+ GotoUnless(Word32Equal(kind, Int32Constant(kData)), miss);
+ // Finally, load the value.
+ Node* value_index = EntryToIndex<SeededNumberDictionary>(
+ var_entry.value(), SeededNumberDictionary::kEntryValueIndex);
+ Return(LoadFixedArrayElement(elements, value_index));
+ }
+
+ Bind(&if_typed_array);
+ {
+ Comment("typed elements");
+ // Check if buffer has been neutered.
+ Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+ GotoIf(IsDetachedBuffer(buffer), miss);
+
+ // Bounds check.
+ Node* length =
+ SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
+ GotoUnless(UintPtrLessThan(intptr_index, length), out_of_bounds);
+
+ // Backing store = external_pointer + base_pointer.
+ Node* external_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+ MachineType::Pointer());
+ Node* base_pointer =
+ LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+ Node* backing_store =
+ IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
+
+ Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+ int16_elements(this), uint32_elements(this), int32_elements(this),
+ float32_elements(this), float64_elements(this);
+ Label* elements_kind_labels[] = {
+ &uint8_elements, &uint8_elements, &int8_elements,
+ &uint16_elements, &int16_elements, &uint32_elements,
+ &int32_elements, &float32_elements, &float64_elements};
+ int32_t elements_kinds[] = {
+ UINT8_ELEMENTS, UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+ UINT16_ELEMENTS, INT16_ELEMENTS, UINT32_ELEMENTS,
+ INT32_ELEMENTS, FLOAT32_ELEMENTS, FLOAT64_ELEMENTS};
+ const size_t kTypedElementsKindCount =
+ LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+ FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+ DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+ kTypedElementsKindCount);
+ Bind(&uint8_elements);
+ {
+ Comment("UINT8_ELEMENTS"); // Handles UINT8_CLAMPED_ELEMENTS too.
+ Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
+ Return(SmiFromWord32(element));
+ }
+ Bind(&int8_elements);
+ {
+ Comment("INT8_ELEMENTS");
+ Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
+ Return(SmiFromWord32(element));
+ }
+ Bind(&uint16_elements);
+ {
+ Comment("UINT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Uint16(), backing_store, index);
+ Return(SmiFromWord32(element));
+ }
+ Bind(&int16_elements);
+ {
+ Comment("INT16_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(1));
+ Node* element = Load(MachineType::Int16(), backing_store, index);
+ Return(SmiFromWord32(element));
+ }
+ Bind(&uint32_elements);
+ {
+ Comment("UINT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Uint32(), backing_store, index);
+ Return(ChangeUint32ToTagged(element));
+ }
+ Bind(&int32_elements);
+ {
+ Comment("INT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Int32(), backing_store, index);
+ Return(ChangeInt32ToTagged(element));
+ }
+ Bind(&float32_elements);
+ {
+ Comment("FLOAT32_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(2));
+ Node* element = Load(MachineType::Float32(), backing_store, index);
+ var_double_value->Bind(ChangeFloat32ToFloat64(element));
+ Goto(rebox_double);
+ }
+ Bind(&float64_elements);
+ {
+ Comment("FLOAT64_ELEMENTS");
+ Node* index = WordShl(intptr_index, IntPtrConstant(3));
+ Node* element = Load(MachineType::Float64(), backing_store, index);
+ var_double_value->Bind(element);
+ Goto(rebox_double);
+ }
+ }
+}
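All the typed-array cases follow one pattern: compute the backing-store address, then scale the index by log2 of the element size, which is why 16-bit kinds shift by 1, 32-bit kinds by 2 and FLOAT64 by 3. As a compact model, with shift_for_kind as a hypothetical per-kind constant:

// Byte offset per element kind: offset = index << shift.
//   UINT8 / INT8 / UINT8_CLAMPED -> shift 0 (1 byte)
//   UINT16 / INT16               -> shift 1 (2 bytes)
//   UINT32 / INT32 / FLOAT32     -> shift 2 (4 bytes)
//   FLOAT64                      -> shift 3 (8 bytes)
intptr_t offset = index << shift_for_kind;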
+
+void AccessorAssemblerImpl::CheckPrototype(Node* prototype_cell, Node* name,
+ Label* miss) {
+ Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+ Label done(this);
+ Label if_property_cell(this), if_dictionary_object(this);
+
+ // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+ Branch(WordEqual(LoadMap(maybe_prototype),
+ LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+ &if_property_cell, &if_dictionary_object);
+
+ Bind(&if_dictionary_object);
+ {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+ NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+ Goto(&done);
+ }
+
+ Bind(&if_property_cell);
+ {
+ // Ensure the property cell still contains the hole.
+ Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+ GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+ Goto(&done);
+ }
+
+ Bind(&done);
+}
+
+void AccessorAssemblerImpl::NameDictionaryNegativeLookup(Node* object,
+ Node* name,
+ Label* miss) {
+ CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+ Node* properties = LoadProperties(object);
+ // Ensure the property does not exist in a dictionary-mode object.
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label done(this);
+ NameDictionaryLookup<NameDictionary>(properties, name, miss, &var_name_index,
+ &done);
+ Bind(&done);
+}
+
+//////////////////// Stub cache access helpers.
+
+enum AccessorAssemblerImpl::StubCacheTable : int {
+ kPrimary = static_cast<int>(StubCache::kPrimary),
+ kSecondary = static_cast<int>(StubCache::kSecondary)
+};
+
+Node* AccessorAssemblerImpl::StubCachePrimaryOffset(Node* name, Node* map) {
+ // See v8::internal::StubCache::PrimaryOffset().
+ STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
+ // Compute the hash of the name (use entire hash field).
+ Node* hash_field = LoadNameHashField(name);
+ CSA_ASSERT(this,
+ Word32Equal(Word32And(hash_field,
+ Int32Constant(Name::kHashNotComputedMask)),
+ Int32Constant(0)));
+
+ // Using only the low bits in 64-bit mode is unlikely to increase the
+ // risk of collision even if the heap is spread over an area larger than
+ // 4Gb (and not at all if it isn't).
+ Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
+ Node* hash = Int32Add(hash_field, map32);
+ // Base the offset on a simple combination of name and map.
+ hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+ uint32_t mask = (StubCache::kPrimaryTableSize - 1)
+ << StubCache::kCacheIndexShift;
+ return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
+}
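The primary offset is a plain scalar hash: add the map bits to the name hash, xor in the magic constant, and mask down to a multiple of the entry stride. The node graph above computes exactly this:

// Scalar model of StubCachePrimaryOffset.
uint32_t PrimaryOffset(uint32_t hash_field, uint32_t map_bits) {
  uint32_t hash = (hash_field + map_bits) ^ StubCache::kPrimaryMagic;
  uint32_t mask =
      (StubCache::kPrimaryTableSize - 1) << StubCache::kCacheIndexShift;
  return hash & mask;
}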
+
+Node* AccessorAssemblerImpl::StubCacheSecondaryOffset(Node* name, Node* seed) {
+ // See v8::internal::StubCache::SecondaryOffset().
+
+ // Use the seed from the primary cache in the secondary cache.
+ Node* name32 = TruncateWordToWord32(BitcastTaggedToWord(name));
+ Node* hash = Int32Sub(TruncateWordToWord32(seed), name32);
+ hash = Int32Add(hash, Int32Constant(StubCache::kSecondaryMagic));
+ int32_t mask = (StubCache::kSecondaryTableSize - 1)
+ << StubCache::kCacheIndexShift;
+ return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
+}
+
+void AccessorAssemblerImpl::TryProbeStubCacheTable(
+ StubCache* stub_cache, StubCacheTable table_id, Node* entry_offset,
+ Node* name, Node* map, Label* if_handler, Variable* var_handler,
+ Label* if_miss) {
+ StubCache::Table table = static_cast<StubCache::Table>(table_id);
+#ifdef DEBUG
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+ Goto(if_miss);
+ return;
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+ Goto(if_miss);
+ return;
+ }
+#endif
+  // The {entry_offset} holds the entry offset times four (due to masking
+  // and shifting optimizations).
+ const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+ entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
+
+ // Check that the key in the entry matches the name.
+ Node* key_base =
+ ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
+ Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
+ GotoIf(WordNotEqual(name, entry_key), if_miss);
+
+ // Get the map entry from the cache.
+ DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
+ stub_cache->key_reference(table).address());
+ Node* entry_map =
+ Load(MachineType::Pointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
+ GotoIf(WordNotEqual(map, entry_map), if_miss);
+
+ DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
+ stub_cache->key_reference(table).address());
+ Node* handler = Load(MachineType::TaggedPointer(), key_base,
+ IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+
+ // We found the handler.
+ var_handler->Bind(handler);
+ Goto(if_handler);
+}
+
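Note: the probe depends on a fixed three-pointer entry layout, which the two
DCHECK_EQs above pin down. A sketch of the assumed shape (field types are
illustrative):

  // Key at offset 0, handler at kPointerSize, map at 2 * kPointerSize.
  // kMultiplier rescales the precomputed offset (in units of four bytes,
  // see the comment above) to this three-pointer entry size.
  struct StubCacheEntrySketch {
    Name* key;    // compared against the looked-up name
    Code* value;  // the handler returned on a hit
    Map* map;     // compared against the receiver map
  };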
+void AccessorAssemblerImpl::TryProbeStubCache(StubCache* stub_cache,
+ Node* receiver, Node* name,
+ Label* if_handler,
+ Variable* var_handler,
+ Label* if_miss) {
+ Label try_secondary(this), miss(this);
+
+ Counters* counters = isolate()->counters();
+ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+ // Check that the {receiver} isn't a smi.
+ GotoIf(TaggedIsSmi(receiver), &miss);
+
+ Node* receiver_map = LoadMap(receiver);
+
+ // Probe the primary table.
+ Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
+ TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
+ receiver_map, if_handler, var_handler, &try_secondary);
+
+ Bind(&try_secondary);
+ {
+ // Probe the secondary table.
+ Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
+ TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
+ receiver_map, if_handler, var_handler, &miss);
+ }
+
+ Bind(&miss);
+ {
+ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+ Goto(if_miss);
+ }
+}
+
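Note: taken together, this is a classic two-level open-addressed cache: a hit
in either table yields a handler, and a double miss falls through to the
runtime. In condensed form (MatchEntrySketch, HashField and Word32 are
hypothetical helpers; sketch only):

  Code* ProbeSketch(StubCache* cache, Map* map, Name* name) {
    uint32_t primary = PrimaryOffsetSketch(HashField(name), Word32(map));
    Code* handler =
        MatchEntrySketch(cache, StubCache::kPrimary, primary, name, map);
    if (handler != nullptr) return handler;
    uint32_t secondary = SecondaryOffsetSketch(primary, Word32(name));
    handler =
        MatchEntrySketch(cache, StubCache::kSecondary, secondary, name, map);
    if (handler != nullptr) return handler;
    return nullptr;  // caller jumps to the miss label
  }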
+//////////////////// Entry points into private implementation (one per stub).
+
+void AccessorAssemblerImpl::LoadIC(const LoadICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ { HandleLoadICHandlerCase(p, var_handler.value(), &miss); }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("LoadIC_try_polymorphic");
+ GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+
+ TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+ &if_handler, &var_handler, &miss);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+ p->slot, p->vector);
+ }
+}
+
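Note: LoadIC, KeyedLoadIC and StoreIC all walk the same cascade over the
feedback slot: monomorphic, then polymorphic, then megamorphic, then miss. A
condensed sketch of the dispatch (Handler and every helper here are
hypothetical names, not V8 API):

  Handler DispatchSketch(Object* feedback, Map* receiver_map, Name* name) {
    if (IsWeakCellMatching(feedback, receiver_map))  // monomorphic
      return HandlerNextToCell(feedback);
    if (IsFixedArray(feedback))                      // polymorphic
      return SearchMapHandlerPairs(feedback, receiver_map);
    if (IsMegamorphicSentinel(feedback))             // megamorphic
      return ProbeStubCache(receiver_map, name);
    return MissToRuntime();                          // uninitialized etc.
  }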
+void AccessorAssemblerImpl::LoadICProtoArray(
+ const LoadICParameters* p, Node* handler,
+ bool throw_reference_error_if_nonexistent) {
+ Label miss(this);
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+ CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+ Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+ Node* handler_flags = SmiUntag(smi_handler);
+
+ Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+ Node* holder =
+ EmitLoadICProtoArrayCheck(p, handler, handler_length, handler_flags,
+ &miss, throw_reference_error_if_nonexistent);
+
+ HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, kOnlyProperties);
+
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+ p->slot, p->vector);
+ }
+}
+
+void AccessorAssemblerImpl::LoadGlobalIC(const LoadICParameters* p,
+ TypeofMode typeof_mode) {
+ Label try_handler(this), call_handler(this), miss(this);
+ Node* weak_cell =
+ LoadFixedArrayElement(p->vector, p->slot, 0, SMI_PARAMETERS);
+ CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
+
+ // Load value or try handler case if the {weak_cell} is cleared.
+ Node* property_cell = LoadWeakCellValue(weak_cell, &try_handler);
+ CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
+
+ Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), &miss);
+ Return(value);
+
+ Node* handler;
+ Bind(&try_handler);
+ {
+ handler =
+ LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+ CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+ GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+ &miss);
+ GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+
+ bool throw_reference_error_if_nonexistent =
+ typeof_mode == NOT_INSIDE_TYPEOF;
+ HandleLoadGlobalICHandlerCase(p, handler, &miss,
+ throw_reference_error_if_nonexistent);
+ }
+
+ Bind(&call_handler);
+ {
+ LoadWithVectorDescriptor descriptor(isolate());
+ Node* native_context = LoadNativeContext(p->context);
+ Node* receiver =
+ LoadContextElement(native_context, Context::EXTENSION_INDEX);
+ TailCallStub(descriptor, handler, p->context, receiver, p->name, p->slot,
+ p->vector);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context, p->name, p->slot,
+ p->vector);
+ }
+}
+
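Note: LoadGlobalIC keeps two pieces of feedback per slot: a WeakCell pointing
at the global's PropertyCell (answered with a single load when intact), and a
handler in the adjacent slot for everything else. Roughly (a sketch; the_hole
and Miss are stand-ins):

  // Fast path: slot -> WeakCell -> PropertyCell -> value.
  Object* LoadGlobalFastPathSketch(WeakCell* cell) {
    PropertyCell* property_cell = PropertyCell::cast(cell->value());
    Object* value = property_cell->value();
    if (value == the_hole) return Miss();  // cell has been invalidated
    return value;
  }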
+void AccessorAssemblerImpl::KeyedLoadIC(const LoadICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ try_polymorphic_name(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ { HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements); }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("KeyedLoadIC_try_polymorphic");
+ GotoUnless(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ Comment("KeyedLoadIC_try_megamorphic");
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
+ // TODO(jkummerow): Inline this? Or some of it?
+ TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
+ p->receiver, p->name, p->slot, p->vector);
+ }
+ Bind(&try_polymorphic_name);
+ {
+ // We might have a name in feedback, and a fixed array in the next slot.
+ Comment("KeyedLoadIC_try_polymorphic_name");
+ GotoUnless(WordEqual(feedback, p->name), &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ Node* offset = ElementOffsetFromIndex(
+ p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+ FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+ Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+ 1);
+ }
+ Bind(&miss);
+ {
+ Comment("KeyedLoadIC_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+}
+
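Note: the try_polymorphic_name case relies on a feedback layout where the
slot holds the name itself and the following slot holds the map/handler
FixedArray; the ElementOffsetFromIndex call above addresses that next slot by
adding one kPointerSize to the header offset. A sketch, treating the feedback
vector as a plain FixedArray for illustration:

  Object* PolymorphicNameFeedbackSketch(FixedArray* vector, int slot) {
    DCHECK(vector->get(slot)->IsName());  // the one key seen so far
    return vector->get(slot + 1);         // FixedArray of map/handler pairs
  }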
+void AccessorAssemblerImpl::KeyedLoadICGeneric(const LoadICParameters* p) {
+ Variable var_index(this, MachineType::PointerRepresentation());
+ Variable var_details(this, MachineRepresentation::kWord32);
+ Variable var_value(this, MachineRepresentation::kTagged);
+ Label if_index(this), if_unique_name(this), if_element_hole(this),
+ if_oob(this), slow(this), stub_cache_miss(this),
+ if_property_dictionary(this), if_found_on_receiver(this);
+
+ Node* receiver = p->receiver;
+ GotoIf(TaggedIsSmi(receiver), &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ // Receivers requiring non-standard element accesses (interceptors, access
+ // checks, strings and string wrappers, proxies) are handled in the runtime.
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &slow);
+
+ Node* key = p->name;
+ TryToName(key, &if_index, &var_index, &if_unique_name, &slow);
+
+ Bind(&if_index);
+ {
+ Comment("integer index");
+ Node* index = var_index.value();
+ Node* elements = LoadElements(receiver);
+ Node* elements_kind = LoadMapElementsKind(receiver_map);
+ Node* is_jsarray_condition =
+ Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+ Variable var_double_value(this, MachineRepresentation::kFloat64);
+ Label rebox_double(this, &var_double_value);
+
+ // Unimplemented elements kinds fall back to a runtime call.
+ Label* unimplemented_elements_kind = &slow;
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+ EmitElementLoad(receiver, elements, elements_kind, index,
+ is_jsarray_condition, &if_element_hole, &rebox_double,
+ &var_double_value, unimplemented_elements_kind, &if_oob,
+ &slow);
+
+ Bind(&rebox_double);
+ Return(AllocateHeapNumberWithValue(var_double_value.value()));
+ }
+
+ Bind(&if_oob);
+ {
+ Comment("out of bounds");
+ Node* index = var_index.value();
+ // Negative keys can't take the fast OOB path.
+ GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), &slow);
+ // Positive OOB indices are effectively the same as hole loads.
+ Goto(&if_element_hole);
+ }
+
+ Bind(&if_element_hole);
+ {
+ Comment("found the hole");
+ Label return_undefined(this);
+ BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, &slow);
+
+ Bind(&return_undefined);
+ Return(UndefinedConstant());
+ }
+
+ Node* properties = nullptr;
+ Bind(&if_unique_name);
+ {
+ Comment("key is unique name");
+ // Check if the receiver has fast or slow properties.
+ properties = LoadProperties(receiver);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+ &if_property_dictionary);
+
+ // Try looking up the property on the receiver; if unsuccessful, look
+ // for a handler in the stub cache.
+ Comment("DescriptorArray lookup");
+
+ // Skip linear search if there are too many descriptors.
+ // TODO(jkummerow): Consider implementing binary search.
+ // See also TryLookupProperty() which has the same limitation.
+ const int32_t kMaxLinear = 210;
+ Label stub_cache(this);
+ Node* bitfield3 = LoadMapBitField3(receiver_map);
+ Node* nof =
+ DecodeWordFromWord32<Map::NumberOfOwnDescriptorsBits>(bitfield3);
+ GotoIf(UintPtrLessThan(IntPtrConstant(kMaxLinear), nof), &stub_cache);
+ Node* descriptors = LoadMapDescriptors(receiver_map);
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label if_descriptor_found(this);
+ DescriptorLookupLinear(key, descriptors, nof, &if_descriptor_found,
+ &var_name_index, &stub_cache);
+
+ Bind(&if_descriptor_found);
+ {
+ LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+ var_name_index.value(), &var_details,
+ &var_value);
+ Goto(&if_found_on_receiver);
+ }
+
+ Bind(&stub_cache);
+ {
+ Comment("stub cache probe for fast property load");
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ { HandleLoadICHandlerCase(p, var_handler.value(), &slow); }
+
+ Bind(&stub_cache_miss);
+ {
+ Comment("KeyedLoadGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+ p->name, p->slot, p->vector);
+ }
+ }
+ }
+
+ Bind(&if_property_dictionary);
+ {
+ Comment("dictionary property load");
+ // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+ // seeing global objects here (which would need special handling).
+
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label dictionary_found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+ &var_name_index, &slow);
+ Bind(&dictionary_found);
+ {
+ LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+ &var_details, &var_value);
+ Goto(&if_found_on_receiver);
+ }
+ }
+
+ Bind(&if_found_on_receiver);
+ {
+ Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+ p->context, receiver, &slow);
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+ Return(value);
+ }
+
+ Bind(&slow);
+ {
+ Comment("KeyedLoadGeneric_slow");
+ IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
+ // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+ TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
+ p->name);
+ }
+}
+
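Note: the generic stub bounds its inline descriptor scan at kMaxLinear = 210
entries; past that cutoff, or when the scan fails, it falls back to the stub
cache probe, which is O(1) with a larger constant. A sketch of the lookup
policy (Handler and the helpers are hypothetical):

  Handler FastPropertyLookupSketch(DescriptorArray* descriptors, int nof,
                                   Name* key) {
    const int kMaxLinear = 210;  // same cutoff as above
    if (nof > kMaxLinear) return ProbeStubCache(key);
    for (int i = 0; i < nof; i++) {
      if (descriptors->GetKey(i) == key) return LoadFromDescriptor(i);
    }
    return ProbeStubCache(key);  // not among the own fast properties
  }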
+void AccessorAssemblerImpl::StoreIC(const StoreICParameters* p) {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ miss(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ Comment("StoreIC_if_handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+ }
+
+ Bind(&try_polymorphic);
+ {
+ // Check polymorphic case.
+ Comment("StoreIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+ &miss, 2);
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &miss);
+
+ TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+ &if_handler, &var_handler, &miss);
+ }
+ Bind(&miss);
+ {
+ TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
+void AccessorAssemblerImpl::KeyedStoreIC(const StoreICParameters* p,
+ LanguageMode language_mode) {
+ // TODO(ishell): defer blocks when it works.
+ Label miss(this /*, Label::kDeferred*/);
+ {
+ Variable var_handler(this, MachineRepresentation::kTagged);
+
+ // TODO(ishell): defer blocks when it works.
+ Label if_handler(this, &var_handler), try_polymorphic(this),
+ try_megamorphic(this /*, Label::kDeferred*/),
+ try_polymorphic_name(this /*, Label::kDeferred*/);
+
+ Node* receiver_map = LoadReceiverMap(p->receiver);
+ GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+ // Check monomorphic case.
+ Node* feedback =
+ TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+ &var_handler, &try_polymorphic);
+ Bind(&if_handler);
+ {
+ Comment("KeyedStoreIC_if_handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+ }
+
+ Bind(&try_polymorphic);
+ {
+      // Check polymorphic case.
+ Comment("KeyedStoreIC_try_polymorphic");
+ GotoUnless(
+ WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+ &try_megamorphic);
+ Label if_transition_handler(this);
+ Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+ HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+ &var_handler, &if_transition_handler,
+ &var_transition_map_cell, &miss);
+ Bind(&if_transition_handler);
+ Comment("KeyedStoreIC_polymorphic_transition");
+ {
+ Node* handler = var_handler.value();
+
+ Label call_handler(this);
+ Variable var_code_handler(this, MachineRepresentation::kTagged);
+ var_code_handler.Bind(handler);
+ GotoUnless(IsTuple2Map(LoadMap(handler)), &call_handler);
+ {
+ CSA_ASSERT(this, IsTuple2Map(LoadMap(handler)));
+
+ // Check validity cell.
+ Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+ Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+ GotoIf(
+ WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
+ &miss);
+
+ var_code_handler.Bind(
+ LoadObjectField(handler, Tuple2::kValue2Offset));
+ Goto(&call_handler);
+ }
+
+ Bind(&call_handler);
+ {
+ Node* code_handler = var_code_handler.value();
+ CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+ Node* transition_map =
+ LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+ StoreTransitionDescriptor descriptor(isolate());
+ TailCallStub(descriptor, code_handler, p->context, p->receiver,
+ p->name, transition_map, p->value, p->slot, p->vector);
+ }
+ }
+ }
+
+ Bind(&try_megamorphic);
+ {
+ // Check megamorphic case.
+ Comment("KeyedStoreIC_try_megamorphic");
+ GotoUnless(
+ WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+ &try_polymorphic_name);
+ TailCallStub(
+ CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+ p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+ }
+
+ Bind(&try_polymorphic_name);
+ {
+ // We might have a name in feedback, and a fixed array in the next slot.
+ Comment("KeyedStoreIC_try_polymorphic_name");
+ GotoUnless(WordEqual(feedback, p->name), &miss);
+ // If the name comparison succeeded, we know we have a FixedArray with
+ // at least one map/handler pair.
+ Node* offset = ElementOffsetFromIndex(
+ p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+ FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+ Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+ HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
+ &miss, 1);
+ }
+ }
+ Bind(&miss);
+ {
+ Comment("KeyedStoreIC_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+ p->vector, p->receiver, p->name);
+ }
+}
+
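Note: the transition path above accepts handlers in two shapes: raw code, or
a Tuple2 of (validity cell, code) used when the store must also confirm that
the prototype chain is still in the Map::kPrototypeChainValid state. A sketch
of the wrapped shape, with fields named after the offsets read above:

  struct TransitionHandlerSketch {
    Cell* validity_cell;  // kValue1Offset; must hold kPrototypeChainValid
    Code* code;           // kValue2Offset; the actual store handler
  };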
+//////////////////// Public methods.
+
+void AccessorAssemblerImpl::GenerateLoadIC() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ LoadIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateLoadICTrampoline() {
+ typedef LoadDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadTypeFeedbackVectorForStub();
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ LoadIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateLoadICProtoArray(
+ bool throw_reference_error_if_nonexistent) {
+ typedef LoadICProtoArrayStub::Descriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* handler = Parameter(Descriptor::kHandler);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
+}
+
+void AccessorAssemblerImpl::GenerateLoadField() {
+ typedef LoadFieldStub::Descriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = nullptr;
+ Node* slot = nullptr;
+ Node* vector = nullptr;
+ Node* context = Parameter(Descriptor::kContext);
+ LoadICParameters p(context, receiver, name, slot, vector);
+
+ HandleLoadICSmiHandlerCase(&p, receiver, Parameter(Descriptor::kSmiHandler),
+ nullptr, kOnlyProperties);
+}
+
+void AccessorAssemblerImpl::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
+ typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, nullptr, name, slot, vector);
+ LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssemblerImpl::GenerateLoadGlobalICTrampoline(
+ TypeofMode typeof_mode) {
+ typedef LoadGlobalDescriptor Descriptor;
+
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadTypeFeedbackVectorForStub();
+
+ LoadICParameters p(context, nullptr, name, slot, vector);
+ LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssemblerImpl::GenerateKeyedLoadICTF() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateKeyedLoadICTrampolineTF() {
+ typedef LoadDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadTypeFeedbackVectorForStub();
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateKeyedLoadICMegamorphic() {
+ typedef LoadWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ LoadICParameters p(context, receiver, name, slot, vector);
+ KeyedLoadICGeneric(&p);
+}
+
+void AccessorAssemblerImpl::GenerateStoreIC() {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ StoreICParameters p(context, receiver, name, value, slot, vector);
+ StoreIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateStoreICTrampoline() {
+ typedef StoreDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadTypeFeedbackVectorForStub();
+
+ StoreICParameters p(context, receiver, name, value, slot, vector);
+ StoreIC(&p);
+}
+
+void AccessorAssemblerImpl::GenerateKeyedStoreICTF(LanguageMode language_mode) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
+ StoreICParameters p(context, receiver, name, value, slot, vector);
+ KeyedStoreIC(&p, language_mode);
+}
+
+void AccessorAssemblerImpl::GenerateKeyedStoreICTrampolineTF(
+ LanguageMode language_mode) {
+ typedef StoreDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* context = Parameter(Descriptor::kContext);
+ Node* vector = LoadTypeFeedbackVectorForStub();
+
+ StoreICParameters p(context, receiver, name, value, slot, vector);
+ KeyedStoreIC(&p, language_mode);
+}
+
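Note: each IC has a paired trampoline that differs only in where the feedback
vector comes from: the full stub receives it as a descriptor parameter, while
the trampoline reconstructs it for the current function. Condensed (the
boolean split is illustrative, not how the generators are parameterized):

  Node* vector = use_trampoline ? LoadTypeFeedbackVectorForStub()
                                : Parameter(Descriptor::kVector);
  // Everything after this point is identical between the two variants.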
+//////////////////// AccessorAssembler implementation.
+
+#define DISPATCH_TO_IMPL(Name) \
+ void AccessorAssembler::Generate##Name(CodeAssemblerState* state) { \
+ AccessorAssemblerImpl assembler(state); \
+ assembler.Generate##Name(); \
+ }
+
+ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE(DISPATCH_TO_IMPL)
+#undef DISPATCH_TO_IMPL
+
+void AccessorAssembler::GenerateLoadICProtoArray(
+ CodeAssemblerState* state, bool throw_reference_error_if_nonexistent) {
+ AccessorAssemblerImpl assembler(state);
+ assembler.GenerateLoadICProtoArray(throw_reference_error_if_nonexistent);
+}
+
+void AccessorAssembler::GenerateLoadGlobalIC(CodeAssemblerState* state,
+ TypeofMode typeof_mode) {
+ AccessorAssemblerImpl assembler(state);
+ assembler.GenerateLoadGlobalIC(typeof_mode);
+}
+
+void AccessorAssembler::GenerateLoadGlobalICTrampoline(
+ CodeAssemblerState* state, TypeofMode typeof_mode) {
+ AccessorAssemblerImpl assembler(state);
+ assembler.GenerateLoadGlobalICTrampoline(typeof_mode);
+}
+
+void AccessorAssembler::GenerateKeyedStoreICTF(CodeAssemblerState* state,
+ LanguageMode language_mode) {
+ AccessorAssemblerImpl assembler(state);
+ assembler.GenerateKeyedStoreICTF(language_mode);
+}
+
+void AccessorAssembler::GenerateKeyedStoreICTrampolineTF(
+ CodeAssemblerState* state, LanguageMode language_mode) {
+ AccessorAssemblerImpl assembler(state);
+ assembler.GenerateKeyedStoreICTrampolineTF(language_mode);
+}
+
+#undef ACCESSOR_ASSEMBLER_PUBLIC_INTERFACE
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
new file mode 100644
index 0000000000..3b75c2e54d
--- /dev/null
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CodeAssemblerState;
+}
+
+class AccessorAssembler {
+ public:
+ static void GenerateLoadIC(compiler::CodeAssemblerState* state);
+ static void GenerateLoadICTrampoline(compiler::CodeAssemblerState* state);
+ static void GenerateLoadICProtoArray(
+ compiler::CodeAssemblerState* state,
+ bool throw_reference_error_if_nonexistent);
+ static void GenerateLoadGlobalIC(compiler::CodeAssemblerState* state,
+ TypeofMode typeof_mode);
+ static void GenerateLoadGlobalICTrampoline(
+ compiler::CodeAssemblerState* state, TypeofMode typeof_mode);
+ static void GenerateKeyedLoadICTF(compiler::CodeAssemblerState* state);
+ static void GenerateKeyedLoadICTrampolineTF(
+ compiler::CodeAssemblerState* state);
+ static void GenerateKeyedLoadICMegamorphic(
+ compiler::CodeAssemblerState* state);
+ static void GenerateLoadField(compiler::CodeAssemblerState* state);
+ static void GenerateStoreIC(compiler::CodeAssemblerState* state);
+ static void GenerateStoreICTrampoline(compiler::CodeAssemblerState* state);
+ static void GenerateKeyedStoreICTF(compiler::CodeAssemblerState* state,
+ LanguageMode language_mode);
+ static void GenerateKeyedStoreICTrampolineTF(
+ compiler::CodeAssemblerState* state, LanguageMode language_mode);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
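Note: the public class is a thin, stateless facade; each Generate* method
instantiates the private AccessorAssemblerImpl over the caller's
CodeAssemblerState. A stub would typically delegate like this (the stub class
is hypothetical, but the pattern matches the dispatch macros above):

  void LoadICStubSketch::GenerateAssembly(
      compiler::CodeAssemblerState* state) const {
    AccessorAssembler::GenerateLoadIC(state);
  }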
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 6145d43641..3f2d0e42de 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -135,14 +135,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(sp, sp, Operand(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -189,18 +181,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ ldr(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -355,58 +335,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ ldr(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ tst(scratch, Operand(Map::Deprecated::kMask));
- __ b(ne, miss);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ ldr(scratch,
- FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ cmp(value_reg, scratch);
- __ b(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ b(ne, miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -538,13 +466,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(r0, value);
- __ Ret();
-}
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index babf497a5b..fad0737a1c 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -19,183 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. It can be the same as elements or name, in which
-// case that register is clobbered when the miss label is not taken.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY))
- << kSmiTagSize;
- __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
- __ b(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = r0;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), r0, r3, r4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Perform tail call to the entry.
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -219,314 +42,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch = r4;
- Register address = r5;
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- scratch, address));
-
- if (check_map == kCheckMap) {
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
- Label holecheck_passed1;
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
- __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
- __ b(ne, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch, key, Operand(Smi::FromInt(1)));
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch, key, Operand(Smi::FromInt(1)));
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
- __ str(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ b(ne, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- __ add(address, elements,
- Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
- kHeapObjectTag));
- __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(scratch, key, Operand(Smi::FromInt(1)));
- __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ b(ne, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // ---------- S t a t e --------------
- // -- r0 : value
- // -- r1 : key
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
-
- // Register usage.
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(receiver.is(r1));
- DCHECK(key.is(r2));
- DCHECK(value.is(r0));
- Register receiver_map = r3;
- Register elements_map = r6;
- Register elements = r9; // Elements array of the receiver.
- // r4 and r5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ b(ne, &slow);
- // Check if the object is a JS array or not.
- __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmp(r4, Operand(JS_ARRAY_TYPE));
- __ b(eq, &array);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ cmp(r4, Operand(JS_OBJECT_TYPE));
- __ b(lo, &slow);
-
- // Object case: Check key against length in the elements array.
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(lo, &fast_object);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r0: value.
- // r1: key.
- // r2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
- __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(r4, &slow);
-
- // We use register r8, because otherwise probing the megamorphic stub cache
- // would require pushing temporaries on the stack.
- // TODO(mvstanton): quit using register r8 when
- // FLAG_enable_embedded_constant_pool is turned on.
- DCHECK(!FLAG_enable_embedded_constant_pool);
- Register temporary2 = r8;
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
- temporary2, r6, r9);
- // Cache miss.
- __ b(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
-  __ b(ne, &slow);  // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &slow);
- __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ b(ne, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ b(ne, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
- __ bind(&array);
- __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmp(key, Operand(ip));
- __ b(hs, &extra);
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r5;
- DCHECK(receiver.is(r1));
- DCHECK(name.is(r2));
- DCHECK(value.is(r0));
- DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r3));
- DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r4));
-
- __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
- GenerateMiss(masm);
-}
-
-
#undef __
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
deleted file mode 100644
index 318523199a..0000000000
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ mov(r0, Operand(Smi::FromInt(language_mode)));
- __ Push(r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
deleted file mode 100644
index b0f93e32dc..0000000000
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset, Register scratch, Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ add(base_addr, offset_scratch, Operand(key_offset));
-
- // Check that the key in the entry matches the name.
- __ ldr(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ b(ne, &miss);
-
- // Check the map matches.
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ b(ne, &miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Check scratch, extra and extra2 registers are valid.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, Operand(ip));
- __ eor(scratch, scratch, Operand(kPrimaryMagic));
- __ mov(ip, Operand(kPrimaryTableSize - 1));
- __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name));
- __ add(scratch, scratch, Operand(kSecondaryMagic));
- __ mov(ip, Operand(kSecondaryTableSize - 1));
- __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM
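For reference, the probe-index arithmetic that the deleted ARM GenerateProbe emitted can be restated as ordinary C++. This is a sketch only: the magic constants and table sizes below are placeholders, not the real StubCache values.

    #include <cstdint>

    constexpr uint32_t kCacheIndexShift = 2;          // offsets are scaled by 4
    constexpr uint32_t kPrimaryTableSize = 2048;      // placeholder
    constexpr uint32_t kSecondaryTableSize = 512;     // placeholder
    constexpr uint32_t kPrimaryMagic = 0x3d532433;    // placeholder
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // placeholder

    // Primary probe: scratch = (hash + map) ^ kPrimaryMagic, masked to a
    // scaled table index, mirroring the eor/and sequence above.
    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word) {
      uint32_t scratch = (name_hash_field + map_word) ^ kPrimaryMagic;
      return scratch & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary probe: derived from the primary index after a primary miss.
    uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word) {
      uint32_t scratch = primary_offset - name_word + kSecondaryMagic;
      return scratch & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }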
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 58d0bb7446..8c89908f4e 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -44,14 +44,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Drop(2);
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -91,18 +83,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ Ldr(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -386,57 +366,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ Mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ Ldrsw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, miss);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ Ldr(scratch,
- FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ Cmp(value_reg, scratch);
- __ B(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ B(ne, miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -572,13 +501,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(x0, value);
- __ Ret();
-}
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 0ced207d8a..04fdff76e1 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -15,164 +15,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-//       done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, scratch1, scratch2));
- DCHECK(!AreAliased(result, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal property.
- __ Bind(&done);
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ B(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- __ Ldr(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-//       done.
-// value: The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ Bind(&done);
-
- static const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- static const int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
- __ Tst(scratch1, kTypeAndReadOnlyMask);
- __ B(ne, miss);
-
- // Store the value at the masked, scaled index and return.
- static const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
- __ Str(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ Mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-}
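Both dictionary helpers above address a NameDictionary entry as three consecutive words (key, value, details) past a fixed header. A minimal C++ sketch of that offset arithmetic; the header constants are assumptions for illustration, not the real NameDictionary values.

    constexpr int kPointerSize = 8;                // arm64
    constexpr int kHeaderSize = 3 * kPointerSize;  // assumed
    constexpr int kElementsStartIndex = 3;         // assumed

    constexpr int kElementsStartOffset =
        kHeaderSize + kElementsStartIndex * kPointerSize;  // key word of entry 0
    constexpr int kValueOffset = kElementsStartOffset + 1 * kPointerSize;
    constexpr int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;

    // scratch2 points at the key word of the entry found by the probe, so the
    // loads above read the details and value words relative to that base.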
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = x0;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
- Label slow;
-
- __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), x0, x3, x4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ Bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
- ASM_LOCATION("LoadIC::GenerateMiss");
-
- DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
-
- // Perform tail call to the entry.
- __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
- LoadWithVectorDescriptor::NameRegister(),
- LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister());
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
-
- __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
- LoadWithVectorDescriptor::NameRegister(),
- LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister());
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
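The load-miss stubs above all push the same four values before tail-calling the runtime. Sketched as a struct whose field order mirrors the pushes (the names are descriptive, not V8 identifiers):

    struct LoadMissArgs {
      void* receiver;  // LoadWithVectorDescriptor::ReceiverRegister()
      void* name;      // LoadWithVectorDescriptor::NameRegister()
      void* slot;      // smi-encoded feedback slot
      void* vector;    // the type feedback vector
    };  // consumed by Runtime::kLoadIC_Miss / Runtime::kKeyedLoadIC_Miss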
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -197,298 +39,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- x10, x11));
-
- Label transition_smi_elements;
- Label transition_double_elements;
- Label fast_double_without_map_check;
- Label non_double_value;
- Label finish_store;
-
- __ Bind(fast_object);
- if (check_map == kCheckMap) {
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(ne, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because there
- // may be a callback on the element.
- Label holecheck_passed;
- __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
- __ bind(&holecheck_passed);
-
- // Smi stores don't require further checks.
- __ JumpIfSmi(value, &finish_store);
-
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
- __ Bind(&finish_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
-
- Register address = x11;
- __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Str(value, MemOperand(address));
-
- Label dont_record_write;
- __ JumpIfSmi(value, &dont_record_write);
-
- // Update write barrier for the elements array address.
- __ Mov(x10, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- __ Bind(&dont_record_write);
- __ Ret();
-
-
- __ Bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so, go to
- // the runtime.
- __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
- __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
- __ Ldr(x11, MemOperand(x10));
- __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
- __ Bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Add(x10, key, Smi::FromInt(1));
- __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
-
- __ Bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
- __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&fast_double_without_map_check);
-
- __ Bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, x10, x11, slow);
-
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-
- __ Bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, x10, x11, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ B(&finish_store);
-}
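The three transition blocks above implement a small elements-kind lattice. A hedged C++ restatement (the enum values are illustrative, not the real ElementsKind ordering):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    ElementsKind TargetKind(ElementsKind current, bool value_is_smi,
                            bool value_is_heap_number) {
      if (value_is_smi) return current;  // smi stores need no transition
      if (current == FAST_SMI_ELEMENTS)
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number)
        return FAST_ELEMENTS;  // a non-number object cannot stay in a double array
      return current;
    }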
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
- Label slow;
- Label array;
- Label fast_object;
- Label extra;
- Label fast_object_grow;
- Label fast_double_grow;
- Label fast_double;
- Label maybe_name_key;
- Label miss;
-
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(receiver.is(x1));
- DCHECK(key.is(x2));
- DCHECK(value.is(x0));
-
- Register receiver_map = x3;
- Register elements = x4;
- Register elements_map = x5;
-
- __ JumpIfNotSmi(key, &maybe_name_key);
- __ JumpIfSmi(receiver, &slow);
- __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
-
- // Check if the object is a JS array or not.
- Register instance_type = x10;
- __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
- __ B(eq, &array);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ Cmp(instance_type, JS_OBJECT_TYPE);
- __ B(lo, &slow);
-
- // Object case: Check key against length in the elements array.
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(hi, &fast_object);
-
-
- __ Bind(&slow);
- // Slow case, handle jump to runtime.
- // Live values:
- // x0: value
- // x1: key
- // x2: receiver
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
- __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(x10, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
- x6, x7, x8);
- // Cache miss.
- __ B(&miss);
-
- __ Bind(&extra);
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
-
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(ls, &slow);
-
- __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ B(eq, &fast_object_grow);
- __ Cmp(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ B(eq, &fast_double_grow);
- __ B(&slow);
-
-
- __ Bind(&array);
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
-
- __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Cmp(x10, Operand::UntagSmi(key));
- __ B(eq, &extra); // We can handle the case where we are appending 1 element.
- __ B(lo, &slow);
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
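The label structure of the deleted GenerateMegamorphic amounts to the following dispatch, written as plain C++ for readability (a sketch under the same assumptions as the assembly above):

    #include <cstdint>

    enum StorePath { kFastStore, kGrowStore, kProbeStubCache, kRuntimeSlow };

    StorePath Classify(bool key_is_smi, bool key_is_unique_name,
                       bool receiver_is_smi, bool needs_access_check,
                       bool is_js_array, uint32_t index, uint32_t length) {
      if (!key_is_smi) return key_is_unique_name ? kProbeStubCache : kRuntimeSlow;
      if (receiver_is_smi || needs_access_check) return kRuntimeSlow;
      if (index < length) return kFastStore;                  // in bounds
      if (is_js_array && index == length) return kGrowStore;  // append one element
      return kRuntimeSlow;
    }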
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register value = StoreDescriptor::ValueRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register dictionary = x5;
- DCHECK(!AreAliased(value, receiver, name,
- StoreWithVectorDescriptor::SlotRegister(),
- StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));
-
- __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
- __ Ret();
-
- // Cache miss: Jump to runtime.
- __ Bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
- GenerateMiss(masm);
-}
-
-
Condition CompareIC::ComputeCondition(Token::Value op) {
switch (op) {
case Token::EQ_STRICT:
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
deleted file mode 100644
index c99c637ab1..0000000000
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
-
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ Mov(x10, Smi::FromInt(language_mode));
- __ Push(x10);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
deleted file mode 100644
index 81c820725a..0000000000
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// Probe the primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// The 'receiver', 'name' and 'offset' registers are preserved on a miss.
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset, Register scratch, Register scratch2,
- Register scratch3) {
- // Some code below relies on the fact that the Entry struct contains
- // 3 pointers (name, code, map).
- STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- Label miss;
-
- DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
- // Multiply by 3 because there are 3 fields per entry.
- __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
- // Calculate the base address of the entry.
- __ Mov(scratch, key_offset);
- __ Add(
- scratch, scratch,
- Operand(scratch3, LSL, kPointerSizeLog2 - StubCache::kCacheIndexShift));
-
- // Check that the key in the entry matches the name.
- __ Ldr(scratch2, MemOperand(scratch));
- __ Cmp(name, scratch2);
- __ B(ne, &miss);
-
- // Check the map matches.
- __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
- __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Cmp(scratch2, scratch3);
- __ B(ne, &miss);
-
- // Get the code entry from the cache.
- __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ B(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ B(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
- __ Br(scratch);
-
- // Miss: fall through.
- __ Bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Make sure extra and extra2 registers are valid.
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ICs are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Compute the hash for primary table.
- __ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
- __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Add(scratch, scratch, extra);
- __ Eor(scratch, scratch, kPrimaryMagic);
- __ And(scratch, scratch,
- Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary table.
- __ Sub(scratch, scratch, Operand(name));
- __ Add(scratch, scratch, Operand(kSecondaryMagic));
- __ And(scratch, scratch,
- Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ Bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_ARM64
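Unlike the 32-bit ARM version, the arm64 probe scales the entry index by the pointer size before adding it to the table base. As a standalone sketch (the shift constants are assumptions):

    #include <cstdint>

    constexpr unsigned kPointerSizeLog2 = 3;  // 8-byte pointers on arm64
    constexpr unsigned kCacheIndexShift = 2;  // assumed, as on ARM

    uintptr_t EntryAddress(uintptr_t key_table_base, uint32_t scaled_offset) {
      uint32_t entry = scaled_offset + (scaled_offset << 1);  // * 3 words/entry
      return key_table_base + (static_cast<uintptr_t>(entry)
                               << (kPointerSizeLog2 - kCacheIndexShift));
    }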
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 05e9031915..16aec0b494 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -24,60 +24,6 @@ Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
return handle(code);
}
-
-Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
- Handle<Name> name, Handle<Map> receiver_map) {
- Isolate* isolate = name->GetIsolate();
- if (receiver_map->prototype()->IsNull(isolate)) {
- // TODO(jkummerow/verwaest): If there is no prototype and the property
- // is nonexistent, introduce a builtin to handle this (fast properties
- // -> return undefined, dictionary properties -> do negative lookup).
- return Handle<Code>();
- }
- CacheHolderFlag flag;
- Handle<Map> stub_holder_map =
- IC::GetHandlerCacheHolder(receiver_map, false, isolate, &flag);
-
- // If no dictionary mode objects are present in the prototype chain, the load
- // nonexistent IC stub can be shared for all names for a given map and we use
- // the empty string for the map cache in that case. If there are dictionary
- // mode objects involved, we need to do negative lookups in the stub and
- // therefore the stub will be specific to the name.
- Handle<Name> cache_name =
- receiver_map->is_dictionary_map()
- ? name
- : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
- Handle<Map> current_map = stub_holder_map;
- Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
- while (true) {
- if (current_map->is_dictionary_map()) cache_name = name;
- if (current_map->prototype()->IsNull(isolate)) break;
- if (name->IsPrivate()) {
- // TODO(verwaest): Use nonexistent_private_symbol.
- cache_name = name;
- if (!current_map->has_hidden_prototype()) break;
- }
-
- last = handle(JSObject::cast(current_map->prototype()));
- current_map = handle(last->map());
- }
- // Compile the stub that is either shared for all names or
- // name specific if there are global objects involved.
- Handle<Code> handler = PropertyHandlerCompiler::Find(
- cache_name, stub_holder_map, Code::LOAD_IC, flag);
- if (!handler.is_null()) {
- TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
- return handler;
- }
-
- TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
- NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
- handler = compiler.CompileLoadNonexistent(cache_name);
- Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
- return handler;
-}
-
-
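The loop in the deleted ComputeLoadNonexistent decides whether the stub can be shared across names. The rule, restated as a predicate (sketch):

    // A name-specific stub is required as soon as any map in the prototype
    // chain is in dictionary mode, or the name is private; otherwise the stub
    // is cached under the shared nonexistent_symbol.
    bool NeedsNameSpecificStub(bool dictionary_map_in_chain,
                               bool name_is_private) {
      return dictionary_map_in_chain || name_is_private;
    }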
Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
Handle<Name> name) {
Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
@@ -149,87 +95,6 @@ Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
return reg;
}
-
-void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
- Label* miss,
- Register scratch1,
- Register scratch2) {
- Register holder_reg;
- Handle<Map> last_map;
- if (holder().is_null()) {
- holder_reg = receiver();
- last_map = map();
- // If |type| has null as its prototype, |holder()| is
- // Handle<JSObject>::null().
- DCHECK(last_map->prototype() == isolate()->heap()->null_value());
- } else {
- last_map = handle(holder()->map());
- // This condition matches the branches below.
- bool need_holder =
- last_map->is_dictionary_map() && !last_map->IsJSGlobalObjectMap();
- holder_reg =
- FrontendHeader(receiver(), name, miss,
- need_holder ? RETURN_HOLDER : DONT_RETURN_ANYTHING);
- }
-
- if (last_map->is_dictionary_map()) {
- if (last_map->IsJSGlobalObjectMap()) {
- Handle<JSGlobalObject> global =
- holder().is_null()
- ? Handle<JSGlobalObject>::cast(isolate()->global_object())
- : Handle<JSGlobalObject>::cast(holder());
- GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
- } else {
- if (!name->IsUniqueName()) {
- DCHECK(name->IsString());
- name = factory()->InternalizeString(Handle<String>::cast(name));
- }
- DCHECK(holder().is_null() ||
- holder()->property_dictionary()->FindEntry(name) ==
- NameDictionary::kNotFound);
- GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
- scratch2);
- }
- }
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
- FieldIndex field) {
- Register reg = Frontend(name);
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), field);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
- int constant_index) {
- Register reg = Frontend(name);
- __ Move(receiver(), reg);
- LoadConstantStub stub(isolate(), constant_index);
- GenerateTailCall(masm(), stub.GetCode());
- return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
- Handle<Name> name) {
- Label miss;
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DCHECK(kind() == Code::LOAD_IC);
- PushVectorAndSlot();
- }
- NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
- if (IC::ShouldPushPopSlotAndVector(kind())) {
- DiscardVectorAndSlot();
- }
- GenerateLoadConstant(isolate()->factory()->undefined_value());
- FrontendFooter(name, &miss);
- return GetCode(kind(), name);
-}
-
Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
if (V8_UNLIKELY(FLAG_runtime_stats)) {
@@ -298,10 +163,13 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
case LookupIterator::NOT_FOUND:
case LookupIterator::INTEGER_INDEXED_EXOTIC:
break;
- case LookupIterator::DATA:
- inline_followup =
- it->property_details().type() == DATA && !it->is_dictionary_holder();
+ case LookupIterator::DATA: {
+ PropertyDetails details = it->property_details();
+ inline_followup = details.kind() == kData &&
+ details.location() == kField &&
+ !it->is_dictionary_holder();
break;
+ }
case LookupIterator::ACCESSOR: {
Handle<Object> accessors = it->GetAccessors();
if (accessors->IsAccessorInfo()) {
@@ -409,9 +277,13 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::DATA: {
- DCHECK_EQ(DATA, it->property_details().type());
- __ Move(receiver(), reg);
- LoadFieldStub stub(isolate(), it->GetFieldIndex());
+ DCHECK_EQ(kData, it->property_details().kind());
+ DCHECK_EQ(kField, it->property_details().location());
+ __ Move(LoadFieldDescriptor::ReceiverRegister(), reg);
+ Handle<Object> smi_handler =
+ LoadIC::SimpleFieldLoad(isolate(), it->GetFieldIndex());
+ __ Move(LoadFieldDescriptor::SmiHandlerRegister(), smi_handler);
+ LoadFieldStub stub(isolate());
GenerateTailCall(masm(), stub.GetCode());
break;
}
@@ -440,150 +312,6 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
return GetCode(kind(), name);
}
-
-// TODO(verwaest): Cleanup. holder() is actually the receiver.
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
- Handle<Map> transition, Handle<Name> name) {
- Label miss;
-
- // Ensure that the StoreTransitionStub we are going to call has the same
- // number of stack arguments. This means that we don't have to adapt them
- // if we decide to call the transition or miss stub.
- STATIC_ASSERT(Descriptor::kStackArgumentsCount ==
- StoreTransitionDescriptor::kStackArgumentsCount);
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 0 ||
- Descriptor::kStackArgumentsCount == 3);
- STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kValue ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kValue);
- STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kSlot ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kSlot);
- STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kVector ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kVector);
-
- if (Descriptor::kPassLastArgsOnStack) {
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
- }
-
- bool need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
- if (need_save_restore) {
- PushVectorAndSlot();
- }
-
- // Check that we are allowed to write this.
- bool is_nonexistent = holder()->map() == transition->GetBackPointer();
- if (is_nonexistent) {
- // Find the top object.
- Handle<JSObject> last;
- PrototypeIterator::WhereToEnd end =
- name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
- : PrototypeIterator::END_AT_NULL;
- PrototypeIterator iter(isolate(), holder(), kStartAtPrototype, end);
- while (!iter.IsAtEnd()) {
- last = PrototypeIterator::GetCurrent<JSObject>(iter);
- iter.Advance();
- }
- if (!last.is_null()) set_holder(last);
- NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
- } else {
- FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
- DCHECK(holder()->HasFastProperties());
- }
-
- int descriptor = transition->LastAdded();
- Handle<DescriptorArray> descriptors(transition->instance_descriptors());
- PropertyDetails details = descriptors->GetDetails(descriptor);
- Representation representation = details.representation();
- DCHECK(!representation.IsNone());
-
- // Stub is never generated for objects that require access checks.
- DCHECK(!transition->is_access_check_needed());
-
- // Call to respective StoreTransitionStub.
- Register map_reg = StoreTransitionDescriptor::MapRegister();
-
- if (details.type() == DATA_CONSTANT) {
- DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
- GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
- GenerateConstantCheck(map_reg, descriptor, value(), scratch1(), &miss);
- if (need_save_restore) {
- PopVectorAndSlot();
- }
- GenerateRestoreName(name);
- StoreMapStub stub(isolate());
- GenerateTailCall(masm(), stub.GetCode());
-
- } else {
- if (representation.IsHeapObject()) {
- GenerateFieldTypeChecks(descriptors->GetFieldType(descriptor), value(),
- &miss);
- }
- StoreTransitionStub::StoreMode store_mode =
- Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
- ? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
- : StoreTransitionStub::StoreMapAndValue;
- GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
- if (need_save_restore) {
- PopVectorAndSlot();
- }
- // We need to pass name on the stack.
- PopReturnAddress(this->name());
- __ Push(name);
- PushReturnAddress(this->name());
-
- FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
- __ Move(StoreNamedTransitionDescriptor::FieldOffsetRegister(),
- Smi::FromInt(index.index() << kPointerSizeLog2));
-
- StoreTransitionStub stub(isolate(), index.is_inobject(), representation,
- store_mode);
- GenerateTailCall(masm(), stub.GetCode());
- }
-
- __ bind(&miss);
- if (need_save_restore) {
- PopVectorAndSlot();
- }
- GenerateRestoreName(name);
- TailCallBuiltin(masm(), MissBuiltin(kind()));
-
- return GetCode(kind(), name);
-}
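In the non-constant branch above, the store mode depends only on whether the back-pointer map still has spare property slots. A minimal restatement (sketch):

    enum StoreMode { kStoreMapAndValue, kExtendStorageAndStoreMapAndValue };

    StoreMode ChooseStoreMode(int unused_property_fields) {
      // No spare slots in the properties backing store: grow it first.
      return unused_property_fields == 0 ? kExtendStorageAndStoreMapAndValue
                                         : kStoreMapAndValue;
    }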
-
-bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
- FieldType* field_type) const {
- return field_type->IsClass();
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
- Label miss;
- DCHECK(it->representation().IsHeapObject());
-
- FieldType* field_type = *it->GetFieldType();
- bool need_save_restore = false;
- if (RequiresFieldTypeChecks(field_type)) {
- need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
- if (Descriptor::kPassLastArgsOnStack) {
- __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
- }
- if (need_save_restore) PushVectorAndSlot();
- GenerateFieldTypeChecks(field_type, value(), &miss);
- if (need_save_restore) PopVectorAndSlot();
- }
-
- StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
- GenerateTailCall(masm(), stub.GetCode());
-
- __ bind(&miss);
- if (need_save_restore) PopVectorAndSlot();
- TailCallBuiltin(masm(), MissBuiltin(kind()));
- return GetCode(kind(), it->name());
-}
-
-
Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
Handle<JSObject> object, Handle<Name> name, int accessor_index,
int expected_arguments) {
@@ -640,13 +368,8 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
}
bool is_js_array = instance_type == JS_ARRAY_TYPE;
if (elements_kind == DICTIONARY_ELEMENTS) {
- if (FLAG_tf_load_ic_stub) {
- TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
- return LoadHandler::LoadElement(isolate, elements_kind, false,
- is_js_array);
- }
- TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
- return LoadDictionaryElementStub(isolate).GetCode();
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind, false, is_js_array);
}
DCHECK(IsFastElementsKind(elements_kind) ||
IsFixedTypedArrayElementsKind(elements_kind));
@@ -654,16 +377,9 @@ Handle<Object> ElementHandlerCompiler::GetKeyedLoadHandler(
bool convert_hole_to_undefined =
is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
*receiver_map == isolate->get_initial_js_array_map(elements_kind);
- if (FLAG_tf_load_ic_stub) {
- TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
- return LoadHandler::LoadElement(isolate, elements_kind,
- convert_hole_to_undefined, is_js_array);
- } else {
- TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
- return LoadFastElementStub(isolate, is_js_array, elements_kind,
- convert_hole_to_undefined)
- .GetCode();
- }
+ TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+ return LoadHandler::LoadElement(isolate, elements_kind,
+ convert_hole_to_undefined, is_js_array);
}
void ElementHandlerCompiler::CompileElementHandlers(
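With the FLAG_tf_load_ic_stub branches removed above, the only remaining per-map decision is the hole-conversion bit. As a standalone predicate (sketch; the enum is illustrative):

    enum ElementsKind { FAST_HOLEY_ELEMENTS, OTHER_FAST_ELEMENTS };

    bool ConvertHoleToUndefined(bool is_js_array, ElementsKind kind,
                                bool map_is_initial_js_array_map) {
      return is_js_array && kind == FAST_HOLEY_ELEMENTS &&
             map_is_initial_js_array_map;
    }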
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 0dec36af2f..65f6fbbef3 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -40,8 +40,6 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
// Frontend loads from receiver(), returns holder register which may be
// different.
Register Frontend(Handle<Name> name);
- void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
- Register scratch1, Register scratch2);
// When FLAG_vector_ics is true, handlers that have the possibility of missing
// will need to save and pass these to miss handlers.
@@ -52,9 +50,6 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
void DiscardVectorAndSlot();
- void PushReturnAddress(Register tmp);
- void PopReturnAddress(Register tmp);
-
// TODO(verwaest): Make non-static.
static void GenerateApiAccessorCall(MacroAssembler* masm,
const CallOptimization& optimization,
@@ -134,8 +129,6 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
virtual ~NamedLoadHandlerCompiler() {}
- Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
-
Handle<Code> CompileLoadCallback(Handle<Name> name,
Handle<AccessorInfo> callback,
Handle<Code> slow_stub);
@@ -144,8 +137,6 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
const CallOptimization& call_optimization,
int accessor_index, Handle<Code> slow_stub);
- Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
-
// The LookupIterator is used to perform a lookup behind the interceptor. If
// the iterator points to a LookupIterator::PROPERTY, its access will be
// inlined.
@@ -157,10 +148,6 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
bool is_configurable);
- // Static interface
- static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
- Handle<Map> map);
-
static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<Map> map,
Register receiver, Register holder,
int accessor_index, int expected_arguments,
@@ -193,11 +180,7 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
virtual void FrontendFooter(Handle<Name> name, Label* miss);
private:
- Handle<Code> CompileLoadNonexistent(Handle<Name> name);
- void GenerateLoadConstant(Handle<Object> value);
void GenerateLoadCallback(Register reg, Handle<AccessorInfo> callback);
- void GenerateLoadCallback(const CallOptimization& call_optimization,
- Handle<Map> receiver_map);
// Helper emits no code if vector-ics are disabled.
void InterceptorVectorSlotPush(Register holder_reg);
@@ -209,17 +192,6 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
Register holder_reg);
void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
- // Generates prototype loading code that uses the objects from the
- // context we were in when this function was called. If the context
- // has changed, a jump to miss is performed. This ties the generated
- // code to a particular context and so must not be used in cases
- // where the generated code is not allowed to have references to
- // objects from a context.
- static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype,
- Label* miss);
-
Register scratch3() { return registers_[4]; }
};
@@ -244,9 +216,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
void ZapStackArgumentsRegisterAliases();
- Handle<Code> CompileStoreTransition(Handle<Map> transition,
- Handle<Name> name);
- Handle<Code> CompileStoreField(LookupIterator* it);
Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
Handle<AccessorInfo> callback,
LanguageMode language_mode);
@@ -275,18 +244,6 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
void GenerateRestoreName(Label* label, Handle<Name> name);
private:
- void GenerateRestoreName(Handle<Name> name);
- void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
- Register scratch, Label* miss);
-
- void GenerateConstantCheck(Register map_reg, int descriptor,
- Register value_reg, Register scratch,
- Label* miss_label);
-
- bool RequiresFieldTypeChecks(FieldType* field_type) const;
- void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
- Label* miss_label);
-
static Register value();
};
diff --git a/deps/v8/src/ic/handler-configuration-inl.h b/deps/v8/src/ic/handler-configuration-inl.h
index 505d67cf42..8aa887d2b6 100644
--- a/deps/v8/src/ic/handler-configuration-inl.h
+++ b/deps/v8/src/ic/handler-configuration-inl.h
@@ -104,7 +104,8 @@ Handle<Object> StoreHandler::StoreField(Isolate* isolate, Kind kind,
int value_index = DescriptorArray::ToValueIndex(descriptor);
DCHECK(kind == kStoreField || kind == kTransitionToField);
- DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+ DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
+ DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
int config = StoreHandler::KindBits::encode(kind) |
StoreHandler::ExtendStorageBits::encode(extend_storage) |
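The two DCHECK_IMPLIES added above encode invariants of the field-store handler configuration; DCHECK_IMPLIES(a, b) asserts !(a) || (b). As plain asserts (sketch):

    #include <cassert>

    void CheckStoreFieldConfig(bool is_transition_to_field, bool extend_storage,
                               bool field_is_inobject) {
      assert(!extend_storage || is_transition_to_field);  // extend => transition
      assert(!field_is_inobject || !extend_storage);      // in-object => no extend
    }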
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 68fd1b9d98..b63e82b70a 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -83,16 +83,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(esp, Immediate(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ pop(tmp);
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,18 +122,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadGlobalFunction(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ mov(result,
- FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -359,58 +337,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
- __ and_(scratch, Immediate(Map::Deprecated::kMask));
- __ j(not_zero, miss);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ mov(scratch,
- FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ cmp(value_reg, scratch);
- __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ j(not_equal, miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +466,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(eax, value);
- __ ret(0);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
deleted file mode 100644
index a52f04689a..0000000000
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- typedef StoreWithVectorDescriptor Descriptor;
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
- // ----------- S t a t e -------------
- // -- esp[12] : value
- // -- esp[8] : slot
- // -- esp[4] : vector
- // -- esp[0] : return address
- // -----------------------------------
- __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
- Descriptor::kValue);
-
- __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
- __ mov(Operand(esp, 8), Descriptor::NameRegister());
- __ mov(Operand(esp, 4), Descriptor::ValueRegister());
- __ pop(ebx);
- __ push(Immediate(Smi::FromInt(language_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index 44a5b9f531..4bf0eaee92 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -18,440 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register r0, Register r1, Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
- elements, name, r0, r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register value, Register r0, Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
- elements, name, r0, r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY))
- << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
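A minimal sketch (not V8 source) of the offset arithmetic the two helpers above share. Each NameDictionary entry is three pointers {key, value, details}; the constant values below are illustrative assumptions, only the arithmetic mirrors the code.

#include <cstdio>

constexpr int kPointerSize = 4;         // ia32
constexpr int kHeaderSize = 12;         // assumed dictionary header size
constexpr int kElementsStartIndex = 3;  // assumed index of the first entry
constexpr int kHeapObjectTag = 1;       // V8 heap pointers carry a tag bit

int main() {
  const int kElementsStartOffset = kHeaderSize + kElementsStartIndex * kPointerSize;
  const int kValueOffset = kElementsStartOffset + kPointerSize;        // entry + 1
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;  // entry + 2
  // The masm operands subtract kHeapObjectTag because 'elements' holds a
  // tagged pointer, one byte past the real object start.
  std::printf("value at %d, details at %d (untagged: %d, %d)\n", kValueOffset,
              kDetailsOffset, kValueOffset - kHeapObjectTag,
              kDetailsOffset - kHeapObjectTag);
}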
-
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- // key is a smi.
- // ebx: FixedArray receiver->elements
- // edi: receiver map
- // Fast case: Do the store; the value could be either an Object or a double.
- __ bind(fast_object);
- if (check_map == kCheckMap) {
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ cmp(FixedArrayElementOperand(ebx, key),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ mov(FixedArrayElementOperand(ebx, key), value);
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ mov(FixedArrayElementOperand(ebx, key), value);
- // Update write barrier for the elements array address.
- __ mov(edx, value); // Preserve the value which is returned.
- __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, slow);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so
- // go to the runtime.
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
- &non_double_value, DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
- edi, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- ebx, edi, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
- value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
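Sketch of the HOLECHECK above: in a FixedDoubleArray the hole is a specific quiet-NaN bit pattern, so comparing just the upper 32 bits of the slot suffices. The bit value below is an assumption for illustration, not necessarily V8's.

#include <cstdint>
#include <cstring>

constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed pattern

bool IsTheHoleDouble(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}

// This is also why the cmp in the helper reads at
// kHeaderSize + sizeof(kHoleNanLower32): it skips the low word of the
// double and tests only the high word.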
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- typedef StoreWithVectorDescriptor Descriptor;
- // Return address is on the stack.
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
- Register receiver = Descriptor::ReceiverRegister();
- Register key = Descriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
- Descriptor::kValue);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ CmpInstanceType(edi, JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // Key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
- no_reg);
-
- // Cache miss.
- __ jmp(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // receiver is a JSArray.
- // key is a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
- // flags: compare (key, receiver.length())
- // do not leave holes in the array:
- __ j(not_equal, &slow);
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is the length is always a smi.
- __ bind(&array);
- // receiver is a JSArray.
- // key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array and fall through to the
- // common store code.
- __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra);
-
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
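Distilled as plain C++, the elements-kind transition choices made by KeyedStoreGenerateMegamorphicHelper above; a sketch, not V8's real ElementsKind machinery.

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
enum ValueType { kSmi, kHeapNumber, kOtherHeapObject };

ElementsKind TargetKind(ElementsKind current, ValueType v) {
  if (current == FAST_SMI_ELEMENTS) {
    if (v == kSmi) return FAST_SMI_ELEMENTS;        // smi stores need no check
    return v == kHeapNumber ? FAST_DOUBLE_ELEMENTS  // transition_smi_elements
                            : FAST_ELEMENTS;        // non_double_value
  }
  if (current == FAST_DOUBLE_ELEMENTS && v == kOtherHeapObject)
    return FAST_ELEMENTS;  // transition_double_elements
  return current;          // store stays in the current representation
}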
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = eax;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), edi, ebx, eax);
- __ ret(0);
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
- !edi.is(vector));
-
- __ pop(edi);
- __ push(receiver);
- __ push(name);
- __ push(slot);
- __ push(vector);
- __ push(edi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
Register name = StoreWithVectorDescriptor::NameRegister();
@@ -470,50 +36,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ push(return_address);
}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- typedef StoreWithVectorDescriptor Descriptor;
- Label restore_miss;
- Register receiver = Descriptor::ReceiverRegister();
- Register name = Descriptor::NameRegister();
- Register value = Descriptor::ValueRegister();
- // Since the slot and vector values are passed on the stack we can use
- // respective registers as scratch registers.
- Register scratch1 = Descriptor::VectorRegister();
- Register scratch2 = Descriptor::SlotRegister();
-
- __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
- // A lot of registers are needed for storing to slow case objects.
- // Push and restore receiver but rely on GenerateDictionaryStore preserving
- // the value and name.
- __ push(receiver);
-
- Register dictionary = receiver;
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
- scratch1, scratch2);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1);
- __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&restore_miss);
- __ pop(receiver);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 82700d34a7..0000000000
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register name, Register receiver,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset, Register extra) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- Label miss;
- Code::Kind ic_kind = stub_cache->ic_kind();
- bool is_vector_store =
- IC::ICUseVector(ic_kind) &&
- (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- if (is_vector_store) {
- // The value, vector and slot were passed to the IC on the stack and
- // they are still there. So we can just jump to the handler.
- DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
- } else {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
- }
-
- __ bind(&miss);
- } else {
- DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Jump to the first instruction in the code stub.
- if (is_vector_store) {
- DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
- }
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Assert that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, kPrimaryMagic);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps (the two 'and' instructions below mask the
- // same bits).
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, kPrimaryMagic);
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- __ sub(offset, name);
- __ add(offset, Immediate(kSecondaryMagic));
- __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_IA32
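The probe sequence above, restated as scalar C++. This is a sketch: the magic constants and table sizes are assumptions, and V8's real values differ by version; only the add/xor/mask and sub/add/mask shapes mirror the assembly.

#include <cstdint>

constexpr uint32_t kPrimaryMagic = 0x3d532433;    // assumed
constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // assumed
constexpr int kCacheIndexShift = 2;               // == kPointerSizeLog2 on ia32
constexpr uint32_t kPrimaryTableSize = 2048;      // assumed
constexpr uint32_t kSecondaryTableSize = 512;     // assumed

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word) {
  uint32_t offset = (name_hash + map_word) ^ kPrimaryMagic;
  // Mask out the non-hash low bits, exactly like the first and_ above.
  return offset & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word) {
  uint32_t offset = primary_offset - name_word + kSecondaryMagic;
  return offset & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}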
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 750c88daa9..fcda0c1fa3 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+Handle<Object> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
Isolate* isolate = receiver_map->GetIsolate();
@@ -20,14 +20,14 @@ Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
PropertyICCompiler compiler(isolate);
- Handle<Code> code =
+ Handle<Object> handler =
compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
- return code;
+ return handler;
}
void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
+ List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
Isolate* isolate = receiver_maps->at(0)->GetIsolate();
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
@@ -38,13 +38,12 @@ void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
receiver_maps, transitioned_maps, handlers, store_mode);
}
-
void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
+ List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
+ Handle<Object> handler;
Handle<Map> transitioned_map;
{
Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
@@ -61,21 +60,29 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
ElementsKind elements_kind = receiver_map->elements_kind();
TRACE_HANDLER_STATS(isolate(),
KeyedStoreIC_ElementsTransitionAndStoreStub);
- cached_stub =
+ Handle<Code> stub =
ElementsTransitionAndStoreStub(isolate(), elements_kind,
transitioned_map->elements_kind(),
- is_js_array, store_mode).GetCode();
+ is_js_array, store_mode)
+ .GetCode();
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ handler = stub;
+ } else {
+ handler = isolate()->factory()->NewTuple2(validity_cell, stub);
+ }
+
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
// TODO(mvstanton): Consider embedding store_mode in the state of the slow
// keyed store ic for uniformity.
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
- cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
+ handler = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
- cached_stub =
- CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
+ handler = CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
}
- DCHECK(!cached_stub.is_null());
- handlers->Add(cached_stub);
+ DCHECK(!handler.is_null());
+ handlers->Add(handler);
transitioned_maps->Add(transitioned_map);
}
}
@@ -83,8 +90,7 @@ void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
#define __ ACCESS_MASM(masm())
-
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
+Handle<Object> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
@@ -101,7 +107,12 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
- return stub;
+ Handle<Object> validity_cell =
+ Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+ if (validity_cell.is_null()) {
+ return stub;
+ }
+ return isolate()->factory()->NewTuple2(validity_cell, stub);
}
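A hypothetical helper (not in the patch) factoring out the wrapping pattern this hunk now applies in both the polymorphic and monomorphic paths: pair the compiled stub with the prototype-chain validity cell, when one exists, so the IC can cheaply revalidate the chain before dispatching.

Handle<Object> WrapStubWithValidityCell(Isolate* isolate, Handle<Map> map,
                                        Handle<Code> stub) {
  Handle<Object> cell =
      Map::GetOrCreatePrototypeChainValidityCell(map, isolate);
  if (cell.is_null()) return stub;  // no prototype chain to guard
  return isolate->factory()->NewTuple2(cell, stub);
}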
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index fa3ba15af2..b8d6635ae0 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -14,29 +14,22 @@ namespace internal {
class PropertyICCompiler : public PropertyAccessCompiler {
public:
// Keyed
- static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
+ static Handle<Object> ComputeKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
static void ComputeKeyedStorePolymorphicHandlers(
MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
- CodeHandleList* handlers, KeyedAccessStoreMode store_mode);
-
- // Helpers
- // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
- // and make the helpers private.
- static void GenerateRuntimeSetProperty(MacroAssembler* masm,
- LanguageMode language_mode);
-
+ List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode);
private:
explicit PropertyICCompiler(Isolate* isolate)
: PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
kCacheOnReceiver) {}
- Handle<Code> CompileKeyedStoreMonomorphicHandler(
+ Handle<Object> CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
MapHandleList* transitioned_maps,
- CodeHandleList* handlers,
+ List<Handle<Object>>* handlers,
KeyedAccessStoreMode store_mode);
};
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index 1b5d063270..b286315c01 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -93,8 +93,8 @@ Code* IC::target() const {
}
bool IC::IsHandler(Object* object) {
- return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
- object->IsFixedArray() ||
+ return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
+ object->IsTuple3() || object->IsFixedArray() ||
(object->IsCode() && Code::cast(object)->is_handler());
}
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index f94803681b..7439ecd2c0 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -61,6 +61,23 @@ ExtraICState BinaryOpICState::GetExtraICState() const {
return extra_ic_state;
}
+std::string BinaryOpICState::ToString() const {
+ std::string ret = "(";
+ ret += Token::Name(op_);
+ if (CouldCreateAllocationMementos()) ret += "_CreateAllocationMementos";
+ ret += ":";
+ ret += BinaryOpICState::KindToString(left_kind_);
+ ret += "*";
+ if (fixed_right_arg_.IsJust()) {
+ ret += std::to_string(fixed_right_arg_.FromJust());  // append digits, not a char
+ } else {
+ ret += BinaryOpICState::KindToString(right_kind_);
+ }
+ ret += "->";
+ ret += BinaryOpICState::KindToString(result_kind_);
+ ret += ")";
+ return ret;
+}
// static
void BinaryOpICState::GenerateAheadOfTime(
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 1ba37b99db..836979c4f0 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -82,6 +82,7 @@ class BinaryOpICState final BASE_EMBEDDED {
}
ExtraICState GetExtraICState() const;
+ std::string ToString() const;
static void GenerateAheadOfTime(Isolate*,
void (*Generate)(Isolate*,
@@ -234,6 +235,13 @@ class LoadGlobalICState final BASE_EMBEDDED {
static TypeofMode GetTypeofMode(ExtraICState state) {
return LoadGlobalICState(state).typeof_mode();
}
+
+ // For convenience, a statically declared encoding of typeof mode
+ // IC state.
+ static const ExtraICState kInsideTypeOfState = INSIDE_TYPEOF
+ << TypeofModeBits::kShift;
+ static const ExtraICState kNotInsideTypeOfState = NOT_INSIDE_TYPEOF
+ << TypeofModeBits::kShift;
};
diff --git a/deps/v8/src/ic/ic-stats.cc b/deps/v8/src/ic/ic-stats.cc
new file mode 100644
index 0000000000..de2529fcd9
--- /dev/null
+++ b/deps/v8/src/ic/ic-stats.cc
@@ -0,0 +1,144 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/ic-stats.h"
+
+#include "src/flags.h"
+#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+base::LazyInstance<ICStats>::type ICStats::instance_ =
+ LAZY_INSTANCE_INITIALIZER;
+
+ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
+ base::NoBarrier_Store(&enabled_, 0);
+}
+
+void ICStats::Begin() {
+ if (V8_LIKELY(!FLAG_ic_stats)) return;
+ base::NoBarrier_Store(&enabled_, 1);
+}
+
+void ICStats::End() {
+ if (base::NoBarrier_Load(&enabled_) != 1) return;
+ ++pos_;
+ if (pos_ == MAX_IC_INFO) {
+ Dump();
+ }
+ base::NoBarrier_Store(&enabled_, 0);
+}
+
+void ICStats::Reset() {
+ for (auto& ic_info : ic_infos_) {  // by reference, so Reset() takes effect
+ ic_info.Reset();
+ }
+ pos_ = 0;
+}
+
+void ICStats::Dump() {
+ auto value = v8::tracing::TracedValue::Create();
+ value->BeginArray("data");
+ for (int i = 0; i < pos_; ++i) {
+ ic_infos_[i].AppendToTracedValue(value.get());
+ }
+ value->EndArray();
+
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"), "V8.ICStats",
+ TRACE_EVENT_SCOPE_THREAD, "ic-stats", std::move(value));
+ Reset();
+}
+
+const char* ICStats::GetOrCacheScriptName(Script* script) {
+ if (script_name_map_.find(script) != script_name_map_.end()) {
+ return script_name_map_[script].get();
+ }
+ Object* script_name_raw = script->name();
+ if (script_name_raw->IsString()) {
+ String* script_name = String::cast(script_name_raw);
+ char* c_script_name =
+ script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+ .release();
+ script_name_map_.insert(
+ std::make_pair(script, std::unique_ptr<char[]>(c_script_name)));
+ return c_script_name;
+ } else {
+ script_name_map_.insert(
+ std::make_pair(script, std::unique_ptr<char[]>(nullptr)));
+ return nullptr;
+ }
+}
+
+const char* ICStats::GetOrCacheFunctionName(JSFunction* function) {
+ if (function_name_map_.find(function) != function_name_map_.end()) {
+ return function_name_map_[function].get();
+ }
+ SharedFunctionInfo* shared = function->shared();
+ ic_infos_[pos_].is_optimized = function->IsOptimized();
+ char* function_name = shared->DebugName()->ToCString().release();
+ function_name_map_.insert(
+ std::make_pair(function, std::unique_ptr<char[]>(function_name)));
+ return function_name;
+}
+
+ICInfo::ICInfo()
+ : function_name(nullptr),
+ script_offset(0),
+ script_name(nullptr),
+ line_num(-1),
+ is_constructor(false),
+ is_optimized(false),
+ map(nullptr),
+ is_dictionary_map(0),
+ number_of_own_descriptors(0) {}
+
+void ICInfo::Reset() {
+ type.clear();
+ function_name = nullptr;
+ script_offset = 0;
+ script_name = nullptr;
+ line_num = -1;
+ is_constructor = false;
+ is_optimized = false;
+ state.clear();
+ map = nullptr;
+ is_dictionary_map = false;
+ number_of_own_descriptors = 0;
+ instance_type.clear();
+}
+
+void ICInfo::AppendToTracedValue(v8::tracing::TracedValue* value) const {
+ value->BeginDictionary();
+ value->SetString("type", type);
+ if (function_name) {
+ value->SetString("functionName", function_name);
+ if (is_optimized) {
+ value->SetInteger("optimized", is_optimized);
+ }
+ }
+ if (script_offset) value->SetInteger("offset", script_offset);
+ if (script_name) value->SetString("scriptName", script_name);
+ if (line_num != -1) value->SetInteger("lineNum", line_num);
+ if (is_constructor) value->SetInteger("constructor", is_constructor);
+ if (!state.empty()) value->SetString("state", state);
+ if (map) {
+ // JavaScript numbers (and hence JSON) cannot represent integers above
+ // 2^53 - 1 exactly, so `map` is emitted as a string rather than an integer.
+ std::stringstream ss;
+ ss << map;
+ value->SetString("map", ss.str());
+ }
+ if (map) value->SetInteger("dict", is_dictionary_map);
+ if (map) value->SetInteger("own", number_of_own_descriptors);
+ if (!instance_type.empty()) value->SetString("instanceType", instance_type);
+ value->EndDictionary();
+}
+
+} // namespace internal
+} // namespace v8
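A usage sketch of the new ICStats singleton (field values are placeholders): Begin() arms recording only when --ic-stats is set, Current() exposes the slot being filled, and End() advances the ring, dumping one "V8.ICStats" trace event and resetting once MAX_IC_INFO entries accumulate. This mirrors the Begin/Current/End calls wired into IC::TraceIC below.

void RecordOneICEvent() {
  ICStats::instance()->Begin();   // no-op unless --ic-stats is enabled
  ICInfo& info = ICStats::instance()->Current();
  info.type = "LoadIC";           // placeholder values
  info.state = "(0->1)";
  ICStats::instance()->End();     // Dump() fires at MAX_IC_INFO entries
}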
diff --git a/deps/v8/src/ic/ic-stats.h b/deps/v8/src/ic/ic-stats.h
new file mode 100644
index 0000000000..a3015d0a6a
--- /dev/null
+++ b/deps/v8/src/ic/ic-stats.h
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_STATS_H_
+#define V8_IC_IC_STATS_H_
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/atomicops.h"
+#include "src/base/lazy-instance.h"
+
+namespace v8 {
+
+namespace tracing {
+class TracedValue;
+}
+
+namespace internal {
+
+class JSFunction;
+class Script;
+
+struct ICInfo {
+ ICInfo();
+ void Reset();
+ void AppendToTracedValue(v8::tracing::TracedValue* value) const;
+ std::string type;
+ const char* function_name;
+ int script_offset;
+ const char* script_name;
+ int line_num;
+ bool is_constructor;
+ bool is_optimized;
+ std::string state;
+ // Address of the map.
+ void* map;
+ // Whether map is a dictionary map.
+ bool is_dictionary_map;
+ // Number of own descriptors.
+ unsigned number_of_own_descriptors;
+ std::string instance_type;
+};
+
+class ICStats {
+ public:
+ const int MAX_IC_INFO = 4096;
+
+ ICStats();
+ void Dump();
+ void Begin();
+ void End();
+ void Reset();
+ V8_INLINE ICInfo& Current() {
+ DCHECK(pos_ >= 0 && pos_ < MAX_IC_INFO);
+ return ic_infos_[pos_];
+ }
+ const char* GetOrCacheScriptName(Script* script);
+ const char* GetOrCacheFunctionName(JSFunction* function);
+ V8_INLINE static ICStats* instance() { return instance_.Pointer(); }
+
+ private:
+ static base::LazyInstance<ICStats>::type instance_;
+ base::Atomic32 enabled_;
+ std::vector<ICInfo> ic_infos_;
+ std::unordered_map<Script*, std::unique_ptr<char[]>> script_name_map_;
+ std::unordered_map<JSFunction*, std::unique_ptr<char[]>> function_name_map_;
+ int pos_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_IC_IC_STATS_H_
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 7e0cefdca9..fa04e0fca0 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -21,6 +21,7 @@
#include "src/ic/handler-configuration-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/ic-inl.h"
+#include "src/ic/ic-stats.h"
#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
@@ -29,6 +30,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/runtime/runtime.h"
#include "src/tracing/trace-event.h"
+#include "src/tracing/tracing-category-observer.h"
namespace v8 {
namespace internal {
@@ -90,7 +92,7 @@ const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
void IC::TraceIC(const char* type, Handle<Object> name) {
- if (FLAG_trace_ic) {
+ if (FLAG_ic_stats) {
if (AddressIsDeoptimizedCode()) return;
DCHECK(UseVector());
State new_state = nexus()->StateFromFeedback();
@@ -101,8 +103,17 @@ void IC::TraceIC(const char* type, Handle<Object> name) {
void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
State new_state) {
- if (!FLAG_trace_ic) return;
- PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+ if (V8_LIKELY(!FLAG_ic_stats)) return;
+
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ ICStats::instance()->Begin();
+ ICInfo& ic_info = ICStats::instance()->Current();
+ ic_info.type = is_keyed() ? "Keyed" : "";
+ ic_info.type += type;
+ } else {
+ PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+ }
// TODO(jkummerow): Add support for "apply". The logic is roughly:
// marker = [fp_ + kMarkerOffset];
@@ -121,8 +132,14 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
code_offset =
static_cast<int>(pc() - function->code()->instruction_start());
}
- JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
- code_offset, stdout, true);
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ JavaScriptFrame::CollectFunctionAndOffsetForICStats(
+ function, function->abstract_code(), code_offset);
+ } else {
+ JavaScriptFrame::PrintFunctionAndOffset(
+ function, function->abstract_code(), code_offset, stdout, true);
+ }
}
const char* modifier = "";
@@ -135,17 +152,45 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
if (!receiver_map().is_null()) {
map = *receiver_map();
}
- PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
- TransitionMarkFromState(new_state), modifier,
- reinterpret_cast<void*>(map));
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ ICInfo& ic_info = ICStats::instance()->Current();
+ // Reserve enough space for the IC transition state; the longest is 17.
+ ic_info.state.reserve(17);
+ ic_info.state = "(";
+ ic_info.state += TransitionMarkFromState(old_state);
+ ic_info.state += "->";
+ ic_info.state += TransitionMarkFromState(new_state);
+ ic_info.state += modifier;
+ ic_info.state += ")";
+ ic_info.map = reinterpret_cast<void*>(map);
+ } else {
+ PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
+ TransitionMarkFromState(new_state), modifier,
+ reinterpret_cast<void*>(map));
+ }
if (map != nullptr) {
- PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
- map->NumberOfOwnDescriptors());
- std::cout << map->instance_type();
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ ICInfo& ic_info = ICStats::instance()->Current();
+ ic_info.is_dictionary_map = map->is_dictionary_map();
+ ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ ic_info.instance_type = std::to_string(map->instance_type());
+ } else {
+ PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
+ map->NumberOfOwnDescriptors());
+ std::cout << map->instance_type();
+ }
+ }
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ // TODO(lpy) Add name as key field in ICStats.
+ ICStats::instance()->End();
+ } else {
+ PrintF(") ");
+ name->ShortPrint(stdout);
+ PrintF("]\n");
}
- PrintF(") ");
- name->ShortPrint(stdout);
- PrintF("]\n");
}
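Sketch of the flag-as-bitmask dispatch used throughout the rewritten TraceIC: --ic-stats is an integer flag, and the tracing category observer ORs a dedicated bit into it, so a single test selects stdout printing versus trace-event recording. The bit values below are assumptions for illustration.

constexpr int ENABLED_BY_NATIVE = 1 << 0;   // assumed: set via the command line
constexpr int ENABLED_BY_TRACING = 1 << 1;  // assumed: set by TracingCategoryObserver

bool UseTracedICStats(int flag_ic_stats) {
  return (flag_ic_stats & ENABLED_BY_TRACING) != 0;
}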
@@ -342,7 +387,7 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
update_receiver_map(receiver);
if (!name->IsString()) return;
if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
- if (receiver->IsUndefined(isolate()) || receiver->IsNull(isolate())) return;
+ if (receiver->IsNullOrUndefined(isolate())) return;
// Remove the target from the code cache if it became invalid
// because of changes in the prototype chain to avoid hitting it
@@ -564,7 +609,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
nexus->ConfigureMonomorphic(map, handler);
} else if (kind() == Code::LOAD_GLOBAL_IC) {
LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
- nexus->ConfigureHandlerMode(Handle<Code>::cast(handler));
+ nexus->ConfigureHandlerMode(handler);
} else if (kind() == Code::KEYED_LOAD_IC) {
KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
nexus->ConfigureMonomorphic(name, map, handler);
@@ -603,10 +648,9 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
OnTypeFeedbackChanged(isolate(), get_host());
}
-
void IC::ConfigureVectorState(MapHandleList* maps,
MapHandleList* transitioned_maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
DCHECK(UseVector());
DCHECK(kind() == Code::KEYED_STORE_IC);
KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
@@ -620,7 +664,14 @@ void IC::ConfigureVectorState(MapHandleList* maps,
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
- if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+ if (object->IsNullOrUndefined(isolate())) {
+ if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+ // Ensure the IC state progresses.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
+ update_receiver_map(object);
+ PatchCache(name, slow_stub());
+ TRACE_IC("LoadIC", name);
+ }
return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
}
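The guard added above, isolated as a sketch (the state enum is abridged): the IC only patches itself to the slow stub once it has left its initial states, so feedback still progresses for repeated loads from null or undefined receivers instead of staying uninitialized forever.

enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

bool ShouldPatchToSlow(bool use_ic, State state) {
  return use_ic && state != UNINITIALIZED && state != PREMONOMORPHIC;
}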
@@ -794,6 +845,7 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
DCHECK(IsHandler(*handler));
// Currently only LoadIC and KeyedLoadIC support non-code handlers.
DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
+ kind() == Code::LOAD_GLOBAL_IC ||
kind() == Code::KEYED_LOAD_IC ||
kind() == Code::STORE_IC ||
kind() == Code::KEYED_STORE_IC);
@@ -831,23 +883,9 @@ void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
}
}
-Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state) {
- DCHECK(!FLAG_tf_store_ic_stub);
- LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
- return is_strict(mode)
- ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
- : isolate->builtins()->KeyedStoreIC_Megamorphic();
-}
-
-Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
- if (FLAG_tf_load_ic_stub) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
- return LoadHandler::LoadField(isolate(), index);
- }
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
- LoadFieldStub stub(isolate(), index);
- return stub.GetCode();
+Handle<Object> LoadIC::SimpleFieldLoad(Isolate* isolate, FieldIndex index) {
+ TRACE_HANDLER_STATS(isolate, LoadIC_LoadFieldDH);
+ return LoadHandler::LoadField(isolate, index);
}
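What the "DH" (data handler) in LoadIC_LoadFieldDH means, sketched: LoadHandler::LoadField produces a Smi whose bits pack the handler kind together with the field index, so no code object is needed for a simple field load. The bit layout below is an assumption for illustration, not V8's actual encoding.

#include <cstdint>

constexpr int kKindBits = 3;
constexpr int kKindLoadField = 1;  // assumed kind tag

int32_t EncodeLoadField(int32_t field_index) {
  return (field_index << kKindBits) | kKindLoadField;
}
int32_t DecodeFieldIndex(int32_t handler) { return handler >> kKindBits; }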
namespace {
@@ -1044,7 +1082,7 @@ bool IsCompatibleReceiver(LookupIterator* lookup, Handle<Map> receiver_map) {
if (holder->HasFastProperties()) {
if (getter->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
- if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
+ if (!receiver->IsJSObject() && function->shared()->IsUserJavaScript() &&
is_sloppy(function->shared()->language_mode())) {
// Calling sloppy non-builtins with a value as the receiver
// requires boxing.
@@ -1077,26 +1115,17 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
lookup->state() == LookupIterator::ACCESS_CHECK) {
code = slow_stub();
} else if (!lookup->IsFound()) {
- if (kind() == Code::LOAD_IC) {
+ if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC) {
TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
code = LoadNonExistent(receiver_map(), lookup->name());
- } else if (kind() == Code::LOAD_GLOBAL_IC) {
- code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
- receiver_map());
- // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
- if (code.is_null()) code = slow_stub();
} else {
code = slow_stub();
}
} else {
if (kind() == Code::LOAD_GLOBAL_IC &&
lookup->state() == LookupIterator::DATA &&
- lookup->GetHolder<Object>()->IsJSGlobalObject()) {
-#if DEBUG
- Handle<Object> holder = lookup->GetHolder<Object>();
- Handle<Object> receiver = lookup->GetReceiver();
- DCHECK_EQ(*receiver, *holder);
-#endif
+ lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
+ DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
// Now update the cell in the feedback vector.
LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
@@ -1108,22 +1137,15 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
code = slow_stub();
}
} else if (lookup->state() == LookupIterator::INTERCEPTOR) {
- if (kind() == Code::LOAD_GLOBAL_IC) {
- // The interceptor handler requires name but it is not passed explicitly
- // to LoadGlobalIC and the LoadGlobalIC dispatcher also does not load
- // it so we will just use slow stub.
+ // Perform a lookup behind the interceptor. Copy the LookupIterator
+ // since the original iterator will be used to fetch the value.
+ LookupIterator it = *lookup;
+ it.Next();
+ LookupForRead(&it);
+ if (it.state() == LookupIterator::ACCESSOR &&
+ !IsCompatibleReceiver(&it, receiver_map())) {
+ TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
code = slow_stub();
- } else {
- // Perform a lookup behind the interceptor. Copy the LookupIterator
- // since the original iterator will be used to fetch the value.
- LookupIterator it = *lookup;
- it.Next();
- LookupForRead(&it);
- if (it.state() == LookupIterator::ACCESSOR &&
- !IsCompatibleReceiver(&it, receiver_map())) {
- TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
- code = slow_stub();
- }
}
}
if (code.is_null()) code = ComputeHandler(lookup);
@@ -1288,7 +1310,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (receiver->IsString() &&
Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
- return SimpleFieldLoad(index);
+ return SimpleFieldLoad(isolate(), index);
}
if (receiver->IsStringWrapper() &&
@@ -1326,7 +1348,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
&object_offset)) {
FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
- return SimpleFieldLoad(index);
+ return SimpleFieldLoad(isolate(), index);
}
if (IsCompatibleReceiver(lookup, map)) {
@@ -1356,26 +1378,15 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
return slow_stub();
}
- if (FLAG_tf_load_ic_stub) {
- Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
- isolate(), lookup->GetAccessorIndex());
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
- return smi_handler;
- }
- if (kind() != Code::LOAD_GLOBAL_IC) {
- TRACE_HANDLER_STATS(isolate(),
- LoadIC_LoadApiGetterFromPrototypeDH);
- return LoadFromPrototype(map, holder, lookup->name(),
- smi_handler);
- }
- } else {
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
- int index = lookup->GetAccessorIndex();
- LoadApiGetterStub stub(isolate(), true, index);
- return stub.GetCode();
- }
+ Handle<Object> smi_handler =
+ LoadHandler::LoadApiGetter(isolate(), lookup->GetAccessorIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+ return smi_handler;
+ }
+ if (kind() != Code::LOAD_GLOBAL_IC) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
break; // Custom-compiled handler.
}
@@ -1385,6 +1396,7 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
case LookupIterator::DATA: {
+ DCHECK_EQ(kData, lookup->property_details().kind());
if (lookup->is_dictionary_holder()) {
if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
@@ -1406,40 +1418,26 @@ Handle<Object> LoadIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// -------------- Fields --------------
- if (lookup->property_details().type() == DATA) {
+ if (lookup->property_details().location() == kField) {
FieldIndex field = lookup->GetFieldIndex();
- Handle<Object> smi_handler = SimpleFieldLoad(field);
+ Handle<Object> smi_handler = SimpleFieldLoad(isolate(), field);
if (receiver_is_holder) {
return smi_handler;
}
- if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
- }
- break; // Custom-compiled handler.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
// -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == DATA_CONSTANT);
- if (FLAG_tf_load_ic_stub) {
- Handle<Object> smi_handler =
- LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
- return smi_handler;
- }
- if (kind() != Code::LOAD_GLOBAL_IC) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
- return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
- }
- } else {
- if (receiver_is_holder) {
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
- LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
- return stub.GetCode();
- }
+ DCHECK_EQ(kDescriptor, lookup->property_details().location());
+ Handle<Object> smi_handler =
+ LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+ if (receiver_is_holder) {
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+ return smi_handler;
}
- break; // Custom-compiled handler.
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+ return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1543,33 +1541,15 @@ Handle<Object> LoadIC::CompileHandler(LookupIterator* lookup,
}
case LookupIterator::DATA: {
- if (lookup->is_dictionary_holder()) {
- DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
- DCHECK(holder->IsJSGlobalObject());
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- Handle<PropertyCell> cell = lookup->GetPropertyCell();
- Handle<Code> code = compiler.CompileLoadGlobal(
- cell, lookup->name(), lookup->IsConfigurable());
- return code;
- }
-
- // -------------- Fields --------------
- if (lookup->property_details().type() == DATA) {
- FieldIndex field = lookup->GetFieldIndex();
- DCHECK(!receiver_is_holder);
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadField);
- NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- return compiler.CompileLoadField(lookup->name(), field);
- }
-
- // -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == DATA_CONSTANT);
- DCHECK(!receiver_is_holder);
- TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstant);
+ DCHECK(lookup->is_dictionary_holder());
+ DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
+ DCHECK(holder->IsJSGlobalObject());
+ TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
- return compiler.CompileLoadConstant(lookup->name(),
- lookup->GetConstantIndex());
+ Handle<PropertyCell> cell = lookup->GetPropertyCell();
+ Handle<Code> code = compiler.CompileLoadGlobal(cell, lookup->name(),
+ lookup->IsConfigurable());
+ return code;
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1839,7 +1819,14 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
// If the object is undefined or null it's illegal to try to set any
// properties on it; throw a TypeError in that case.
- if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+ if (object->IsNullOrUndefined(isolate())) {
+ if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+ // Ensure the IC state progresses.
+ TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
+ update_receiver_map(object);
+ PatchCache(name, slow_stub());
+ TRACE_IC("StoreIC", name);
+ }
return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
}
@@ -1890,11 +1877,12 @@ Handle<Object> StoreIC::StoreTransition(Handle<Map> receiver_map,
DCHECK(!transition->is_access_check_needed());
Handle<Object> smi_handler;
- if (details.type() == DATA_CONSTANT) {
+ DCHECK_EQ(kData, details.kind());
+ if (details.location() == kDescriptor) {
smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
} else {
- DCHECK_EQ(DATA, details.type());
+ DCHECK_EQ(kField, details.location());
bool extend_storage =
Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
@@ -1972,13 +1960,10 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
return slow_stub();
}
DCHECK(lookup->IsCacheableTransition());
- if (FLAG_tf_store_ic_stub) {
- Handle<Map> transition = lookup->transition_map();
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
- return StoreTransition(receiver_map(), holder, transition,
- lookup->name());
- }
- break; // Custom-compiled handler.
+ Handle<Map> transition = lookup->transition_map();
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+ return StoreTransition(receiver_map(), holder, transition,
+ lookup->name());
}
case LookupIterator::INTERCEPTOR: {
@@ -2044,6 +2029,7 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
case LookupIterator::DATA: {
+ DCHECK_EQ(kData, lookup->property_details().kind());
if (lookup->is_dictionary_holder()) {
if (holder->IsJSGlobalObject()) {
break; // Custom-compiled handler.
@@ -2054,32 +2040,16 @@ Handle<Object> StoreIC::GetMapIndependentHandler(LookupIterator* lookup) {
}
// -------------- Fields --------------
- if (lookup->property_details().type() == DATA) {
- if (FLAG_tf_store_ic_stub) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
- int descriptor = lookup->GetFieldDescriptorIndex();
- FieldIndex index = lookup->GetFieldIndex();
- return StoreHandler::StoreField(isolate(), descriptor, index,
- lookup->representation());
- } else {
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- Handle<FieldType> field_type = lookup->GetFieldType();
- use_stub = !field_type->IsClass();
- }
- if (use_stub) {
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
- StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
- lookup->representation());
- return stub.GetCode();
- }
- }
- break; // Custom-compiled handler.
+ if (lookup->property_details().location() == kField) {
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+ int descriptor = lookup->GetFieldDescriptorIndex();
+ FieldIndex index = lookup->GetFieldIndex();
+ return StoreHandler::StoreField(isolate(), descriptor, index,
+ lookup->representation());
}
// -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == DATA_CONSTANT);
+ DCHECK_EQ(kDescriptor, lookup->property_details().location());
TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
return slow_stub();
@@ -2117,15 +2087,7 @@ Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
cell->set_value(isolate()->heap()->the_hole_value());
return code;
}
- DCHECK(!FLAG_tf_store_ic_stub);
- Handle<Map> transition = lookup->transition_map();
- // Currently not handled by CompileStoreTransition.
- DCHECK(holder->HasFastProperties());
-
- DCHECK(lookup->IsCacheableTransition());
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransition);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreTransition(transition, lookup->name());
+ UNREACHABLE();
}
case LookupIterator::INTERCEPTOR:
@@ -2173,40 +2135,18 @@ Handle<Object> StoreIC::CompileHandler(LookupIterator* lookup,
}
case LookupIterator::DATA: {
- if (lookup->is_dictionary_holder()) {
- DCHECK(holder->IsJSGlobalObject());
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
- DCHECK(holder.is_identical_to(receiver) ||
- receiver->map()->prototype() == *holder);
- auto cell = lookup->GetPropertyCell();
- auto updated_type =
- PropertyCell::UpdatedType(cell, value, lookup->property_details());
- auto code = PropertyCellStoreHandler(
- isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
- lookup->name(), cell, updated_type);
- return code;
- }
-
- // -------------- Fields --------------
- if (lookup->property_details().type() == DATA) {
- DCHECK(!FLAG_tf_store_ic_stub);
-#ifdef DEBUG
- bool use_stub = true;
- if (lookup->representation().IsHeapObject()) {
- // Only use a generic stub if no types need to be tracked.
- Handle<FieldType> field_type = lookup->GetFieldType();
- use_stub = !field_type->IsClass();
- }
- DCHECK(!use_stub);
-#endif
- TRACE_HANDLER_STATS(isolate(), StoreIC_StoreField);
- NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
- return compiler.CompileStoreField(lookup);
- }
-
- // -------------- Constant properties --------------
- DCHECK(lookup->property_details().type() == DATA_CONSTANT);
- UNREACHABLE();
+ DCHECK(lookup->is_dictionary_holder());
+ DCHECK(holder->IsJSGlobalObject());
+ TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
+ DCHECK(holder.is_identical_to(receiver) ||
+ receiver->map()->prototype() == *holder);
+ auto cell = lookup->GetPropertyCell();
+ auto updated_type =
+ PropertyCell::UpdatedType(cell, value, lookup->property_details());
+ auto code = PropertyCellStoreHandler(isolate(), receiver,
+ Handle<JSGlobalObject>::cast(holder),
+ lookup->name(), cell, updated_type);
+ return code;
}
case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -2227,7 +2167,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
- Handle<Code> handler =
+ Handle<Object> handler =
PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
store_mode);
return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
@@ -2261,7 +2201,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
- Handle<Code> handler =
+ Handle<Object> handler =
PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
transitioned_receiver_map, store_mode);
ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
@@ -2275,7 +2215,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
- Handle<Code> handler =
+ Handle<Object> handler =
PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
store_mode);
return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
@@ -2336,7 +2276,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
}
MapHandleList transitioned_maps(target_receiver_maps.length());
- CodeHandleList handlers(target_receiver_maps.length());
+ List<Handle<Object>> handlers(target_receiver_maps.length());
PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
&target_receiver_maps, &transitioned_maps, &handlers, store_mode);
ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
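
The repeated Handle<Code> -> Handle<Object> widenings in this file exist because a feedback slot may now hold either compiled code or a Smi-encoded data handler. A simplified stand-in for that union (real V8 distinguishes the two via tagged pointers, not std::variant):

    #include <cstdint>
    #include <variant>

    struct Code;  // opaque stand-in for a compiled handler
    using SmiHandler = int32_t;
    using Handler = std::variant<Code*, SmiHandler>;

    bool IsDataDrivenHandler(const Handler& h) {
      // Smi handlers are interpreted by the IC dispatcher itself;
      // Code handlers are tail-called.
      return std::holds_alternative<SmiHandler>(h);
    }
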
@@ -2485,17 +2425,14 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
Handle<Map> old_receiver_map;
- bool sloppy_arguments_elements = false;
+ bool is_arguments = false;
bool key_is_valid_index = false;
KeyedAccessStoreMode store_mode = STANDARD_STORE;
if (use_ic && object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
old_receiver_map = handle(receiver->map(), isolate());
- sloppy_arguments_elements =
- !is_sloppy(language_mode()) &&
- receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map();
- if (!sloppy_arguments_elements) {
+ is_arguments = receiver->IsJSArgumentsObject();
+ if (!is_arguments) {
key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
if (key_is_valid_index) {
uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
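
The rewritten check replaces a map comparison against the sloppy-arguments elements map with a direct IsJSArgumentsObject() type test, and only non-arguments receivers with non-negative Smi keys stay on the fast element path. A small sketch of the key test, assuming Smi values fit in a signed word:

    #include <cstdint>

    // A key qualifies for the element path only if it is a small
    // integer reinterpretable as an array index.
    bool KeyIsValidIndex(bool is_smi, intptr_t smi_value) {
      return is_smi && smi_value >= 0;
    }

    uint32_t ToArrayIndex(intptr_t smi_value) {
      // Safe because the caller checked smi_value >= 0 and Smis fit in
      // 32 bits on the configurations this sketch assumes.
      return static_cast<uint32_t>(smi_value);
    }
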
@@ -2512,7 +2449,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
if (use_ic) {
if (!old_receiver_map.is_null()) {
- if (sloppy_arguments_elements) {
+ if (is_arguments) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
} else if (key_is_valid_index) {
// We should go generic if receiver isn't a dictionary, but our
@@ -2597,7 +2534,7 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> function = args.at<Object>(0);
+ Handle<Object> function = args.at(0);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
Handle<Smi> slot = args.at<Smi>(2);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
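
The mechanical args.at<Object>(i) -> args.at(i) rewrites throughout these runtime functions suggest Arguments::at gained Object as its default template argument. A minimal model of that API shape (the real v8::internal::Arguments is more involved; this is an inference from the call sites):

    template <class T> class Handle { /* tagged-pointer wrapper */ };
    class Object {};

    class ArgumentsSketch {
     public:
      // Defaulting S to Object lets untyped accesses drop the explicit
      // <Object>, while typed ones keep args.at<Smi>(i) and friends.
      template <class S = Object>
      Handle<S> at(int index) { return Handle<S>(); }
    };
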
@@ -2613,7 +2550,8 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> receiver = args.at<Object>(0);
+ Handle<Object> receiver = args.at(0);
+ Handle<Name> key = args.at<Name>(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2622,15 +2560,12 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// set up outside the IC, handle that here.
FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
if (kind == FeedbackVectorSlotKind::LOAD_IC) {
- Handle<Name> key = args.at<Name>(1);
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
} else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
- Handle<Name> key(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*key, isolate->heap()->empty_string());
DCHECK_EQ(*isolate->global_object(), *receiver);
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2638,7 +2573,6 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
} else {
- Handle<Name> key = args.at<Name>(1);
DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2650,16 +2584,13 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
// Runtime functions don't follow the IC's calling convention.
Handle<JSGlobalObject> global = isolate->global_object();
- Handle<Smi> slot = args.at<Smi>(0);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
+ Handle<String> name = args.at<String>(0);
+ Handle<Smi> slot = args.at<Smi>(1);
+ Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
- vector->GetKind(vector_slot));
- Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, isolate->heap()->empty_string());
LoadGlobalICNexus nexus(vector, vector_slot);
LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
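
Runtime_LoadGlobalIC_Miss now takes three arguments because the property name travels as an explicit argument instead of being recovered from the feedback vector (hence the deleted GetName/DCHECK lines). The Slow variant below goes further and consults script contexts before the global object. A sketch of that lookup order, with stand-in types:

    enum class GlobalSource { kScriptContext, kGlobalObject };

    // Top-level let/const bindings live in script contexts and shadow
    // global object properties, so they must be consulted first.
    GlobalSource ResolveGlobalLoad(bool found_in_script_context_table) {
      return found_in_script_context_table ? GlobalSource::kScriptContext
                                           : GlobalSource::kGlobalObject;
    }
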
@@ -2672,20 +2603,12 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(slot, 0);
- CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 1);
-
- FeedbackVectorSlot vector_slot = vector->ToSlot(slot);
- DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
- vector->GetKind(vector_slot));
- Handle<String> name(vector->GetName(vector_slot), isolate);
- DCHECK_NE(*name, isolate->heap()->empty_string());
-
- Handle<JSGlobalObject> global = isolate->global_object();
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
+ Handle<Context> native_context = isolate->native_context();
Handle<ScriptContextTable> script_contexts(
- global->native_context()->script_context_table());
+ native_context->script_context_table());
ScriptContextTable::LookupResult lookup_result;
if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
@@ -2700,6 +2623,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
return *result;
}
+ Handle<JSGlobalObject> global(native_context->global_object(), isolate);
Handle<Object> result;
bool is_found = false;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -2723,8 +2647,8 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(4, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> receiver = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
+ Handle<Object> receiver = args.at(0);
+ Handle<Object> key = args.at(1);
Handle<Smi> slot = args.at<Smi>(2);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
@@ -2739,8 +2663,8 @@ RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
HandleScope scope(isolate);
typedef LoadWithVectorDescriptor Descriptor;
DCHECK_EQ(Descriptor::kParameterCount, args.length());
- Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
- Handle<Object> key = args.at<Object>(Descriptor::kName);
+ Handle<Object> receiver = args.at(Descriptor::kReceiver);
+ Handle<Object> key = args.at(Descriptor::kName);
Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
Handle<TypeFeedbackVector> vector =
args.at<TypeFeedbackVector>(Descriptor::kVector);
@@ -2757,10 +2681,10 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
- Handle<Object> receiver = args.at<Object>(3);
+ Handle<Object> receiver = args.at(3);
Handle<Name> key = args.at<Name>(4);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
@@ -2784,11 +2708,11 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
Handle<Smi> slot = args.at<Smi>(1);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
- Handle<Object> receiver = args.at<Object>(3);
- Handle<Object> key = args.at<Object>(4);
+ Handle<Object> receiver = args.at(3);
+ Handle<Object> key = args.at(4);
FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2801,10 +2725,10 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
DCHECK_EQ(5, args.length());
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> value = args.at<Object>(0);
+ Handle<Object> value = args.at(0);
// slot and vector parameters are not used.
- Handle<Object> object = args.at<Object>(3);
- Handle<Object> key = args.at<Object>(4);
+ Handle<Object> object = args.at(3);
+ Handle<Object> key = args.at(4);
LanguageMode language_mode;
KeyedStoreICNexus nexus(isolate);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
@@ -2818,9 +2742,9 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
HandleScope scope(isolate);
// Runtime functions don't follow the IC's calling convention.
- Handle<Object> object = args.at<Object>(0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
+ Handle<Object> object = args.at(0);
+ Handle<Object> key = args.at(1);
+ Handle<Object> value = args.at(2);
Handle<Map> map = args.at<Map>(3);
LanguageMode language_mode;
KeyedStoreICNexus nexus(isolate);
@@ -2931,7 +2855,19 @@ MaybeHandle<Object> BinaryOpIC::Transition(
}
set_target(*new_target);
- if (FLAG_trace_ic) {
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto ic_stats = ICStats::instance();
+ ic_stats->Begin();
+ ICInfo& ic_info = ic_stats->Current();
+ ic_info.type = "BinaryOpIC";
+ ic_info.state = old_state.ToString();
+ ic_info.state += " => ";
+ ic_info.state += state.ToString();
+ JavaScriptFrame::CollectTopFrameForICStats(isolate());
+ ic_stats->End();
+ } else if (FLAG_ic_stats) {
+    // Legacy plain-text path: prints what --trace-ic used to print.
OFStream os(stdout);
os << "[BinaryOpIC" << old_state << " => " << state << " @ "
<< static_cast<void*>(*new_target) << " <- ";
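
The new tracing block, taken only when FLAG_ic_stats carries the ENABLED_BY_TRACING bit, follows a begin/fill/end protocol against a process-wide ICStats collector. A hedged mock of that shape (method names copied from the diff; the storage details are assumptions):

    #include <string>
    #include <vector>

    struct ICInfo {
      std::string type;
      std::string state;
    };

    class ICStatsSketch {
     public:
      static ICStatsSketch* instance() {
        static ICStatsSketch stats;
        return &stats;
      }
      void Begin() { infos_.emplace_back(); }
      ICInfo& Current() { return infos_.back(); }
      void End() { /* hand Current() to the tracing backend */ }

     private:
      std::vector<ICInfo> infos_;
    };
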
@@ -2957,8 +2893,8 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_Miss) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
typedef BinaryOpDescriptor Descriptor;
- Handle<Object> left = args.at<Object>(Descriptor::kLeft);
- Handle<Object> right = args.at<Object>(Descriptor::kRight);
+ Handle<Object> left = args.at(Descriptor::kLeft);
+ Handle<Object> right = args.at(Descriptor::kRight);
BinaryOpIC ic(isolate);
RETURN_RESULT_OR_FAILURE(
isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
@@ -2971,8 +2907,8 @@ RUNTIME_FUNCTION(Runtime_BinaryOpIC_MissWithAllocationSite) {
typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
Handle<AllocationSite> allocation_site =
args.at<AllocationSite>(Descriptor::kAllocationSite);
- Handle<Object> left = args.at<Object>(Descriptor::kLeft);
- Handle<Object> right = args.at<Object>(Descriptor::kRight);
+ Handle<Object> left = args.at(Descriptor::kLeft);
+ Handle<Object> right = args.at(Descriptor::kRight);
BinaryOpIC ic(isolate);
RETURN_RESULT_OR_FAILURE(isolate,
ic.Transition(allocation_site, left, right));
@@ -3005,7 +2941,30 @@ Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
Handle<Code> new_target = stub.GetCode();
set_target(*new_target);
- if (FLAG_trace_ic) {
+ if (FLAG_ic_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto ic_stats = ICStats::instance();
+ ic_stats->Begin();
+ ICInfo& ic_info = ic_stats->Current();
+ ic_info.type = "CompareIC";
+ JavaScriptFrame::CollectTopFrameForICStats(isolate());
+ ic_info.state = "((";
+ ic_info.state += CompareICState::GetStateName(old_stub.left());
+ ic_info.state += "+";
+ ic_info.state += CompareICState::GetStateName(old_stub.right());
+ ic_info.state += "=";
+ ic_info.state += CompareICState::GetStateName(old_stub.state());
+ ic_info.state += ")->(";
+ ic_info.state += CompareICState::GetStateName(new_left);
+ ic_info.state += "+";
+ ic_info.state += CompareICState::GetStateName(new_right);
+ ic_info.state += "=";
+ ic_info.state += CompareICState::GetStateName(state);
+ ic_info.state += "))#";
+ ic_info.state += Token::Name(op_);
+ ic_stats->End();
+ } else if (FLAG_ic_stats) {
+    // Legacy plain-text path: prints what --trace-ic used to print.
PrintF("[CompareIC in ");
JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
@@ -3032,7 +2991,7 @@ RUNTIME_FUNCTION(Runtime_CompareIC_Miss) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
- return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+ return ic.UpdateCaches(args.at(0), args.at(1));
}
@@ -3055,7 +3014,7 @@ Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
DCHECK(args.length() == 1);
HandleScope scope(isolate);
- Handle<Object> object = args.at<Object>(0);
+ Handle<Object> object = args.at(0);
ToBooleanIC ic(isolate);
return *ic.ToBoolean(object);
}
@@ -3066,7 +3025,7 @@ RUNTIME_FUNCTION(Runtime_StoreCallbackProperty) {
Handle<JSObject> holder = args.at<JSObject>(1);
Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
Handle<Name> name = args.at<Name>(3);
- Handle<Object> value = args.at<Object>(4);
+ Handle<Object> value = args.at(4);
CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
HandleScope scope(isolate);
@@ -3110,7 +3069,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptorOnly) {
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
Handle<Object> receiver =
- args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
HandleScope scope(isolate);
@@ -3146,7 +3105,7 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
Handle<Name> name =
args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
Handle<Object> receiver =
- args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+ args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
@@ -3181,15 +3140,17 @@ RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
if (it.IsFound()) return *result;
-#ifdef DEBUG
LoadICNexus nexus(isolate);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
// It could actually be any kind of LoadICs here but the predicate handles
// all the cases properly.
- DCHECK(!ic.ShouldThrowReferenceError());
-#endif
+ if (!ic.ShouldThrowReferenceError()) {
+ return isolate->heap()->undefined_value();
+ }
- return isolate->heap()->undefined_value();
+ // Throw a reference error.
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
}
@@ -3200,7 +3161,7 @@ RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
Handle<JSObject> receiver = args.at<JSObject>(0);
Handle<Name> name = args.at<Name>(1);
- Handle<Object> value = args.at<Object>(2);
+ Handle<Object> value = args.at(2);
DCHECK(receiver->HasNamedInterceptor());
InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 9e69cc85d0..c86fd713a2 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -120,7 +120,7 @@ class IC {
// keyed stores).
void ConfigureVectorState(MapHandleList* maps,
MapHandleList* transitioned_maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
@@ -285,12 +285,6 @@ class LoadIC : public IC {
NOT_INSIDE_TYPEOF;
}
- // Code generator routines.
-
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
-
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
@@ -312,7 +306,7 @@ class LoadIC : public IC {
private:
// Creates a data handler that represents a load of a field by given index.
- Handle<Object> SimpleFieldLoad(FieldIndex index);
+ static Handle<Object> SimpleFieldLoad(Isolate* isolate, FieldIndex index);
// Creates a data handler that represents a prototype chain check followed
// by given Smi-handler that encoded a load from the holder.
@@ -325,6 +319,7 @@ class LoadIC : public IC {
Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
friend class IC;
+ friend class NamedLoadHandlerCompiler;
};
class LoadGlobalIC : public LoadIC {
@@ -353,10 +348,6 @@ class KeyedLoadIC : public LoadIC {
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Object> key);
- // Code generator routines.
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-
static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
protected:
@@ -379,11 +370,6 @@ class StoreIC : public IC {
return StoreICState::GetLanguageMode(extra_ic_state());
}
- // Code generators for stub routines. Only called once at startup.
- static void GenerateSlow(MacroAssembler* masm);
- static void GenerateMiss(MacroAssembler* masm);
- static void GenerateNormal(MacroAssembler* masm);
-
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode =
@@ -451,9 +437,6 @@ class KeyedStoreIC : public StoreIC {
static void GenerateMegamorphic(MacroAssembler* masm,
LanguageMode language_mode);
- static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
- ExtraICState extra_state);
-
static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
protected:
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 30faba85e9..81ce3a6a1f 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -4,8 +4,11 @@
#include "src/ic/keyed-store-generic.h"
-#include "src/compiler/code-assembler.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
#include "src/contexts.h"
+#include "src/ic/accessor-assembler-impl.h"
+#include "src/interface-descriptors.h"
#include "src/isolate.h"
namespace v8 {
@@ -13,10 +16,12 @@ namespace internal {
using compiler::Node;
-class KeyedStoreGenericAssembler : public CodeStubAssembler {
+class KeyedStoreGenericAssembler : public AccessorAssemblerImpl {
public:
- void KeyedStoreGeneric(const StoreICParameters* p,
- LanguageMode language_mode);
+ explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
+ : AccessorAssemblerImpl(state) {}
+
+ void KeyedStoreGeneric(LanguageMode language_mode);
private:
enum UpdateLength {
@@ -30,7 +35,8 @@ class KeyedStoreGenericAssembler : public CodeStubAssembler {
Node* value, Node* context, Label* slow);
void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
- const StoreICParameters* p, Label* slow);
+ const StoreICParameters* p, Label* slow,
+ LanguageMode language_mode);
void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
Label* non_fast_elements,
@@ -60,16 +66,18 @@ class KeyedStoreGenericAssembler : public CodeStubAssembler {
ElementsKind packed_kind,
ElementsKind packed_kind_2, Label* bailout);
- // Do not add fields, so that this is safe to reinterpret_cast to CSA.
+ void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
+ void LookupPropertyOnPrototypeChain(Node* receiver_map, Node* name,
+ Label* accessor,
+ Variable* var_accessor_pair,
+ Variable* var_accessor_holder,
+ Label* readonly, Label* bailout);
};
-void KeyedStoreGenericGenerator::Generate(
- CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
- LanguageMode language_mode) {
- STATIC_ASSERT(sizeof(CodeStubAssembler) ==
- sizeof(KeyedStoreGenericAssembler));
- auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
- assm->KeyedStoreGeneric(p, language_mode);
+void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state,
+ LanguageMode language_mode) {
+ KeyedStoreGenericAssembler assembler(state);
+ assembler.KeyedStoreGeneric(language_mode);
}
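
This rewrite retires the reinterpret_cast trick, which only worked while KeyedStoreGenericAssembler was forbidden from adding fields (note the deleted STATIC_ASSERT and "Do not add fields" comment). Passing a shared CodeAssemblerState makes construction explicit; roughly:

    namespace compiler { class CodeAssemblerState; }

    class AssemblerBase {
     public:
      explicit AssemblerBase(compiler::CodeAssemblerState* state)
          : state_(state) {}

     private:
      compiler::CodeAssemblerState* state_;
    };

    // Derived assemblers now construct normally and may add members;
    // no binary-layout constraint remains.
    class DerivedAssembler : public AssemblerBase {
     public:
      using AssemblerBase::AssemblerBase;
    };
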
void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
@@ -94,9 +102,7 @@ void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
non_fast_elements);
Node* elements_kind = LoadMapElementsKind(prototype_map);
STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
- GotoIf(Int32LessThanOrEqual(elements_kind,
- Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &loop_body);
+ GotoIf(IsFastElementsKind(elements_kind), &loop_body);
GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
Goto(non_fast_elements);
}
@@ -112,7 +118,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
TrapAllocationMemento(receiver, bailout);
}
Label perform_transition(this), check_holey_map(this);
- Variable var_target_map(this, MachineType::PointerRepresentation());
+ Variable var_target_map(this, MachineRepresentation::kTagged);
// Check if the receiver has the default |from_kind| map.
{
Node* packed_map =
@@ -143,7 +149,7 @@ void KeyedStoreGenericAssembler::TryRewriteElements(
GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
capacity, INTPTR_PARAMETERS, bailout);
}
- StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+ StoreMap(receiver, var_target_map.value());
}
}
@@ -160,7 +166,7 @@ void KeyedStoreGenericAssembler::TryChangeToHoleyMapHelper(
}
Node* holey_map =
LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
- StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+ StoreMap(receiver, holey_map);
Goto(done);
}
@@ -219,6 +225,15 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
if (update_length != kDontChangeLength) {
CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
Int32Constant(JS_ARRAY_TYPE)));
+ // Check if the length property is writable. The fast check is only
+ // supported for fast properties.
+ GotoIf(IsDictionaryMap(receiver_map), slow);
+ // The length property is non-configurable, so it's guaranteed to always
+ // be the first property.
+ Node* descriptors = LoadMapDescriptors(receiver_map);
+ Node* details =
+ LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
+ GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), slow);
}
STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
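
The added guard only grows a JSArray's length after proving the length property is writable: dictionary-mode maps punt to the slow path, and for fast maps the READ_ONLY bit is read straight out of descriptor 0 (safe because a non-configurable length is always the first descriptor). The attribute test reduces to a mask check, sketched here with an assumed bit position:

    #include <cstdint>

    // Assumed stand-in for PropertyDetails::kAttributesReadOnlyMask.
    constexpr uint32_t kReadOnlyMaskSketch = 1u << 1;

    bool LengthIsWritable(uint32_t details_word) {
      return (details_word & kReadOnlyMaskSketch) == 0;
    }
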
@@ -276,7 +291,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
FAST_ELEMENTS, slow);
}
- Store(MachineRepresentation::kTagged, elements, offset, value);
+ Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
Bind(&must_transition);
@@ -320,7 +335,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
FAST_SMI_ELEMENTS, target_kind, slow);
// The elements backing store didn't change, no reload necessary.
CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
- Store(MachineRepresentation::kTagged, elements, offset, value);
+ Store(elements, offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
update_length);
}
@@ -356,8 +371,8 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
// Try to store the value as a double.
{
Label non_number_value(this);
- Node* double_value = PrepareValueForWrite(value, Representation::Double(),
- &non_number_value);
+ Node* double_value = TryTaggedToFloat64(value, &non_number_value);
+
// Make sure we do not store signalling NaNs into double arrays.
double_value = Float64SilenceNaN(double_value);
// If we're about to introduce holes, ensure holey elements.
@@ -384,7 +399,7 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
Node* fast_elements = LoadElements(receiver);
Node* fast_offset = ElementOffsetFromIndex(
intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
- Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+ Store(fast_elements, fast_offset, value);
MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
}
}
@@ -399,14 +414,13 @@ void KeyedStoreGenericAssembler::StoreElementWithCapacity(
void KeyedStoreGenericAssembler::EmitGenericElementStore(
Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
Node* value, Node* context, Label* slow) {
- Label if_in_bounds(this), if_increment_length_by_one(this),
+ Label if_fast(this), if_in_bounds(this), if_increment_length_by_one(this),
if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
if_typed_array(this), if_dictionary(this);
Node* elements = LoadElements(receiver);
Node* elements_kind = LoadMapElementsKind(receiver_map);
- GotoIf(
- Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
- &if_nonfast);
+ Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
+ Bind(&if_fast);
Label if_array(this);
GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
@@ -482,37 +496,268 @@ void KeyedStoreGenericAssembler::EmitGenericElementStore(
}
}
+void KeyedStoreGenericAssembler::JumpIfDataProperty(Node* details,
+ Label* writable,
+ Label* readonly) {
+ // Accessor properties never have the READ_ONLY attribute set.
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+ readonly);
+ Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+ GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
+ // Fall through if it's an accessor property.
+}
+
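
JumpIfDataProperty encodes a three-way classification: READ_ONLY wins outright (only data properties can carry it, per the comment above), then the kind bit separates writable data from accessors. The same decision in plain C++, with assumed bit positions:

    #include <cstdint>

    enum class Classified { kWritableData, kReadOnly, kAccessor };

    Classified Classify(uint32_t details) {
      constexpr uint32_t kReadOnlyBit = 1u << 1;  // assumed position
      constexpr uint32_t kKindBit = 1u << 0;      // 0 = data, assumed
      if (details & kReadOnlyBit) return Classified::kReadOnly;
      if ((details & kKindBit) == 0) return Classified::kWritableData;
      return Classified::kAccessor;
    }
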
+void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
+ Node* receiver_map, Node* name, Label* accessor,
+ Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly,
+ Label* bailout) {
+ Label ok_to_write(this);
+ Variable var_holder(this, MachineRepresentation::kTagged);
+ var_holder.Bind(LoadMapPrototype(receiver_map));
+ Variable var_holder_map(this, MachineRepresentation::kTagged);
+ var_holder_map.Bind(LoadMap(var_holder.value()));
+
+ Variable* merged_variables[] = {&var_holder, &var_holder_map};
+ Label loop(this, arraysize(merged_variables), merged_variables);
+ Goto(&loop);
+ Bind(&loop);
+ {
+ Node* holder = var_holder.value();
+ Node* holder_map = var_holder_map.value();
+ Node* instance_type = LoadMapInstanceType(holder_map);
+ Label next_proto(this);
+ {
+ Label found(this), found_fast(this), found_dict(this), found_global(this);
+ Variable var_meta_storage(this, MachineRepresentation::kTagged);
+ Variable var_entry(this, MachineType::PointerRepresentation());
+ TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
+ &found_dict, &found_global, &var_meta_storage,
+ &var_entry, &next_proto, bailout);
+ Bind(&found_fast);
+ {
+ Node* descriptors = var_meta_storage.value();
+ Node* name_index = var_entry.value();
+ // TODO(jkummerow): Add helper functions for accessing value and
+ // details by entry.
+ const int kNameToDetailsOffset = (DescriptorArray::kDescriptorDetails -
+ DescriptorArray::kDescriptorKey) *
+ kPointerSize;
+ Node* details = LoadAndUntagToWord32FixedArrayElement(
+ descriptors, name_index, kNameToDetailsOffset);
+ JumpIfDataProperty(details, &ok_to_write, readonly);
+
+ // Accessor case.
+ Variable var_details(this, MachineRepresentation::kWord32);
+ LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
+ &var_details, var_accessor_pair);
+ var_accessor_holder->Bind(holder);
+ Goto(accessor);
+ }
+
+ Bind(&found_dict);
+ {
+ Node* dictionary = var_meta_storage.value();
+ Node* entry = var_entry.value();
+ const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
+ NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ Node* details = LoadAndUntagToWord32FixedArrayElement(
+ dictionary, entry, kNameToDetailsOffset);
+ JumpIfDataProperty(details, &ok_to_write, readonly);
+
+ // Accessor case.
+ const int kNameToValueOffset = (NameDictionary::kEntryValueIndex -
+ NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ var_accessor_pair->Bind(
+ LoadFixedArrayElement(dictionary, entry, kNameToValueOffset));
+ var_accessor_holder->Bind(holder);
+ Goto(accessor);
+ }
+
+ Bind(&found_global);
+ {
+ Node* dictionary = var_meta_storage.value();
+ Node* entry = var_entry.value();
+ const int kNameToValueOffset = (GlobalDictionary::kEntryValueIndex -
+ GlobalDictionary::kEntryKeyIndex) *
+ kPointerSize;
+
+ Node* property_cell =
+ LoadFixedArrayElement(dictionary, entry, kNameToValueOffset);
+
+ Node* value =
+ LoadObjectField(property_cell, PropertyCell::kValueOffset);
+ GotoIf(WordEqual(value, TheHoleConstant()), &next_proto);
+ Node* details = LoadAndUntagToWord32ObjectField(
+ property_cell, PropertyCell::kDetailsOffset);
+ JumpIfDataProperty(details, &ok_to_write, readonly);
+
+ // Accessor case.
+ var_accessor_pair->Bind(value);
+ var_accessor_holder->Bind(holder);
+ Goto(accessor);
+ }
+ }
+
+ Bind(&next_proto);
+    // Bail out if this could be an integer-indexed exotic (typed array) case.
+ GotoIf(Word32Equal(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+ bailout);
+ Node* proto = LoadMapPrototype(holder_map);
+ GotoIf(WordEqual(proto, NullConstant()), &ok_to_write);
+ var_holder.Bind(proto);
+ var_holder_map.Bind(LoadMap(proto));
+ Goto(&loop);
+ }
+ Bind(&ok_to_write);
+}
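
LookupPropertyOnPrototypeChain walks every prototype map, dispatching on how the holder stores properties (fast descriptors, name dictionary, or global property cells), and exits through one of four doors: ok_to_write, accessor, readonly, or bailout. Stripped of the CSA plumbing, the control flow reduces to the walk below (stand-in types; a faithful version would also handle the typed-array bailout):

    enum class Hit { kNone, kWritableData, kReadOnlyData, kAccessor };

    struct ProtoSketch {
      ProtoSketch* next = nullptr;
      Hit Lookup(const char* /*name*/) { return Hit::kNone; }  // stub
    };

    // Writable data on the chain does not block adding an own property,
    // so the walk only diverts for readonly data (store fails) or
    // accessors (the setter must be called instead).
    Hit WalkChain(ProtoSketch* p, const char* name) {
      for (; p != nullptr; p = p->next) {
        Hit h = p->Lookup(name);
        if (h == Hit::kReadOnlyData || h == Hit::kAccessor) return h;
        if (h == Hit::kWritableData) return Hit::kNone;  // ok to write
      }
      return Hit::kNone;  // nothing on the chain objects to the add
    }
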
+
void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
- Node* receiver, Node* receiver_map, const StoreICParameters* p,
- Label* slow) {
- Comment("stub cache probe");
- // TODO(jkummerow): Don't rely on the stub cache as much.
- // - existing properties can be overwritten inline (unless readonly).
- // - for dictionary mode receivers, we can even add properties inline
- // (unless the prototype chain prevents it).
- Variable var_handler(this, MachineRepresentation::kTagged);
- Label found_handler(this, &var_handler), stub_cache_miss(this);
- TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
- &found_handler, &var_handler, &stub_cache_miss);
- Bind(&found_handler);
+ Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
+ LanguageMode language_mode) {
+ Variable var_accessor_pair(this, MachineRepresentation::kTagged);
+ Variable var_accessor_holder(this, MachineRepresentation::kTagged);
+ Label stub_cache(this), fast_properties(this), dictionary_properties(this),
+ accessor(this), readonly(this);
+ Node* properties = LoadProperties(receiver);
+ Node* properties_map = LoadMap(properties);
+ Branch(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+ &dictionary_properties, &fast_properties);
+
+ Bind(&fast_properties);
{
- Comment("KeyedStoreGeneric found handler");
- HandleStoreICHandlerCase(p, var_handler.value(), slow);
+ // TODO(jkummerow): Does it make sense to support some cases here inline?
+ // Maybe overwrite existing writable properties?
+ // Maybe support map transitions?
+ Goto(&stub_cache);
}
- Bind(&stub_cache_miss);
+
+ Bind(&dictionary_properties);
{
- Comment("KeyedStoreGeneric_miss");
- TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
- p->vector, p->receiver, p->name);
+ Comment("dictionary property store");
+ // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+ // seeing global objects here (which would need special handling).
+
+ Variable var_name_index(this, MachineType::PointerRepresentation());
+ Label dictionary_found(this, &var_name_index), not_found(this);
+ NameDictionaryLookup<NameDictionary>(properties, p->name, &dictionary_found,
+ &var_name_index, &not_found);
+ Bind(&dictionary_found);
+ {
+ Label overwrite(this);
+ const int kNameToDetailsOffset = (NameDictionary::kEntryDetailsIndex -
+ NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ Node* details = LoadAndUntagToWord32FixedArrayElement(
+ properties, var_name_index.value(), kNameToDetailsOffset);
+ JumpIfDataProperty(details, &overwrite, &readonly);
+
+ // Accessor case.
+ const int kNameToValueOffset =
+ (NameDictionary::kEntryValueIndex - NameDictionary::kEntryKeyIndex) *
+ kPointerSize;
+ var_accessor_pair.Bind(LoadFixedArrayElement(
+ properties, var_name_index.value(), kNameToValueOffset));
+ var_accessor_holder.Bind(receiver);
+ Goto(&accessor);
+
+ Bind(&overwrite);
+ {
+ StoreFixedArrayElement(properties, var_name_index.value(), p->value,
+ UPDATE_WRITE_BARRIER, kNameToValueOffset);
+ Return(p->value);
+ }
+ }
+
+ Bind(&not_found);
+ {
+ LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
+ &var_accessor_pair, &var_accessor_holder,
+ &readonly, slow);
+ Add<NameDictionary>(properties, p->name, p->value, slow);
+ Return(p->value);
+ }
+ }
+
+ Bind(&accessor);
+ {
+ Label not_callable(this);
+ Node* accessor_pair = var_accessor_pair.value();
+ GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
+ CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
+ Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+ Node* setter_map = LoadMap(setter);
+ // FunctionTemplateInfo setters are not supported yet.
+ GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
+ GotoUnless(IsCallableMap(setter_map), &not_callable);
+
+ Callable callable = CodeFactory::Call(isolate());
+ CallJS(callable, p->context, setter, receiver, p->value);
+ Return(p->value);
+
+ Bind(&not_callable);
+ {
+ if (language_mode == STRICT) {
+ Node* message =
+ SmiConstant(Smi::FromInt(MessageTemplate::kNoSetterInCallback));
+ TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+ var_accessor_holder.value());
+ } else {
+ DCHECK_EQ(SLOPPY, language_mode);
+ Return(p->value);
+ }
+ }
+ }
+
+ Bind(&readonly);
+ {
+ if (language_mode == STRICT) {
+ Node* message =
+ SmiConstant(Smi::FromInt(MessageTemplate::kStrictReadOnlyProperty));
+ Node* type = Typeof(p->receiver, p->context);
+ TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+ type, p->receiver);
+ } else {
+ DCHECK_EQ(SLOPPY, language_mode);
+ Return(p->value);
+ }
+ }
+
+ Bind(&stub_cache);
+ {
+ Comment("stub cache probe");
+ Variable var_handler(this, MachineRepresentation::kTagged);
+ Label found_handler(this, &var_handler), stub_cache_miss(this);
+ TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+ &found_handler, &var_handler, &stub_cache_miss);
+ Bind(&found_handler);
+ {
+ Comment("KeyedStoreGeneric found handler");
+ HandleStoreICHandlerCase(p, var_handler.value(), slow);
+ }
+ Bind(&stub_cache_miss);
+ {
+ Comment("KeyedStoreGeneric_miss");
+ TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value,
+ p->slot, p->vector, p->receiver, p->name);
+ }
}
}
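
Both failure paths above (readonly property, non-callable setter) fork on language mode: strict stores tail-call into Runtime::kThrowTypeError, while sloppy stores silently return the value unchanged. A standalone sketch of that contract:

    #include <stdexcept>

    enum class Mode { kSloppy, kStrict };

    // Returns true if the caller may treat the store as "done".
    bool HandleFailedStore(Mode mode, const char* message) {
      if (mode == Mode::kStrict) throw std::runtime_error(message);
      return true;  // sloppy: swallow the failure, value is returned
    }
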
-void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
- LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
+ typedef StoreWithVectorDescriptor Descriptor;
+
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* name = Parameter(Descriptor::kName);
+ Node* value = Parameter(Descriptor::kValue);
+ Node* slot = Parameter(Descriptor::kSlot);
+ Node* vector = Parameter(Descriptor::kVector);
+ Node* context = Parameter(Descriptor::kContext);
+
Variable var_index(this, MachineType::PointerRepresentation());
Label if_index(this), if_unique_name(this), slow(this);
- Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
@@ -522,26 +767,28 @@ void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
&slow);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+ TryToName(name, &if_index, &var_index, &if_unique_name, &slow);
Bind(&if_index);
{
Comment("integer index");
EmitGenericElementStore(receiver, receiver_map, instance_type,
- var_index.value(), p->value, p->context, &slow);
+ var_index.value(), value, context, &slow);
}
Bind(&if_unique_name);
{
Comment("key is unique name");
- EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+ KeyedStoreGenericAssembler::StoreICParameters p(context, receiver, name,
+ value, slot, vector);
+ EmitGenericPropertyStore(receiver, receiver_map, &p, &slow, language_mode);
}
Bind(&slow);
{
Comment("KeyedStoreGeneric_slow");
- TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
- p->value, SmiConstant(language_mode));
+ TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
+ SmiConstant(language_mode));
}
}
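
KeyedStoreGeneric is now a self-contained stub: instead of receiving StoreICParameters from a caller, it reads receiver, name, value, slot, vector and context out of StoreWithVectorDescriptor parameter slots, and only builds a StoreICParameters struct locally for the unique-name path. The slot order as used in the diff (enumerator values here are illustrative):

    enum StoreWithVectorSlotSketch {
      kReceiver = 0,
      kName,
      kValue,
      kSlot,
      kVector,
      kContext,
    };
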
diff --git a/deps/v8/src/ic/keyed-store-generic.h b/deps/v8/src/ic/keyed-store-generic.h
index daeb61fe68..8028736aa1 100644
--- a/deps/v8/src/ic/keyed-store-generic.h
+++ b/deps/v8/src/ic/keyed-store-generic.h
@@ -5,15 +5,18 @@
#ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
#define V8_SRC_IC_KEYED_STORE_GENERIC_H_
-#include "src/code-stub-assembler.h"
+#include "src/globals.h"
namespace v8 {
namespace internal {
+namespace compiler {
+class CodeAssemblerState;
+}
+
class KeyedStoreGenericGenerator {
public:
- static void Generate(CodeStubAssembler* assembler,
- const CodeStubAssembler::StoreICParameters* p,
+ static void Generate(compiler::CodeAssemblerState* state,
LanguageMode language_mode);
};
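
The header now gets away with a forward declaration because it only ever names CodeAssemblerState through a pointer, so the heavyweight code-stub-assembler.h include drops out of the public interface. The general pattern, for reference:

    // Header: no definition needed to declare a pointer parameter.
    namespace compiler { class CodeAssemblerState; }

    class GeneratorSketch {
     public:
      static void Generate(compiler::CodeAssemblerState* state);
    };

    // The .cc file includes the full definition before using the type.
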
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index b2ddea5dac..43588b707a 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -129,14 +129,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Addu(sp, sp, Operand(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in ra register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,18 +173,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ lw(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -342,57 +322,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ lw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ And(at, scratch, Operand(Map::Deprecated::kMask));
- __ Branch(miss, ne, at, Operand(zero_reg));
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ lw(scratch,
- FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- // Compare map directly within the Branch() functions.
- __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
- __ Branch(miss_label, ne, map_reg, Operand(scratch));
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +449,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ li(v0, value);
- __ Ret();
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
deleted file mode 100644
index 86a602b3ec..0000000000
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ li(a0, Operand(Smi::FromInt(language_mode)));
- __ Push(a0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index 561c9d331b..e31aab1d76 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -19,455 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. Can be the same as elements or name clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1,
- Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- __ lw(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY))
- << kSmiTagSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ sw(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = a0;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), v0, a3, t0);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in ra.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in ra.
-
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in ra.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in ra.
-
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch = t0;
- Register scratch2 = t4;
- Register scratch3 = t5;
- Register address = t1;
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- scratch, scratch2, scratch3, address));
-
- if (check_map == kCheckMap) {
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(fast_double, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element.
- Label holecheck_passed1;
- __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
- __ lw(scratch, MemOperand(address));
- __ Branch(&holecheck_passed1, ne, scratch,
- Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch, key, Operand(Smi::FromInt(1)));
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
- __ sw(value, MemOperand(address));
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch, key, Operand(Smi::FromInt(1)));
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
- __ sw(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
- __ Branch(slow, ne, elements_map, Operand(at));
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so
- // go to the runtime.
- __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
- kHoleNanUpper32Offset - kHeapObjectTag));
- __ Lsa(address, address, key, kPointerSizeLog2);
- __ lw(scratch, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch,
- Operand(kHoleNanUpper32));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
- scratch3, &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Addu(scratch, key, Operand(Smi::FromInt(1)));
- __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, scratch, Operand(at));
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
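For orientation, the transition arcs wired up above can be restated on the host side: a smi store never transitions, a HeapNumber stored into a smi-only array moves it to FAST_DOUBLE_ELEMENTS, and any other non-smi forces FAST_ELEMENTS. The C++ sketch below is an illustrative stand-in, not V8's ElementsKind machinery.

    // Minimal model of the transition decision generated above; names and
    // enum values are assumptions for illustration only.
    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    ElementsKind TransitionForStore(ElementsKind current, bool value_is_smi,
                                    bool value_is_heap_number) {
      if (value_is_smi) return current;  // smi stores need no transition
      if (current == FAST_SMI_ELEMENTS) {
        // transition_smi_elements: double -> FAST_DOUBLE_ELEMENTS, else object
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
        return FAST_ELEMENTS;  // transition_double_elements
      }
      return current;  // FAST_ELEMENTS already holds any value
    }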
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
-
- // Register usage.
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(value.is(a0));
- Register receiver_map = a3;
- Register elements_map = t2;
- Register elements = t3; // Elements array of the receiver.
- // t0 and t1 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
- // Check if the object is a JS array or not.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));
-
- // Object case: Check key against length in the elements array.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object, lo, key, Operand(t0));
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // a0: value.
- // a1: key.
- // a2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns here.
-
- __ bind(&maybe_name_key);
- __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
- __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(t0, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
- t2, t4, t5);
- // Cache miss.
- __ Branch(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // The registers holding the key and the array length are still intact.
- // Only support writing to array[array.length].
- __ Branch(&slow, ne, key, Operand(t0));
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, key, Operand(t0));
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&check_if_double_array, ne, elements_map,
- Heap::kFixedArrayMapRootIndex);
-
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&extra, hs, key, Operand(t0));
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
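Before it ever reaches the helper, the deleted GenerateMegamorphic performs a three-way dispatch on the key and receiver. A hedged C++ restatement, with illustrative predicate names rather than V8 API:

    enum class StorePath { kFastElements, kStubCacheProbe, kRuntimeSlow };

    // Sketch of the branch structure above: smi keys try the fast element
    // paths, unique names probe the store stub cache, everything else (and
    // any receiver needing access checks) goes to the runtime.
    StorePath ClassifyKeyedStore(bool key_is_smi, bool key_is_unique_name,
                                 bool receiver_is_smi, bool needs_access_check) {
      if (!key_is_smi) {
        return key_is_unique_name ? StorePath::kStubCacheProbe
                                  : StorePath::kRuntimeSlow;
      }
      if (receiver_is_smi || needs_access_check) return StorePath::kRuntimeSlow;
      return StorePath::kFastElements;  // still subject to the bounds checks
    }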
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -491,40 +42,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = t1;
- DCHECK(receiver.is(a1));
- DCHECK(name.is(a2));
- DCHECK(value.is(a0));
- DCHECK(StoreWithVectorDescriptor::VectorRegister().is(a3));
- DCHECK(StoreWithVectorDescriptor::SlotRegister().is(t0));
-
- __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
- GenerateMiss(masm);
-}
-
-
#undef __
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
deleted file mode 100644
index d476c1e63e..0000000000
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits.
- Register offset, Register scratch, Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ Lsa(offset_scratch, offset, offset, 1);
-
- // Calculate the base address of the entry.
- __ li(base_addr, Operand(key_offset));
- __ Addu(base_addr, base_addr, offset_scratch);
-
- // Check that the key in the entry matches the name.
- __ lw(at, MemOperand(base_addr, 0));
- __ Branch(&miss, ne, name, Operand(at));
-
- // Check the map matches.
- __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Branch(&miss, ne, at, Operand(scratch2));
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Check register validity.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Addu(scratch, scratch, at);
- __ Xor(scratch, scratch, Operand(kPrimaryMagic));
- __ And(scratch, scratch,
- Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ Subu(scratch, scratch, name);
- __ Addu(scratch, scratch, Operand(kSecondaryMagic));
- __ And(scratch, scratch,
- Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS
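The probe code deleted here computes each table index in three instructions. The C++ sketch below mirrors that arithmetic; the magic constants and table sizes are placeholders (assumptions), with the authoritative values in src/ic/stub-cache.h.

    #include <cstdint>

    constexpr uint32_t kPrimaryMagic = 0x3d532433;    // placeholder value
    constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // placeholder value
    constexpr int kCacheIndexShift = 2;        // offsets are scaled by 4
    constexpr int kPrimaryTableSize = 2048;    // placeholder size
    constexpr int kSecondaryTableSize = 512;   // placeholder size

    // Primary probe: the Addu/Xor/And sequence above.
    uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_word) {
      uint32_t hash = (name_hash_field + map_word) ^ kPrimaryMagic;
      return hash & ((kPrimaryTableSize - 1) << kCacheIndexShift);
    }

    // Secondary probe: the Subu/Addu/And sequence above, reusing the masked
    // primary offset and folding the name pointer back out.
    uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_word) {
      uint32_t hash = primary_offset - name_word + kSecondaryMagic;
      return hash & ((kSecondaryTableSize - 1) << kCacheIndexShift);
    }

Keeping both offsets pre-scaled by kCacheIndexShift lets the 32-bit probe code index the 12-byte entries with a single multiply-by-3 and no further shift.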
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 249f8fedb3..06af88d19e 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -129,14 +129,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ Daddu(sp, sp, Operand(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in ra register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,18 +173,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ ld(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -342,57 +322,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ And(at, scratch, Operand(Map::Deprecated::kMask));
- __ Branch(miss, ne, at, Operand(zero_reg));
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ ld(scratch,
- FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- // Compare map directly within the Branch() functions.
- __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
- __ Branch(miss_label, ne, map_reg, Operand(scratch));
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +449,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ li(v0, value);
- __ Ret();
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
deleted file mode 100644
index 276f3afd38..0000000000
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
-
- __ li(a0, Operand(Smi::FromInt(language_mode)));
- __ Push(a0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 57efa350c8..fa351ba5a3 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -19,458 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
- // name: Property name. It is not clobbered if a jump to the miss label is
- // done.
- // result: Register for the result. It is only updated if a jump to the miss
- // label is not done. Can be the same as elements or name, clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
- // The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
- // in scratch2 is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1,
- Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Get the value at the masked, scaled index and return.
- __ ld(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
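The magic offsets in both dictionary helpers above come from NameDictionary's entry layout: past the header, each entry is three words (key, value, details). A sketch of the arithmetic, where the header size and elements start index are illustrative assumptions:

    #include <cstddef>

    constexpr size_t kPointerSize = 8;                 // MIPS64
    constexpr size_t kHeaderSize = 3 * kPointerSize;   // assumption
    constexpr size_t kElementsStartIndex = 3;          // assumption

    constexpr size_t kElementsStartOffset =
        kHeaderSize + kElementsStartIndex * kPointerSize;
    // As in the deleted code: the value sits one word past the entry's key
    // slot, the details word two words past it.
    constexpr size_t kValueOffset = kElementsStartOffset + 1 * kPointerSize;
    constexpr size_t kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;

    static_assert(kDetailsOffset - kValueOffset == kPointerSize,
                  "details immediately follows value");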
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
- // value: The value to store.
- // The two scratch registers need to be different from elements, name and
- // value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
- // The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
- // in scratch2 is used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index.
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY));
- __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
- __ Branch(miss, ne, at, Operand(zero_reg));
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ sd(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mov(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
- kDontSaveFPRegs);
-}
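The masked test above folds two conditions into one branch: the entry must be a normal data property (TypeField) and must not carry the READ_ONLY attribute. A minimal restatement, with illustrative bit positions standing in for V8's PropertyDetails layout:

    #include <cstdint>

    constexpr uint32_t kTypeFieldMask = 0x3;          // assumption
    constexpr uint32_t kReadOnlyAttribute = 1u << 3;  // assumption

    // Mirrors: And(at, details, TypeField::kMask | encode(READ_ONLY));
    //          Branch(miss, ne, at, zero_reg);
    bool CanStoreToDictionaryEntry(uint32_t details_word) {
      return (details_word & (kTypeFieldMask | kReadOnlyAttribute)) == 0;
    }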
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = a0;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
- Label slow;
-
- __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), v0, a3, a4);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is on the stack.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in ra.
-
- __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in ra.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in ra.
-
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch = a4;
- Register scratch2 = t0;
- Register address = a5;
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- scratch, scratch2, address));
-
- if (check_map == kCheckMap) {
- __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(fast_double, ne, elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element.
- Label holecheck_passed1;
- __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ SmiScale(at, key, kPointerSizeLog2);
- __ daddu(address, address, at);
- __ ld(scratch, MemOperand(address));
-
- __ Branch(&holecheck_passed1, ne, scratch,
- Operand(masm->isolate()->factory()->the_hole_value()));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
- __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ Daddu(address, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch);
- __ sd(value, MemOperand(address));
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
- __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Daddu(address, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiScale(scratch, key, kPointerSizeLog2);
- __ Daddu(address, address, scratch);
- __ sd(value, MemOperand(address));
- // Update write barrier for the elements array address.
- __ mov(scratch, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
- __ Branch(slow, ne, elements_map, Operand(at));
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- __ Daddu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
- kHeapObjectTag));
- __ SmiScale(at, key, kPointerSizeLog2);
- __ daddu(address, address, at);
- __ lw(scratch, MemOperand(address));
- __ Branch(&fast_double_without_map_check, ne, scratch,
- Operand(static_cast<int32_t>(kHoleNanUpper32)));
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
- __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, scratch, Operand(at));
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
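Note how the double hole check above loads only the word at Register::kExponentOffset and compares it against kHoleNanUpper32: V8 encodes the hole as a NaN whose upper word is reserved, so a single 32-bit compare suffices. A host-side sketch (the constant's value here is an assumption; the authoritative definition is kHoleNanUpper32 in src/globals.h):

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed value

    bool IsTheHoleDouble(double candidate) {
      uint64_t bits;
      std::memcpy(&bits, &candidate, sizeof bits);
      // The upper word alone distinguishes the hole NaN from ordinary
      // doubles and from the canonical NaNs arithmetic produces.
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
    }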
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // ---------- S t a t e --------------
- // -- a0 : value
- // -- a1 : key
- // -- a2 : receiver
- // -- ra : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
-
- // Register usage.
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(value.is(a0));
- Register receiver_map = a3;
- Register elements_map = a6;
- Register elements = a7; // Elements array of the receiver.
- // a4 and a5 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
- __ Branch(&slow, ne, a4, Operand(zero_reg));
- // Check if the object is a JS array or not.
- __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
- // Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
-
- // Object case: Check key against length in the elements array.
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object, lo, key, Operand(a4));
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // a0: value.
- // a1: key.
- // a2: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns here.
-
- __ bind(&maybe_name_key);
- __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
- __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(a4, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
-
- DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ li(slot, Operand(Smi::FromInt(slot_index)));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
- a6, a7, t0);
- // Cache miss.
- __ Branch(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // The registers holding the key and the array length are still intact.
- // Only support writing to array[array.length].
- __ Branch(&slow, ne, key, Operand(a4));
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, key, Operand(a4));
- __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ Branch(&check_if_double_array, ne, elements_map,
- Heap::kFixedArrayMapRootIndex);
-
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&extra, hs, key, Operand(a4));
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -494,38 +42,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = a5;
- DCHECK(!AreAliased(
- value, receiver, name, StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::SlotRegister(), dictionary, a6, a7));
-
- __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
- __ Ret(USE_DELAY_SLOT);
- __ Move(v0, value); // Ensure the stub returns correct value.
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
- GenerateMiss(masm);
-}
-
-
#undef __
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
deleted file mode 100644
index 6a87b7ba88..0000000000
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits.
- Register offset, Register scratch, Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
- uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
- uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ Dlsa(offset_scratch, offset, offset, 1);
-
- // Calculate the base address of the entry.
- __ li(base_addr, Operand(key_offset));
- __ Dlsa(base_addr, base_addr, offset_scratch,
- kPointerSizeLog2 - StubCache::kCacheIndexShift);
-
- // Check that the key in the entry matches the name.
- __ ld(at, MemOperand(base_addr, 0));
- __ Branch(&miss, ne, name, Operand(at));
-
- // Check the map matches.
- __ ld(at, MemOperand(base_addr,
- static_cast<int32_t>(map_off_addr - key_off_addr)));
- __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Branch(&miss, ne, at, Operand(scratch2));
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ ld(code, MemOperand(base_addr,
- static_cast<int32_t>(value_off_addr - key_off_addr)));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 3 * kPointerSize.
- // DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Check register validity.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ Addu(scratch, scratch, at);
- __ Xor(scratch, scratch, Operand(kPrimaryMagic));
- __ And(scratch, scratch,
- Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ Subu(scratch, scratch, name);
- __ Addu(scratch, scratch, kSecondaryMagic);
- __ And(scratch, scratch,
- Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_MIPS64
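The shift-add in this file's ProbeTable (Dlsa with shift 1) is the multiply-by-3 the comment mentions: each cache entry holds three pointer-sized fields (key, value, map). The second Dlsa then rescales the index from kCacheIndexShift units to pointer-sized slots. A sketch of the combined address computation:

    #include <cstdint>

    // key_column_base is the address of the table's key column; scaled_offset
    // is the probe offset, already scaled by kCacheIndexShift as above.
    uintptr_t EntryAddress(uintptr_t key_column_base, uintptr_t scaled_offset,
                           int pointer_size_log2, int cache_index_shift) {
      uintptr_t offset_times_3 = scaled_offset + (scaled_offset << 1);
      return key_column_base +
             (offset_times_3 << (pointer_size_log2 - cache_index_shift));
    }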
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index e0caaa6a1f..d4edcc1ec9 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -130,14 +130,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ addi(sp, sp, Operand(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -184,18 +176,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ LoadP(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -350,58 +330,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ lwz(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ DecodeField<Map::Deprecated>(r0, scratch, SetRC);
- __ bne(miss, cr0);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ LoadP(scratch, FieldMemOperand(
- scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ cmp(value_reg, scratch);
- __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ bne(miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -538,14 +466,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(r3, value);
- __ Ret();
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
deleted file mode 100644
index c6b36f29f4..0000000000
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- __ mov(r0, Operand(Smi::FromInt(language_mode)));
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(), r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
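This deleted PPC stub and its MIPS64 counterpart above build the identical runtime frame before tail-calling Runtime::kSetProperty (only the push scheduling differs). Sketched as a struct with illustrative field names:

    #include <cstdint>

    // Argument layout both stubs push; the language mode travels as a smi.
    struct RuntimeSetPropertyArgs {
      uintptr_t receiver;           // StoreDescriptor::ReceiverRegister()
      uintptr_t name;               // StoreDescriptor::NameRegister()
      uintptr_t value;              // StoreDescriptor::ValueRegister()
      uintptr_t language_mode_smi;  // Smi::FromInt(language_mode)
    };

Because these are tail calls, the runtime's return goes straight back to the IC's caller rather than through the stub.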
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 359a6a42dd..3c325d8f92 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -19,187 +19,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
- // name: Property name. It is not clobbered if a jump to the miss label is
- // done.
- // result: Register for the result. It is only updated if a jump to the miss
- // label is not done. Can be the same as elements or name, clobbering
-// one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ mr(r0, scratch2);
- __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ and_(scratch2, scratch1, scratch2, SetRC);
- __ bne(miss, cr0);
- __ mr(scratch2, r0);
-
- // Get the value at the masked, scaled index and return.
- __ LoadP(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
- // done.
- // value: The value to store.
- // The two scratch registers need to be different from elements, name and
- // value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ mr(r0, scratch2);
- __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
- __ and_(scratch2, scratch1, scratch2, SetRC);
- __ bne(miss, cr0);
- __ mr(scratch2, r0);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ StoreP(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ mr(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = r3;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), r3, r6, r7);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r6; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -223,307 +42,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch = r7;
- Register address = r8;
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- scratch, address));
-
- if (check_map == kCheckMap) {
- __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ cmp(elements_map, scratch);
- __ bne(fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element.
- Label holecheck_passed1;
- __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ LoadPX(scratch, MemOperand(address, scratch));
- __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
- __ bne(&holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
- }
- // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ StorePX(value, MemOperand(address, scratch));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
- }
- __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ StorePUX(value, MemOperand(address, scratch));
- // Update write barrier for the elements array address.
- __ mr(scratch, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ bne(slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- __ addi(address, elements,
- Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
- kHeapObjectTag)));
- __ SmiToDoubleArrayOffset(scratch, key);
- __ lwzx(scratch, MemOperand(address, scratch));
- __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- __ bne(&fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ bne(&non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&finish_object_store);
-}
-
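The three transition labels in this helper implement a small lattice over elements kinds. A self-contained model of that routing (the enum constants mirror the V8 names used above; the function itself is invented for illustration):

    enum ElementsKindSketch { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    ElementsKindSketch TargetKind(ElementsKindSketch current, bool value_is_smi,
                                  bool value_is_heap_number) {
      if (current == FAST_SMI_ELEMENTS && !value_is_smi) {
        // transition_smi_elements: a HeapNumber widens the array to doubles;
        // any other heap object forces generic FAST_ELEMENTS (non_double_value).
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_smi && !value_is_heap_number) {
        return FAST_ELEMENTS;  // transition_double_elements
      }
      return current;  // the store proceeds in place
    }
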
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // ---------- S t a t e --------------
- // -- r3 : value
- // -- r4 : key
- // -- r5 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
-
- // Register usage.
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(receiver.is(r4));
- DCHECK(key.is(r5));
- DCHECK(value.is(r3));
- Register receiver_map = r6;
- Register elements_map = r9;
- Register elements = r10; // Elements array of the receiver.
- // r7 and r8 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ bne(&slow, cr0);
- // Check if the object is a JS array or not.
- __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ cmpi(r7, Operand(JS_ARRAY_TYPE));
- __ beq(&array);
- // Check that the object is some kind of JSObject.
- __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
- __ blt(&slow);
-
- // Object case: Check key against length in the elements array.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmpl(key, ip);
- __ blt(&fast_object);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r3: value.
- // r4: key.
- // r5: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
- __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(r7, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
- r9, r10, r11);
- // Cache miss.
- __ b(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ bne(&slow); // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmpl(key, ip);
- __ bge(&slow);
- __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ cmp(elements_map, ip); // PPC: ip can be reused as a scratch here
- __ bne(&check_if_double_array);
- __ b(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ cmp(elements_map, ip); // PPC: ip reused as a scratch again
- __ bne(&slow);
- __ b(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ cmpl(key, ip);
- __ bge(&extra);
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
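For the JSArray path, the bounds checks above reduce to a three-way classification of the store. A stand-alone sketch (the names are invented; lengths are the untagged smi values):

    #include <cstdint>

    enum class StorePath { kFastInPlace, kGrowByOne, kSlowRuntime };

    StorePath Classify(uint32_t key, uint32_t array_length, uint32_t capacity) {
      if (key < array_length) return StorePath::kFastInPlace;  // fast_object/fast_double
      if (key == array_length && key < capacity)
        return StorePath::kGrowByOne;                          // extra
      return StorePath::kSlowRuntime;                          // slow
    }
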
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r8;
- DCHECK(receiver.is(r4));
- DCHECK(name.is(r5));
- DCHECK(value.is(r3));
- DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r6));
- DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r7));
-
- __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
- GenerateMiss(masm);
-}
-
-
#undef __
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
deleted file mode 100644
index 3dad306f11..0000000000
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits.
- Register offset, Register scratch, Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ ShiftLeftImm(offset_scratch, offset, Operand(1));
- __ add(offset_scratch, offset, offset_scratch);
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_PPC64
- DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
- __ ShiftLeftImm(offset_scratch, offset_scratch,
- Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
- DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
- __ add(base_addr, base_addr, offset_scratch);
-
- // Check that the key in the entry matches the name.
- __ LoadP(ip, MemOperand(base_addr, 0));
- __ cmp(name, ip);
- __ bne(&miss);
-
- // Check the map matches.
- __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ cmp(ip, scratch2);
- __ bne(&miss);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ b(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ b(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mtctr(r0);
- __ bctr();
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
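The shift-and-add sequence above computes the entry address as key_table + 3 * offset, rescaled from cache-index granularity to pointer granularity on 64-bit targets. The same arithmetic as stand-alone C++ (illustrative; offset is the masked hash produced by GenerateProbe):

    #include <cstdint>

    uintptr_t EntryBase(uintptr_t key_table, uint32_t offset,
                        int pointer_size_log2, int cache_index_shift) {
      uint32_t entry = offset + (offset << 1);  // * 3: name, code, map per entry
      return key_table + (static_cast<uintptr_t>(entry)
                          << (pointer_size_log2 - cache_index_shift));
    }
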
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
-#if V8_TARGET_ARCH_PPC64
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- DCHECK(sizeof(Entry) == 24);
-#else
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-#endif
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Check that the scratch, extra, extra2 and extra3 registers are valid.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, scratch, ip);
- __ Xor(scratch, scratch, Operand(kPrimaryMagic));
- // The mask omits the last two bits because they are not part of the hash.
- __ andi(scratch, scratch,
- Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, name);
- __ Add(scratch, scratch, kSecondaryMagic, r0);
- __ andi(scratch, scratch,
- Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall through and let the caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-
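Both probes mask a 32-bit hash into their table, matching the xor/and and sub/add/and sequences above. A sketch with the magic constants passed in as parameters (function names invented):

    #include <cstdint>

    uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_bits, uint32_t magic,
                           uint32_t table_size, int index_shift) {
      return ((name_hash + map_bits) ^ magic) & ((table_size - 1) << index_shift);
    }

    uint32_t SecondaryOffset(uint32_t primary, uint32_t name_bits, uint32_t magic,
                             uint32_t table_size, int index_shift) {
      return (primary - name_bits + magic) & ((table_size - 1) << index_shift);
    }
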
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index 72658ec1d1..40a8c310d8 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -125,14 +125,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ la(sp, MemOperand(sp, 2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- // No-op. Return address is in lr register.
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -177,16 +169,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
}
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ LoadP(result,
- FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -335,54 +317,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ mov(this->name(), Operand(name));
-}
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
- __ DecodeField<Map::Deprecated>(r0, scratch);
- __ bne(miss);
- }
-}
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ CmpP(value_reg, FieldMemOperand(
- scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ bne(miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -512,12 +446,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(r2, value);
- __ Ret();
-}
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/s390/ic-compiler-s390.cc b/deps/v8/src/ic/s390/ic-compiler-s390.cc
deleted file mode 100644
index a7691d83c5..0000000000
--- a/deps/v8/src/ic/s390/ic-compiler-s390.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- __ mov(r0, Operand(Smi::FromInt(language_mode)));
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister(), r0);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/s390/ic-s390.cc b/deps/v8/src/ic/s390/ic-s390.cc
index bd83af1f59..6438cfca47 100644
--- a/deps/v8/src/ic/s390/ic-s390.cc
+++ b/deps/v8/src/ic/s390/ic-s390.cc
@@ -18,182 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// result: Register for the result. It is only updated if a jump to the miss
-// label is not done. It can be the same as elements or name, in which case
-// that register is clobbered when the miss label is not taken.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register result, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry check that the value is a normal
- // property.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ LoadRR(r0, scratch2);
- __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ AndP(scratch2, scratch1);
- __ bne(miss);
- __ LoadRR(scratch2, r0);
-
- // Get the value at the masked, scaled index and return.
- __ LoadP(result,
- FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
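The kDetailsOffset constant here (and kValueOffset in the store helper below) assumes that each NameDictionary entry is a (key, value, details) triple stored inline in the backing array. A layout sketch with an invented type:

    struct NameDictionaryEntrySketch {
      void* key;      // entry start + 0 * kPointerSize
      void* value;    // entry start + 1 * kPointerSize (kValueOffset)
      void* details;  // entry start + 2 * kPointerSize (kDetailsOffset, a Smi)
    };
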
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-// label is done.
-// name: Property name. It is not clobbered if a jump to the miss label is
-// done.
-// value: The value to store.
-// The two scratch registers need to be different from elements, name and
-// value.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
- Register elements, Register name,
- Register value, Register scratch1,
- Register scratch2) {
- // Main use of the scratch registers.
- // scratch1: Used as temporary and to hold the capacity of the property
- // dictionary.
- // scratch2: Used as temporary.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
- name, scratch1, scratch2);
-
- // If probing finds an entry in the dictionary check that the value
- // is a normal property that is not read only.
- __ bind(&done); // scratch2 == elements + 4 * index
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ LoadRR(r0, scratch2);
- __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
- __ AndP(scratch2, scratch1);
- __ bne(miss /*, cr0*/);
- __ LoadRR(scratch2, r0);
-
- // Store the value at the masked, scaled index and return.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ StoreP(value, MemOperand(scratch2));
-
- // Update the write barrier. Make sure not to clobber the value.
- __ LoadRR(scratch1, value);
- __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
- kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = r2;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), r2, r5, r6);
- __ Ret();
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-// A register that isn't one of the parameters to the load IC.
-static const Register LoadIC_TempRegister() { return r5; }
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
-
- __ Push(receiver, name, slot, vector);
-}
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
- __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in lr.
- Isolate* isolate = masm->isolate();
-
- DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()));
- __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is in lr.
-
- __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
__ Push(StoreWithVectorDescriptor::ValueRegister(),
StoreWithVectorDescriptor::SlotRegister(),
@@ -216,303 +40,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
- Register value, Register key, Register receiver, Register receiver_map,
- Register elements_map, Register elements) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
-
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- Register scratch = r6;
- Register address = r7;
- DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
- scratch, address));
-
- if (check_map == kCheckMap) {
- __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ CmpP(elements_map,
- Operand(masm->isolate()->factory()->fixed_array_map()));
- __ bne(fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element.
- Label holecheck_passed1;
- // TODO(joransiu): Fold AddP into memref of LoadP
- __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ LoadP(scratch, MemOperand(address, scratch));
- __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
- __ bne(&holecheck_passed1, Label::kNear);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
-
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ StoreP(value, MemOperand(address, scratch));
- __ Ret();
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(scratch, key);
- __ StoreP(value, MemOperand(address, scratch));
- __ la(address, MemOperand(address, scratch));
- // Update write barrier for the elements array address.
- __ LoadRR(scratch, value); // Preserve the value which is returned.
- __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
- __ bne(slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- // TODO(joransiu): Fold AddP Operand into LoadlW
- __ AddP(address, elements,
- Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
- kHeapObjectTag)));
- __ SmiToDoubleArrayOffset(scratch, key);
- __ LoadlW(scratch, MemOperand(address, scratch));
- __ CmpP(scratch, Operand(kHoleNanUpper32));
- __ bne(&fast_double_without_map_check, Label::kNear);
- __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
- __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
- }
- __ Ret();
-
- __ bind(&transition_smi_elements);
- // Transition the array appropriately depending on the value type.
- __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ bne(&non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(
- FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
- // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- receiver_map, scratch, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(
- masm, receiver, key, value, receiver_map, mode, slow);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ b(&finish_object_store);
-}
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // ---------- S t a t e --------------
- // -- r2 : value
- // -- r3 : key
- // -- r4 : receiver
- // -- lr : return address
- // -----------------------------------
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
-
- // Register usage.
- Register value = StoreDescriptor::ValueRegister();
- Register key = StoreDescriptor::NameRegister();
- Register receiver = StoreDescriptor::ReceiverRegister();
- DCHECK(receiver.is(r3));
- DCHECK(key.is(r4));
- DCHECK(value.is(r2));
- Register receiver_map = r5;
- Register elements_map = r8;
- Register elements = r9; // Elements array of the receiver.
- // r6 and r7 are used as general scratch registers.
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map of the object.
- __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
- __ bne(&slow, Label::kNear);
- // Check if the object is a JS array or not.
- __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ CmpP(r6, Operand(JS_ARRAY_TYPE));
- __ beq(&array);
- // Check that the object is some kind of JSObject.
- __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
- __ blt(&slow, Label::kNear);
-
- // Object case: Check key against length in the elements array.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ blt(&fast_object);
-
- // Slow case, handle jump to runtime.
- __ bind(&slow);
- // Entry registers are intact.
- // r2: value.
- // r3: key.
- // r4: receiver.
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
- __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(r6, &slow);
-
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
- __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
- r8, r9, ip);
- // Cache miss.
- __ b(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // Condition code from comparing key and array length is still available.
- __ bne(&slow); // Only support writing to array[array.length].
- // Check for room in the elements backing store.
- // Both the key and the length of FixedArray are smis.
- __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ bge(&slow);
- __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
- __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
- __ bne(&check_if_double_array, Label::kNear);
- __ b(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ CmpP(elements_map,
- Operand(masm->isolate()->factory()->fixed_double_array_map()));
- __ bne(&slow);
- __ b(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array.
- __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ bge(&extra);
-
- KeyedStoreGenerateMegamorphicHelper(
- masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
- value, key, receiver, receiver_map, elements_map, elements);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength, value, key, receiver,
- receiver_map, elements_map, elements);
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Label miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r7;
- DCHECK(receiver.is(r3));
- DCHECK(name.is(r4));
- DCHECK(value.is(r2));
- DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r5));
- DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r6));
-
- __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
- __ Ret();
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
- GenerateMiss(masm);
-}
-
#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
@@ -624,13 +151,13 @@ void PatchInlinedSmiCode(Isolate* isolate, Address address,
cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
DCHECK((cc == ne) || (cc == eq));
cc = (cc == ne) ? eq : ne;
- patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
+ patcher.masm()->brc(cc, Operand(branch_instr & 0xffff));
} else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
cc = static_cast<Condition>(
(branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
DCHECK((cc == ne) || (cc == eq));
cc = (cc == ne) ? eq : ne;
- patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
+ patcher.masm()->brcl(cc, Operand(branch_instr & 0xffffffff));
} else {
DCHECK(false);
}
diff --git a/deps/v8/src/ic/s390/stub-cache-s390.cc b/deps/v8/src/ic/s390/stub-cache-s390.cc
deleted file mode 100644
index a0564a3be3..0000000000
--- a/deps/v8/src/ic/s390/stub-cache-s390.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/stub-cache.h"
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits.
- Register offset, Register scratch, Register scratch2,
- Register offset_scratch) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
- uintptr_t value_off_addr =
- reinterpret_cast<uintptr_t>(value_offset.address());
- uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
- // Check the relative positions of the address fields.
- DCHECK(value_off_addr > key_off_addr);
- DCHECK((value_off_addr - key_off_addr) % 4 == 0);
- DCHECK((value_off_addr - key_off_addr) < (256 * 4));
- DCHECK(map_off_addr > key_off_addr);
- DCHECK((map_off_addr - key_off_addr) % 4 == 0);
- DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
- Label miss;
- Register base_addr = scratch;
- scratch = no_reg;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ ShiftLeftP(offset_scratch, offset, Operand(1));
- __ AddP(offset_scratch, offset, offset_scratch);
-
- // Calculate the base address of the entry.
- __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_S390X
- DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
- __ ShiftLeftP(offset_scratch, offset_scratch,
- Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
- DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
- __ AddP(base_addr, base_addr, offset_scratch);
-
- // Check that the key in the entry matches the name.
- __ CmpP(name, MemOperand(base_addr, 0));
- __ bne(&miss, Label::kNear);
-
- // Check the map matches.
- __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
- __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bne(&miss, Label::kNear);
-
- // Get the code entry from the cache.
- Register code = scratch2;
- scratch2 = no_reg;
- __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ b(&miss, Label::kNear);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ b(&miss, Label::kNear);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- // TODO(joransiu): Combine into indirect branch
- __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
- __ b(code);
-
- // Miss: fall through.
- __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
-#if V8_TARGET_ARCH_S390X
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 24.
- DCHECK(sizeof(Entry) == 24);
-#else
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 12.
- DCHECK(sizeof(Entry) == 12);
-#endif
-
- // Make sure that there are no register conflicts.
- DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
- // Check that the scratch, extra, extra2 and extra3 registers are valid.
- DCHECK(!scratch.is(no_reg));
- DCHECK(!extra.is(no_reg));
- DCHECK(!extra2.is(no_reg));
- DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch, extra, extra2 and
- // extra3 don't conflict with the vector and slot registers, which need
- // to be preserved for a handler call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- Register vector, slot;
- if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
- vector = StoreWithVectorDescriptor::VectorRegister();
- slot = StoreWithVectorDescriptor::SlotRegister();
- } else {
- DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
- vector = LoadWithVectorDescriptor::VectorRegister();
- slot = LoadWithVectorDescriptor::SlotRegister();
- }
- DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
- extra3);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
- __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ AddP(scratch, scratch, ip);
- __ XorP(scratch, scratch, Operand(kPrimaryMagic));
- // The mask omits the last two bits because they are not part of the hash.
- __ AndP(scratch, scratch,
- Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Primary miss: Compute hash for secondary probe.
- __ SubP(scratch, scratch, name);
- __ AddP(scratch, scratch, Operand(kSecondaryMagic));
- __ AndP(scratch, scratch,
- Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
- extra3);
-
- // Cache miss: Fall through and let the caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
- extra3);
-}
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index bdd7f4a3be..e8df26d37b 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -48,13 +48,6 @@ class StubCache {
// Collect all maps that match the name.
void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
Handle<Context> native_context, Zone* zone);
- // Generate code for probing the stub cache table.
- // Arguments extra, extra2 and extra3 may be used to pass additional scratch
- // registers. Set to no_reg if not needed.
- // If leave_frame is true, then exit a frame before the tail call.
- void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
- Register scratch, Register extra, Register extra2 = no_reg,
- Register extra3 = no_reg);
enum Table { kPrimary, kSecondary };
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 36acccc007..a89afa8a7e 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -44,16 +44,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ addp(rsp, Immediate(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ Push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ Pop(tmp);
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -93,18 +83,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadNativeContextSlot(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ movp(result,
- FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register result, Register scratch,
Label* miss_label) {
@@ -348,59 +326,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ Move(this->name(), name);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ movl(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
- __ andl(scratch, Immediate(Map::Deprecated::kMask));
- __ j(not_zero, miss);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ movp(scratch,
- FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ cmpp(value_reg, scratch);
- __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- Label do_store;
- __ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ j(not_equal, miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -533,13 +458,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ Move(rax, value);
- __ ret(0);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
deleted file mode 100644
index 9d734338bb..0000000000
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- // Return address is on the stack.
- DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
- !rbx.is(StoreDescriptor::NameRegister()) &&
- !rbx.is(StoreDescriptor::ValueRegister()));
-
- __ PopReturnAddressTo(rbx);
- __ Push(StoreDescriptor::ReceiverRegister());
- __ Push(StoreDescriptor::NameRegister());
- __ Push(StoreDescriptor::ValueRegister());
- __ Push(Smi::FromInt(language_mode));
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index a916e22fa5..587ebd3daa 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -18,450 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register r0, Register r1, Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // r0 - used to hold the capacity of the property dictionary.
- //
- // r1 - used to hold the index into the property dictionary.
- //
- // result - holds the result on exit if the load succeeded.
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
- elements, name, r0, r1);
-
- // If probing finds an entry in the dictionary, r1 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(elements, r1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(PropertyDetails::TypeField::kMask));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movp(result, Operand(elements, r1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so the code at miss_label must always perform a
-// complete backup property store. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register value, Register scratch0,
- Register scratch1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // scratch0 - used during the positive dictionary lookup and is clobbered.
- //
- // scratch1 - used for index into the property dictionary and is clobbered.
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(
- masm, miss_label, &done, elements, name, scratch0, scratch1);
-
- // If probing finds an entry in the dictionary, scratch0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY);
- __ Test(Operand(elements, scratch1, times_pointer_size,
- kDetailsOffset - kHeapObjectTag),
- Smi::FromInt(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movp(Operand(scratch1, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ movp(scratch0, value);
- __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
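(Both deleted helpers assume the same NameDictionary layout: after a fixed header, each entry is a (key, value, details) triple of pointer-sized slots, and the details word packs the property type and attribute bits. A minimal sketch of that offset arithmetic and of the two miss conditions, using placeholder constants rather than the real NameDictionary values:)

#include <cstdint>

// Placeholder layout constants; the real values live in NameDictionary.
constexpr int kPointerSize = 8;
constexpr int kHeaderSize = 3 * kPointerSize;  // assumed header size
constexpr int kElementsStartIndex = 3;         // assumed first-entry index
constexpr int kEntrySize = 3;                  // key, value, details per entry

// Byte offsets of the value and details slots for a given entry, mirroring
// kValueOffset and kDetailsOffset above.
int ValueOffset(int entry) {
  int start = kHeaderSize + kElementsStartIndex * kPointerSize;
  return start + (entry * kEntrySize + 1) * kPointerSize;
}
int DetailsOffset(int entry) {
  int start = kHeaderSize + kElementsStartIndex * kPointerSize;
  return start + (entry * kEntrySize + 2) * kPointerSize;
}

// A load misses unless the type bits mark a normal data property; a store
// additionally misses when the READ_ONLY attribute bit is set.
bool LoadWouldMiss(uint32_t details, uint32_t type_mask) {
  return (details & type_mask) != 0;
}
bool StoreWouldMiss(uint32_t details, uint32_t type_mask,
                    uint32_t read_only_bit) {
  return (details & (type_mask | read_only_bit)) != 0;
}

(That extra READ_ONLY bit is the only semantic difference between the two probes; the lookup itself is shared via GeneratePositiveLookup.)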
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
- DCHECK(value.is(rax));
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- // rbx: receiver's elements array (a FixedArray)
- // receiver is a JSArray.
- // r9: map of receiver
- if (check_map == kCheckMap) {
- __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ movp(kScratchRegister,
- FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
- __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
- value);
- __ ret(0);
-
- __ bind(&non_smi_value);
- // Writing a non-smi, check whether the array allows non-smi elements.
- // r9: receiver's map
- __ CheckFastObjectElements(r9, &transition_smi_elements);
-
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
- value);
- __ movp(rdx, value); // Preserve the value which is returned.
- __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(0);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, slow);
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
- &transition_double_elements);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ leal(rdi, Operand(key, 1));
- __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
- }
- __ ret(0);
-
- __ bind(&transition_smi_elements);
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
- __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &non_double_value);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS ->
- // FAST_DOUBLE_ELEMENTS and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
- rdi, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- rbx, rdi, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
- value, rbx, mode, slow);
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
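(The second half of the helper above is V8's one-way elements-kind generalization: a backing store only ever widens, smi -> double -> tagged, never narrows. A hedged sketch of the transition the deleted code picks for each stored value; the enum mirrors the kind names used above, but the function itself is illustrative:)

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
enum class Stored { Smi, HeapNumber, OtherObject };

ElementsKind KindAfterStore(ElementsKind current, Stored value) {
  switch (current) {
    case FAST_SMI_ELEMENTS:
      if (value == Stored::Smi) return FAST_SMI_ELEMENTS;
      // transition_smi_elements: a HeapNumber widens to unboxed doubles,
      // anything else forces generic tagged elements.
      return value == Stored::HeapNumber ? FAST_DOUBLE_ELEMENTS
                                         : FAST_ELEMENTS;
    case FAST_DOUBLE_ELEMENTS:
      // transition_double_elements: only a non-number object forces the
      // move to tagged elements; smis and numbers store as doubles.
      return value == Stored::OtherObject ? FAST_ELEMENTS
                                          : FAST_DOUBLE_ELEMENTS;
    case FAST_ELEMENTS:
      return FAST_ELEMENTS;  // already the most general fast kind
  }
  return FAST_ELEMENTS;  // unreachable; keeps compilers quiet
}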
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- // Return address is on the stack.
- Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow_with_tagged_index);
- // Get the map from the receiver.
- __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ testb(FieldOperand(r9, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow_with_tagged_index);
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- __ SmiToInteger32(key, key);
-
- __ CmpInstanceType(r9, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ CmpInstanceType(r9, JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds.
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
- // rbx: FixedArray
- __ j(above, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- __ Integer32ToSmi(key, key);
- __ bind(&slow_with_tagged_index);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ movp(r9, FieldOperand(key, HeapObject::kMapOffset));
- __ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
-
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- // The handlers in the stub cache expect a vector and slot. Since we won't
- // change the IC from any downstream misses, a dummy vector can be used.
- Handle<TypeFeedbackVector> dummy_vector =
- TypeFeedbackVector::DummyVector(masm->isolate());
- int slot_index = dummy_vector->GetIndex(
- FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
- __ Move(vector, dummy_vector);
- __ Move(slot, Smi::FromInt(slot_index));
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r9,
- no_reg);
- // Cache miss.
- __ jmp(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // receiver is a JSArray.
- // rbx: receiver's elements array (a FixedArray)
- // flags: smicompare (receiver.length(), key)
- __ j(not_equal, &slow); // do not leave holes in the array
- __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
- __ j(below_equal, &slow);
- // Increment index to get new length.
- __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- // rdi: elements array's map
- __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- // receiver is a JSArray.
- __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array, compute the
- // address to store into and fall through to fast case.
- __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
- __ j(below_equal, &extra);
-
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
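(Register choreography aside, GenerateMegamorphic above reduces to a four-way dispatch on the key and the array's bounds. A compact sketch of that decision tree — the enum and parameters are hypothetical, only the branch structure mirrors the deleted stub:)

#include <cstdint>

enum class StorePath { FastStore, GrowByOne, StubCacheProbe, Runtime };

StorePath ClassifyKeyedStore(bool key_is_smi, bool key_is_unique_name,
                             uint32_t index, uint32_t length,
                             uint32_t capacity) {
  if (!key_is_smi) {
    // maybe_name_key: name keys probe the store stub cache (with a dummy
    // vector/slot); anything else goes straight to the runtime.
    return key_is_unique_name ? StorePath::StubCacheProbe : StorePath::Runtime;
  }
  if (index < length) return StorePath::FastStore;
  // extra: writing exactly at array.length with spare backing-store
  // capacity appends one element and bumps the length.
  if (index == length && index < capacity) return StorePath::GrowByOne;
  return StorePath::Runtime;  // would create a hole or exceed capacity
}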
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = rax;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ movp(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), rbx, rdi, rax);
- __ ret(0);
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- LoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
- !rdi.is(vector));
-
- __ PopReturnAddressTo(rdi);
- __ Push(receiver);
- __ Push(name);
- __ Push(slot);
- __ Push(vector);
- __ PushReturnAddressFrom(rdi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is on the stack.
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is on the stack.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_keyed_load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // The return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ PushReturnAddressFrom(rbx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
Register name = StoreWithVectorDescriptor::NameRegister();
@@ -480,38 +36,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ PushReturnAddressFrom(temp);
}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register name = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r11;
- DCHECK(!AreAliased(dictionary, StoreWithVectorDescriptor::VectorRegister(),
- StoreWithVectorDescriptor::SlotRegister()));
-
- Label miss;
-
- __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1);
- __ ret(0);
-
- __ bind(&miss);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
deleted file mode 100644
index 946aee51fc..0000000000
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register receiver, Register name,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset) {
- // We need to scale up the pointer by 2 when the offset is scaled by less
- // than the pointer size.
- DCHECK(kPointerSize == kInt64Size
- ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
- : kPointerSizeLog2 == StubCache::kCacheIndexShift);
- ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
- DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
- // The offset register holds the entry offset times four (due to masking
- // and shifting optimizations).
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- Label miss;
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ leap(offset, Operand(offset, offset, times_2, 0));
-
- __ LoadAddress(kScratchRegister, key_offset);
-
- // Check that the key in the entry matches the name.
- __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0));
- __ j(not_equal, &miss);
-
- // Get the map entry from the cache.
- // Use key_offset + kPointerSize * 2, rather than loading map_offset.
- DCHECK(stub_cache->map_reference(table).address() -
- stub_cache->key_reference(table).address() ==
- kPointerSize * 2);
- __ movp(kScratchRegister,
- Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Get the code entry from the cache.
- __ LoadAddress(kScratchRegister, value_offset);
- __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Jump to the first instruction in the code stub.
- __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- USE(extra2); // The register extra2 is not used on the X64 platform.
- USE(extra3); // The register extra3 is not used on the X64 platform.
- // Make sure that code is valid. The multiplying code relies on the
- // entry size being 3 * kPointerSize.
- DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
- // Make sure that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
-
- // Check scratch register is valid, extra and extra2 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
-#ifdef DEBUG
- // If vector-based ics are in use, ensure that scratch doesn't conflict with
- // the vector and slot registers, which need to be preserved for a handler
- // call or miss.
- if (IC::ICUseVector(ic_kind_)) {
- if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch));
- } else {
- DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC);
- Register vector = StoreWithVectorDescriptor::VectorRegister();
- Register slot = StoreWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch));
- }
- }
-#endif
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(kPrimaryMagic));
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, receiver, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xorp(scratch, Immediate(kPrimaryMagic));
- __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(kSecondaryMagic));
- __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, receiver, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X64
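(The deleted probe computes two table offsets from cheap arithmetic over the name's hash field and the receiver's map pointer. A sketch of that structure — the magic numbers and table sizes below are placeholders, not V8's actual constants:)

#include <cstdint>

constexpr uint32_t kPrimaryMagic = 0x3d532433;    // placeholder
constexpr uint32_t kSecondaryMagic = 0xb16ca6e5;  // placeholder
constexpr uint32_t kCacheIndexShift = 2;
constexpr uint32_t kPrimaryTableSize = 2048;      // placeholder
constexpr uint32_t kSecondaryTableSize = 512;     // placeholder

// The mask drops the low two bits: they are not part of the hash, and for
// map pointers they are always 01 (the heap-object tag).
uint32_t PrimaryOffset(uint32_t name_hash_field, uint32_t map_low32) {
  uint32_t hash = (name_hash_field + map_low32) ^ kPrimaryMagic;
  return hash & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}

// On a primary miss, the secondary offset folds the name pointer itself
// into the primary offset, as the subl/addl pair above does.
uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32) {
  uint32_t hash = primary_offset - name_low32 + kSecondaryMagic;
  return hash & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}

(Each offset then indexes a table of (key, code, map) triples; ProbeTable dispatches only when both the name and the receiver's map match the cached entry, otherwise execution falls through to the next probe or the runtime.)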
diff --git a/deps/v8/src/ic/x87/OWNERS b/deps/v8/src/ic/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/ic/x87/OWNERS
+++ b/deps/v8/src/ic/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index a5c32d37cc..4a521b76d3 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -83,16 +83,6 @@ void PropertyHandlerCompiler::DiscardVectorAndSlot() {
__ add(esp, Immediate(2 * kPointerSize));
}
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
- MacroAssembler* masm = this->masm();
- __ pop(tmp);
-}
-
void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
MacroAssembler* masm, Label* miss_label, Register receiver,
Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,18 +122,6 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
__ DecrementCounter(counters->negative_lookups_miss(), 1);
}
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
- MacroAssembler* masm, int index, Register result, Label* miss) {
- __ LoadGlobalFunction(index, result);
- // Load its initial map. The global functions all have initial maps.
- __ mov(result,
- FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
MacroAssembler* masm, Register receiver, Register scratch1,
Register scratch2, Label* miss_label) {
@@ -359,58 +337,6 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
}
}
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
- __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
- Register map_reg,
- Register scratch,
- Label* miss) {
- Handle<WeakCell> cell = Map::WeakCellForMap(transition);
- DCHECK(!map_reg.is(scratch));
- __ LoadWeakValue(map_reg, cell, miss);
- if (transition->CanBeDeprecated()) {
- __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
- __ and_(scratch, Immediate(Map::Deprecated::kMask));
- __ j(not_zero, miss);
- }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
- int descriptor,
- Register value_reg,
- Register scratch,
- Label* miss_label) {
- DCHECK(!map_reg.is(scratch));
- DCHECK(!map_reg.is(value_reg));
- DCHECK(!value_reg.is(scratch));
- __ LoadInstanceDescriptors(map_reg, scratch);
- __ mov(scratch,
- FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
- __ cmp(value_reg, scratch);
- __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
- Register value_reg,
- Label* miss_label) {
- Register map_reg = scratch1();
- Register scratch = scratch2();
- DCHECK(!value_reg.is(map_reg));
- DCHECK(!value_reg.is(scratch));
- __ JumpIfSmi(value_reg, miss_label);
- if (field_type->IsClass()) {
- __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
- __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
- scratch);
- __ j(not_equal, miss_label);
- }
-}
-
void PropertyHandlerCompiler::GenerateAccessCheck(
Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +466,6 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
}
}
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
- // Return the constant value.
- __ LoadObject(eax, value);
- __ ret(0);
-}
-
-
void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
LookupIterator* it, Register holder_reg) {
DCHECK(holder()->HasNamedInterceptor());
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
deleted file mode 100644
index 11a8cdcd34..0000000000
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
- MacroAssembler* masm, LanguageMode language_mode) {
- typedef StoreWithVectorDescriptor Descriptor;
- STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
- // ----------- S t a t e -------------
- // -- esp[12] : value
- // -- esp[8] : slot
- // -- esp[4] : vector
- // -- esp[0] : return address
- // -----------------------------------
- __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
- Descriptor::kValue);
-
- __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
- __ mov(Operand(esp, 8), Descriptor::NameRegister());
- __ mov(Operand(esp, 4), Descriptor::ValueRegister());
- __ pop(ebx);
- __ push(Immediate(Smi::FromInt(language_mode)));
- __ push(ebx); // return address
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index f96e509f53..049a85e92e 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -18,440 +18,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so the code at miss_label must always perform a
-// complete backup property load. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register r0, Register r1, Register result) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is unchanged.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // Scratch registers:
- //
- // r0 - used for the index into the property dictionary
- //
- // r1 - used to hold the capacity of the property dictionary.
- //
- // result - holds the result on exit.
-
- Label done;
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
- elements, name, r0, r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
- __ j(not_zero, miss_label);
-
- // Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so the code at miss_label must always perform a
-// complete backup property store. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
- Register elements, Register name,
- Register value, Register r0, Register r1) {
- // Register use:
- //
- // elements - holds the property dictionary on entry and is clobbered.
- //
- // name - holds the name of the property on entry and is unchanged.
- //
- // value - holds the value to store and is unchanged.
- //
- // r0 - used for index into the property dictionary and is clobbered.
- //
- // r1 - used to hold the capacity of the property dictionary and is clobbered.
- Label done;
-
-
- // Probe the dictionary.
- NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
- elements, name, r0, r1);
-
- // If probing finds an entry in the dictionary, r0 contains the
- // index into the dictionary. Check that the value is a normal
- // property that is not read only.
- __ bind(&done);
- const int kElementsStartOffset =
- NameDictionary::kHeaderSize +
- NameDictionary::kElementsStartIndex * kPointerSize;
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- const int kTypeAndReadOnlyMask =
- (PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY))
- << kSmiTagSize;
- __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
- Immediate(kTypeAndReadOnlyMask));
- __ j(not_zero, miss_label);
-
- // Store the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
- __ mov(Operand(r0, 0), value);
-
- // Update write barrier. Make sure not to clobber the value.
- __ mov(r1, value);
- __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
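(One detail worth noting against the x64 twin earlier in the patch: here the details-word masks are shifted left by kSmiTagSize before testing, because on ia32 the details word is a Smi whose payload sits above a one-bit tag. A short sketch, with the usual caveat that the constants are illustrative:)

#include <cstdint>

constexpr int kSmiTagSize = 1;  // one tag bit on 32-bit targets (assumed)

// A 32-bit Smi stores its payload shifted left by the tag size, so testing
// an untagged field mask against the raw Smi word needs the same shift.
constexpr uint32_t SmiEncode(uint32_t payload) {
  return payload << kSmiTagSize;
}

bool DetailsBitsSet(uint32_t smi_encoded_details, uint32_t untagged_mask) {
  return (smi_encoded_details & (untagged_mask << kSmiTagSize)) != 0;
}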
-static void KeyedStoreGenerateMegamorphicHelper(
- MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
- KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
- Label transition_smi_elements;
- Label finish_object_store, non_double_value, transition_double_elements;
- Label fast_double_without_map_check;
- Register receiver = StoreDescriptor::ReceiverRegister();
- Register key = StoreDescriptor::NameRegister();
- Register value = StoreDescriptor::ValueRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- // key is a smi.
- // ebx: FixedArray receiver->elements
- // edi: receiver map
- // Fast case: Do the store, could be either Object or double.
- __ bind(fast_object);
- if (check_map == kCheckMap) {
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, fast_double);
- }
-
- // HOLECHECK: guards "A[i] = V"
- // We have to go to the runtime if the current value is the hole because
- // there may be a callback on the element
- Label holecheck_passed1;
- __ cmp(FixedArrayElementOperand(ebx, key),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, &holecheck_passed1);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&holecheck_passed1);
-
- // Smi stores don't require further checks.
- Label non_smi_value;
- __ JumpIfNotSmi(value, &non_smi_value);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- // It's irrelevant whether the array is smi-only or not when writing a smi.
- __ mov(FixedArrayElementOperand(ebx, key), value);
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&non_smi_value);
- // Escape to elements kind transition case.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- __ CheckFastObjectElements(edi, &transition_smi_elements);
-
- // Fast elements array, store the value to the elements backing store.
- __ bind(&finish_object_store);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ mov(FixedArrayElementOperand(ebx, key), value);
- // Update write barrier for the elements array address.
- __ mov(edx, value); // Preserve the value which is returned.
- __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(fast_double);
- if (check_map == kCheckMap) {
- // Check for fast double array case. If this fails, call through to the
- // runtime.
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, slow);
- // If the value is a number, store it as a double in the FastDoubleElements
- // array.
- }
-
- // HOLECHECK: guards "A[i] double hole?"
- // We have to see if the double version of the hole is present. If so,
- // go to the runtime.
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(not_equal, &fast_double_without_map_check);
- __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- __ bind(&fast_double_without_map_check);
- __ StoreNumberToDoubleElements(value, ebx, key, edi,
- &transition_double_elements, false);
- if (increment_length == kIncrementLength) {
- // Add 1 to receiver->length.
- __ add(FieldOperand(receiver, JSArray::kLengthOffset),
- Immediate(Smi::FromInt(1)));
- }
- __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&transition_smi_elements);
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
- // Transition the array appropriately depending on the value type.
- __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
- &non_double_value, DONT_DO_SMI_CHECK);
-
- // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
- // and complete the store.
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
- AllocationSiteMode mode =
- AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
- ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&fast_double_without_map_check);
-
- __ bind(&non_double_value);
- // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
- __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
- edi, slow);
- mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- masm, receiver, key, value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-
- __ bind(&transition_double_elements);
- // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that's not a
- // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
- // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
- __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
- __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
- ebx, edi, slow);
- mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
- value, ebx, mode, slow);
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
- LanguageMode language_mode) {
- typedef StoreWithVectorDescriptor Descriptor;
- // Return address is on the stack.
- Label slow, fast_object, fast_object_grow;
- Label fast_double, fast_double_grow;
- Label array, extra, check_if_double_array, maybe_name_key, miss;
- Register receiver = Descriptor::ReceiverRegister();
- Register key = Descriptor::NameRegister();
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
-
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, &slow);
- // Get the map from the receiver.
- __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
- // Check that the receiver does not require access checks.
- // The generic stub does not perform map checks.
- __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
- __ j(not_zero, &slow);
-
- __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
- Descriptor::kValue);
-
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, &maybe_name_key);
- __ CmpInstanceType(edi, JS_ARRAY_TYPE);
- __ j(equal, &array);
- // Check that the object is some kind of JS object EXCEPT JS Value type. In
- // the case that the object is a value-wrapper object, we enter the runtime
- // system to make sure that indexing into string objects works as intended.
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- __ CmpInstanceType(edi, JS_OBJECT_TYPE);
- __ j(below, &slow);
-
- // Object case: Check key against length in the elements array.
- // Key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
- // Check array bounds. Both the key and the length of FixedArray are smis.
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(below, &fast_object);
-
- // Slow case: call runtime.
- __ bind(&slow);
- PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
- // Never returns to here.
-
- __ bind(&maybe_name_key);
- __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
- __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
- masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
- no_reg);
-
- // Cache miss.
- __ jmp(&miss);
-
- // Extra capacity case: Check if there is extra capacity to
- // perform the store and update the length. Used for adding one
- // element to the array by writing to array[array.length].
- __ bind(&extra);
- // receiver is a JSArray.
- // key is a smi.
- // ebx: receiver->elements, a FixedArray
- // edi: receiver map
- // flags: compare (key, receiver.length())
- // do not leave holes in the array:
- __ j(not_equal, &slow);
- __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ j(above_equal, &slow);
- __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
- __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
- __ j(not_equal, &check_if_double_array);
- __ jmp(&fast_object_grow);
-
- __ bind(&check_if_double_array);
- __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
- __ j(not_equal, &slow);
- __ jmp(&fast_double_grow);
-
- // Array case: Get the length and the elements array from the JS
- // array. Check that the array is in fast mode (and writable); if it
- // is, the length is always a smi.
- __ bind(&array);
- // receiver is a JSArray.
- // key is a smi.
- // edi: receiver map
- __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
- // Check the key against the length in the array and fall through to the
- // common store code.
- __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset)); // Compare smis.
- __ j(above_equal, &extra);
-
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
- kCheckMap, kDontIncrementLength);
- KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
- &fast_double_grow, &slow, kDontCheckMap,
- kIncrementLength);
-
- __ bind(&miss);
- GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
- Register dictionary = eax;
- DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
- DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
- Label slow;
-
- __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
- JSObject::kPropertiesOffset));
- GenerateDictionaryLoad(masm, &slow, dictionary,
- LoadDescriptor::NameRegister(), edi, ebx, eax);
- __ ret(0);
-
- // Dictionary load failed, go slow (but don't miss).
- __ bind(&slow);
- GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
-
- Register slot = LoadDescriptor::SlotRegister();
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
- !edi.is(vector));
-
- __ pop(edi);
- __ push(receiver);
- __ push(name);
- __ push(slot);
- __ push(vector);
- __ push(edi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
- LoadIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
- // Return address is on the stack.
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register name = LoadDescriptor::NameRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(ebx);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
static void StoreIC_PushArgs(MacroAssembler* masm) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
Register name = StoreWithVectorDescriptor::NameRegister();
@@ -470,50 +36,6 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
__ push(return_address);
}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
- // Return address is on the stack.
- StoreIC_PushArgs(masm);
-
- // Perform tail call to the entry.
- __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
- typedef StoreWithVectorDescriptor Descriptor;
- Label restore_miss;
- Register receiver = Descriptor::ReceiverRegister();
- Register name = Descriptor::NameRegister();
- Register value = Descriptor::ValueRegister();
- // Since the slot and vector values are passed on the stack we can use
- // respective registers as scratch registers.
- Register scratch1 = Descriptor::VectorRegister();
- Register scratch2 = Descriptor::SlotRegister();
-
- __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
- // A lot of registers are needed for storing to slow case objects.
- // Push and restore receiver but rely on GenerateDictionaryStore preserving
- // the value and name.
- __ push(receiver);
-
- Register dictionary = receiver;
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
- scratch1, scratch2);
- __ Drop(1);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->ic_store_normal_hit(), 1);
- __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
- __ bind(&restore_miss);
- __ pop(receiver);
- __ IncrementCounter(counters->ic_store_normal_miss(), 1);
- GenerateMiss(masm);
-}
-
-
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// Return address is on the stack.
StoreIC_PushArgs(masm);
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
deleted file mode 100644
index 68fa615420..0000000000
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
- StubCache::Table table, Register name, Register receiver,
- // The offset is scaled by 4, based on
- // kCacheIndexShift, which is two bits
- Register offset, Register extra) {
- ExternalReference key_offset(stub_cache->key_reference(table));
- ExternalReference value_offset(stub_cache->value_reference(table));
- ExternalReference map_offset(stub_cache->map_reference(table));
-
- Label miss;
- Code::Kind ic_kind = stub_cache->ic_kind();
- bool is_vector_store =
- IC::ICUseVector(ic_kind) &&
- (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
- // Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
-
- if (extra.is_valid()) {
- // Get the code entry from the cache.
- __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- if (is_vector_store) {
- // The value, vector and slot were passed to the IC on the stack and
- // they are still there. So we can just jump to the handler.
- DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
- } else {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
- __ pop(LoadWithVectorDescriptor::VectorRegister());
- __ pop(LoadDescriptor::SlotRegister());
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
- }
-
- __ bind(&miss);
- } else {
- DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
- // Save the offset on the stack.
- __ push(offset);
-
- // Check that the key in the entry matches the name.
- __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
- __ j(not_equal, &miss);
-
- // Check the map matches.
- __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
- __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ j(not_equal, &miss);
-
- // Restore offset register.
- __ mov(offset, Operand(esp, 0));
-
- // Get the code entry from the cache.
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
- __ jmp(&miss);
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
- __ jmp(&miss);
- }
-#endif
-
- // Restore offset and re-load code entry from cache.
- __ pop(offset);
- __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
- // Jump to the first instruction in the code stub.
- if (is_vector_store) {
- DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
- }
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
- // Pop at miss.
- __ bind(&miss);
- __ pop(offset);
- }
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
- Register name, Register scratch, Register extra,
- Register extra2, Register extra3) {
- Label miss;
-
- // Assert that code is valid. The multiplying code relies on the entry size
- // being 12.
- DCHECK(sizeof(Entry) == 12);
-
- // Assert that there are no register conflicts.
- DCHECK(!scratch.is(receiver));
- DCHECK(!scratch.is(name));
- DCHECK(!extra.is(receiver));
- DCHECK(!extra.is(name));
- DCHECK(!extra.is(scratch));
-
- // Assert scratch and extra registers are valid, and extra2/3 are unused.
- DCHECK(!scratch.is(no_reg));
- DCHECK(extra2.is(no_reg));
- DCHECK(extra3.is(no_reg));
-
- Register offset = scratch;
- scratch = no_reg;
-
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, kPrimaryMagic);
- // We mask out the last two bits because they are not part of the hash and
- // they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- // ProbeTable expects the offset to be pointer scaled, which it is, because
- // the heap object tag size is 2 and the pointer size log 2 is also 2.
- DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
- // Probe the primary table.
- ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
- // Primary miss: Compute hash for secondary probe.
- __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
- __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(offset, kPrimaryMagic);
- __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
- __ sub(offset, name);
- __ add(offset, Immediate(kSecondaryMagic));
- __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
- // Probe the secondary table.
- ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-} // namespace internal
-} // namespace v8
-
-#endif // V8_TARGET_ARCH_X87
diff --git a/deps/v8/src/inspector/BUILD.gn b/deps/v8/src/inspector/BUILD.gn
index 6ebb91ccbc..e6742c09f7 100644
--- a/deps/v8/src/inspector/BUILD.gn
+++ b/deps/v8/src/inspector/BUILD.gn
@@ -140,7 +140,6 @@ v8_source_set("inspector") {
"inspected-context.h",
"java-script-call-frame.cc",
"java-script-call-frame.h",
- "protocol-platform.h",
"remote-object-id.cc",
"remote-object-id.h",
"script-breakpoint.h",
@@ -150,6 +149,8 @@ v8_source_set("inspector") {
"string-16.h",
"string-util.cc",
"string-util.h",
+ "test-interface.cc",
+ "test-interface.h",
"v8-console-agent-impl.cc",
"v8-console-agent-impl.h",
"v8-console-message.cc",
@@ -186,5 +187,7 @@ v8_source_set("inspector") {
"v8-stack-trace-impl.h",
"v8-value-copier.cc",
"v8-value-copier.h",
+ "wasm-translation.cc",
+ "wasm-translation.h",
]
}
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index d49c6a6254..748a7c12d9 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -4,6 +4,7 @@ include_rules = [
"+src/base/macros.h",
"+src/base/logging.h",
"+src/base/platform/platform.h",
+ "+src/conversions.h",
"+src/inspector",
"+src/tracing",
"-include/v8-debug.h",
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
index 1614566ffa..7843dc9d67 100644
--- a/deps/v8/src/inspector/debugger-script.js
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -33,17 +33,6 @@
var DebuggerScript = {};
-/**
- * @param {?CompileEvent} eventData
- */
-DebuggerScript.getAfterCompileScript = function(eventData)
-{
- var script = eventData.script().value();
- if (!script.is_debugger_script)
- return script;
- return null;
-}
-
/** @type {!Map<!ScopeType, string>} */
DebuggerScript._scopeTypeNames = new Map();
DebuggerScript._scopeTypeNames.set(ScopeType.Global, "global");
@@ -53,6 +42,8 @@ DebuggerScript._scopeTypeNames.set(ScopeType.Closure, "closure");
DebuggerScript._scopeTypeNames.set(ScopeType.Catch, "catch");
DebuggerScript._scopeTypeNames.set(ScopeType.Block, "block");
DebuggerScript._scopeTypeNames.set(ScopeType.Script, "script");
+DebuggerScript._scopeTypeNames.set(ScopeType.Eval, "eval");
+DebuggerScript._scopeTypeNames.set(ScopeType.Module, "module");
/**
* @param {function()} fun
@@ -83,6 +74,34 @@ DebuggerScript.getFunctionScopes = function(fun)
}
/**
+ * @param {Object} gen
+ * @return {?Array<!Scope>}
+ */
+DebuggerScript.getGeneratorScopes = function(gen)
+{
+ var mirror = MakeMirror(gen);
+ if (!mirror.isGenerator())
+ return null;
+ var generatorMirror = /** @type {!GeneratorMirror} */(mirror);
+ var count = generatorMirror.scopeCount();
+ if (count == 0)
+ return null;
+ var result = [];
+ for (var i = 0; i < count; i++) {
+ var scopeDetails = generatorMirror.scope(i).details();
+ var scopeObject = DebuggerScript._buildScopeObject(scopeDetails.type(), scopeDetails.object());
+ if (!scopeObject)
+ continue;
+ result.push({
+ type: /** @type {string} */(DebuggerScript._scopeTypeNames.get(scopeDetails.type())),
+ object: scopeObject,
+ name: scopeDetails.name() || ""
+ });
+ }
+ return result;
+}
+
+/**
* @param {Object} object
* @return {?RawLocation}
*/
@@ -126,20 +145,6 @@ DebuggerScript.getCollectionEntries = function(object)
}
/**
- * @param {string|undefined} contextData
- * @return {number}
- */
-DebuggerScript._executionContextId = function(contextData)
-{
- if (!contextData)
- return 0;
- var match = contextData.match(/^[^,]*,([^,]*),.*$/);
- if (!match)
- return 0;
- return parseInt(match[1], 10) || 0;
-}
-
-/**
* @param {!ExecutionState} execState
* @param {!BreakpointInfo} info
* @return {string|undefined}
@@ -467,12 +472,9 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
function contextId()
{
var mirror = ensureFuncMirror();
- // Old V8 do not have context() function on these objects
- if (!mirror.context)
- return DebuggerScript._executionContextId(mirror.script().value().context_data);
var context = mirror.context();
- if (context)
- return DebuggerScript._executionContextId(context.data());
+ if (context && context.data())
+ return Number(context.data());
return 0;
}
@@ -491,7 +493,7 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
*/
function evaluate(expression)
{
- return frameMirror.evaluate(expression, false).value();
+ return frameMirror.evaluate(expression).value();
}
/** @return {undefined} */
@@ -541,15 +543,21 @@ DebuggerScript._buildScopeObject = function(scopeType, scopeObject)
case ScopeType.Catch:
case ScopeType.Block:
case ScopeType.Script:
+ case ScopeType.Eval:
+ case ScopeType.Module:
// For transient objects we create a "persistent" copy that contains
// the same properties.
// Reset scope object prototype to null so that the proto properties
// don't appear in the local scope section.
var properties = /** @type {!ObjectMirror} */(MakeMirror(scopeObject, true /* transient */)).properties();
// Almost always Script scope will be empty, so just filter out that noise.
- // Also drop empty Block scopes, should we get any.
- if (!properties.length && (scopeType === ScopeType.Script || scopeType === ScopeType.Block))
+      // Also drop empty Block, Eval and Module scopes, should we get any.
+ if (!properties.length && (scopeType === ScopeType.Script ||
+ scopeType === ScopeType.Block ||
+ scopeType === ScopeType.Eval ||
+ scopeType === ScopeType.Module)) {
break;
+ }
result = { __proto__: null };
for (var j = 0; j < properties.length; j++) {
var name = properties[j].name();
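
The contextId() rewrite above works because V8 now stores the numeric execution-context id directly as the context's debug data, so the removed DebuggerScript._executionContextId helper, which scraped the second comma-separated field out of a "name,id,aux" string, is no longer needed; Number(context.data()) is the whole parse. A minimal C++ sketch of the same simplification, with std::string standing in for the mirror data (illustration only, not an inspector type):

#include <cstdlib>
#include <string>

// Old payload: "name,<id>,aux" (scrape out the middle field, parseInt it).
// New payload: the id itself, so a single numeric conversion suffices.
int ContextIdFromData(const std::string& data) {
  if (data.empty()) return 0;  // 0 means "no known context"
  char* end = nullptr;
  long id = std::strtol(data.c_str(), &end, 10);
  return end != data.c_str() ? static_cast<int>(id) : 0;
}

int main() { return ContextIdFromData("7") == 7 ? 0 : 1; }
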
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
index cc152d5537..4fa3a0fbe3 100644
--- a/deps/v8/src/inspector/debugger_script_externs.js
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -19,21 +19,6 @@ var Scope;
var RawLocation;
/** @typedef {{
- id: number,
- name: string,
- sourceURL: (string|undefined),
- sourceMappingURL: (string|undefined),
- source: string,
- startLine: number,
- endLine: number,
- startColumn: number,
- endColumn: number,
- executionContextId: number,
- executionContextAuxData: string
- }} */
-var FormattedScript;
-
-/** @typedef {{
functionName: string,
location: !RawLocation,
this: !Object,
@@ -47,6 +32,7 @@ var JavaScriptCallFrameDetails;
sourceID: function():(number),
line: function():number,
column: function():number,
+ contextId: function():number,
thisObject: !Object,
evaluate: function(string):*,
restart: function():undefined,
@@ -174,13 +160,6 @@ BreakPoint.prototype.number = function() {}
/** @interface */
-function CompileEvent() {}
-
-/** @return {!ScriptMirror} */
-CompileEvent.prototype.script = function() {}
-
-
-/** @interface */
function BreakEvent() {}
/** @return {!Array<!BreakPoint>|undefined} */
@@ -192,10 +171,8 @@ function ExecutionState() {}
/**
* @param {string} source
- * @param {boolean} disableBreak
- * @param {*=} additionalContext
*/
-ExecutionState.prototype.evaluateGlobal = function(source, disableBreak, additionalContext) {}
+ExecutionState.prototype.evaluateGlobal = function(source) {}
/** @return {number} */
ExecutionState.prototype.frameCount = function() {}
@@ -220,7 +197,9 @@ var ScopeType = { Global: 0,
Closure: 3,
Catch: 4,
Block: 5,
- Script: 6 };
+ Script: 6,
+ Eval: 7,
+ Module: 8};
/** @typedef {{
@@ -237,14 +216,6 @@ var SourceLocation;
/** @typedef{{
* id: number,
* context_data: (string|undefined),
- * source_url: (string|undefined),
- * source_mapping_url: (string|undefined),
- * is_debugger_script: boolean,
- * source: string,
- * line_offset: number,
- * column_offset: number,
- * nameOrSourceURL: function():string,
- * compilationType: function():!ScriptCompilationType,
* }}
*/
var Script;
@@ -421,6 +392,15 @@ GeneratorMirror.prototype.sourceLocation = function() {}
/** @return {!FunctionMirror} */
GeneratorMirror.prototype.func = function() {}
+/** @return {number} */
+GeneratorMirror.prototype.scopeCount = function() {}
+
+/**
+ * @param {number} index
+ * @return {!ScopeMirror|undefined}
+ */
+GeneratorMirror.prototype.scope = function(index) {}
+
/**
* @interface
@@ -457,9 +437,8 @@ FrameMirror.prototype.script = function() {}
/**
* @param {string} source
- * @param {boolean} disableBreak
*/
-FrameMirror.prototype.evaluate = function(source, disableBreak) {}
+FrameMirror.prototype.evaluate = function(source) {}
FrameMirror.prototype.restart = function() {}
diff --git a/deps/v8/src/inspector/injected-script-native.cc b/deps/v8/src/inspector/injected-script-native.cc
index fcf2ead94b..5d0136b3b6 100644
--- a/deps/v8/src/inspector/injected-script-native.cc
+++ b/deps/v8/src/inspector/injected-script-native.cc
@@ -44,8 +44,8 @@ int InjectedScriptNative::bind(v8::Local<v8::Value> value,
const String16& groupName) {
if (m_lastBoundObjectId <= 0) m_lastBoundObjectId = 1;
int id = m_lastBoundObjectId++;
- m_idToWrappedObject[id] =
- wrapUnique(new v8::Global<v8::Value>(m_isolate, value));
+ m_idToWrappedObject.insert(
+ std::make_pair(id, v8::Global<v8::Value>(m_isolate, value)));
addObjectToGroup(id, groupName);
return id;
}
@@ -57,7 +57,7 @@ void InjectedScriptNative::unbind(int id) {
v8::Local<v8::Value> InjectedScriptNative::objectForId(int id) {
auto iter = m_idToWrappedObject.find(id);
- return iter != m_idToWrappedObject.end() ? iter->second->Get(m_isolate)
+ return iter != m_idToWrappedObject.end() ? iter->second.Get(m_isolate)
: v8::Local<v8::Value>();
}
diff --git a/deps/v8/src/inspector/injected-script-native.h b/deps/v8/src/inspector/injected-script-native.h
index 3bdf24709d..c0b93013fe 100644
--- a/deps/v8/src/inspector/injected-script-native.h
+++ b/deps/v8/src/inspector/injected-script-native.h
@@ -34,8 +34,7 @@ class InjectedScriptNative final {
int m_lastBoundObjectId;
v8::Isolate* m_isolate;
- protocol::HashMap<int, std::unique_ptr<v8::Global<v8::Value>>>
- m_idToWrappedObject;
+ protocol::HashMap<int, v8::Global<v8::Value>> m_idToWrappedObject;
typedef protocol::HashMap<int, String16> IdToObjectGroupName;
IdToObjectGroupName m_idToObjectGroupName;
typedef protocol::HashMap<String16, std::vector<int>> NameToObjectGroup;
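
The two hunks above stop heap-allocating v8::Global handles behind std::unique_ptr and instead store the v8::Global values directly in the map, moving them in via insert(std::make_pair(...)); lookups then drop one indirection (iter->second.Get() instead of iter->second->Get()). A minimal sketch of the same pattern with a stand-in move-only type (MoveOnlyHandle is hypothetical, not a V8 type):

#include <map>
#include <utility>

class MoveOnlyHandle {
 public:
  explicit MoveOnlyHandle(int v) : value_(v) {}
  MoveOnlyHandle(MoveOnlyHandle&&) = default;
  MoveOnlyHandle& operator=(MoveOnlyHandle&&) = default;
  MoveOnlyHandle(const MoveOnlyHandle&) = delete;
  MoveOnlyHandle& operator=(const MoveOnlyHandle&) = delete;
  int get() const { return value_; }

 private:
  int value_;
};

int main() {
  std::map<int, MoveOnlyHandle> idToHandle;
  // insert(make_pair(...)) moves the handle into the map, mirroring
  // m_idToWrappedObject.insert(std::make_pair(id, v8::Global<...>(...))).
  idToHandle.insert(std::make_pair(1, MoveOnlyHandle(42)));
  auto iter = idToHandle.find(1);
  // One dereference fewer than with unique_ptr values: iter->second.get().
  return iter != idToHandle.end() && iter->second.get() == 42 ? 0 : 1;
}
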
diff --git a/deps/v8/src/inspector/injected-script-source.js b/deps/v8/src/inspector/injected-script-source.js
index f3c8d6b96e..b52277a8eb 100644
--- a/deps/v8/src/inspector/injected-script-source.js
+++ b/deps/v8/src/inspector/injected-script-source.js
@@ -211,6 +211,8 @@ InjectedScript.closureTypes["block"] = "Block";
InjectedScript.closureTypes["script"] = "Script";
InjectedScript.closureTypes["with"] = "With Block";
InjectedScript.closureTypes["global"] = "Global";
+InjectedScript.closureTypes["eval"] = "Eval";
+InjectedScript.closureTypes["module"] = "Module";
InjectedScript.prototype = {
/**
@@ -617,7 +619,13 @@ InjectedScript.prototype = {
var className = InjectedScriptHost.internalConstructorName(obj);
if (subtype === "array" || subtype === "typedarray") {
if (typeof obj.length === "number")
- className += "[" + obj.length + "]";
+ return className + "(" + obj.length + ")";
+ return className;
+ }
+
+ if (subtype === "map" || subtype === "set") {
+ if (typeof obj.size === "number")
+ return className + "(" + obj.size + ")";
return className;
}
@@ -929,17 +937,16 @@ InjectedScript.RemoteObject.prototype = {
if (!descriptor.isOwn)
continue;
- // Ignore computed properties.
- if (!("value" in descriptor))
+ // Ignore computed properties unless they have getters.
+ if (!("value" in descriptor)) {
+ if (descriptor.get)
+ this._appendPropertyPreview(preview, { name: name, type: "accessor", __proto__: null }, propertiesThreshold);
continue;
+ }
var value = descriptor.value;
var type = typeof value;
- // Never render functions in object preview.
- if (type === "function" && (this.subtype !== "array" || !isUInt32(name)))
- continue;
-
// Special-case HTMLAll.
if (type === "undefined" && injectedScript._isHTMLAllCollection(value))
type = "object";
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index d605227222..9d9c3270c2 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -105,9 +105,9 @@ std::unique_ptr<InjectedScript> InjectedScript::create(
if (inspector->getContext(contextGroupId, contextId) != inspectedContext)
return nullptr;
if (!injectedScriptValue->IsObject()) return nullptr;
- return wrapUnique(new InjectedScript(inspectedContext,
- injectedScriptValue.As<v8::Object>(),
- std::move(injectedScriptNative)));
+ return std::unique_ptr<InjectedScript>(
+ new InjectedScript(inspectedContext, injectedScriptValue.As<v8::Object>(),
+ std::move(injectedScriptNative)));
}
InjectedScript::InjectedScript(
@@ -150,7 +150,7 @@ Response InjectedScript::getProperties(
if (!response.isSuccess()) return response;
protocol::ErrorSupport errors;
std::unique_ptr<Array<PropertyDescriptor>> result =
- Array<PropertyDescriptor>::parse(protocolValue.get(), &errors);
+ Array<PropertyDescriptor>::fromValue(protocolValue.get(), &errors);
if (errors.hasErrors()) return Response::Error(errors.errors());
*properties = std::move(result);
return Response::OK();
@@ -158,7 +158,7 @@ Response InjectedScript::getProperties(
void InjectedScript::releaseObject(const String16& objectId) {
std::unique_ptr<protocol::Value> parsedObjectId =
- protocol::parseJSON(objectId);
+ protocol::StringUtil::parseJSON(objectId);
if (!parsedObjectId) return;
protocol::DictionaryValue* object =
protocol::DictionaryValue::cast(parsedObjectId.get());
@@ -184,7 +184,7 @@ Response InjectedScript::wrapObject(
if (!response.isSuccess()) return response;
*result =
- protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+ protocol::Runtime::RemoteObject::fromValue(protocolValue.get(), &errors);
if (!result->get()) return Response::Error(errors.errors());
return Response::OK();
}
@@ -260,7 +260,8 @@ std::unique_ptr<protocol::Runtime::RemoteObject> InjectedScript::wrapTable(
Response response = toProtocolValue(context, r, &protocolValue);
if (!response.isSuccess()) return nullptr;
protocol::ErrorSupport errors;
- return protocol::Runtime::RemoteObject::parse(protocolValue.get(), &errors);
+ return protocol::Runtime::RemoteObject::fromValue(protocolValue.get(),
+ &errors);
}
Response InjectedScript::findObject(const RemoteObjectId& objectId,
@@ -317,7 +318,7 @@ Response InjectedScript::resolveCallArgument(
if (callArgument->hasValue() || callArgument->hasUnserializableValue()) {
String16 value =
callArgument->hasValue()
- ? callArgument->getValue(nullptr)->toJSONString()
+ ? callArgument->getValue(nullptr)->serialize()
: "Number(\"" + callArgument->getUnserializableValue("") + "\")";
if (!m_context->inspector()
->compileAndRunInternalScript(
@@ -418,7 +419,7 @@ InjectedScript::Scope::Scope(V8InspectorImpl* inspector, int contextGroupId)
m_handleScope(inspector->isolate()),
m_tryCatch(inspector->isolate()),
m_ignoreExceptionsAndMuteConsole(false),
- m_previousPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException),
+ m_previousPauseOnExceptionsState(v8::debug::NoBreakOnException),
m_userGesture(false) {}
Response InjectedScript::Scope::initialize() {
@@ -448,14 +449,13 @@ void InjectedScript::Scope::ignoreExceptionsAndMuteConsole() {
m_inspector->client()->muteMetrics(m_contextGroupId);
m_inspector->muteExceptions(m_contextGroupId);
m_previousPauseOnExceptionsState =
- setPauseOnExceptionsState(v8::DebugInterface::NoBreakOnException);
+ setPauseOnExceptionsState(v8::debug::NoBreakOnException);
}
-v8::DebugInterface::ExceptionBreakState
-InjectedScript::Scope::setPauseOnExceptionsState(
- v8::DebugInterface::ExceptionBreakState newState) {
+v8::debug::ExceptionBreakState InjectedScript::Scope::setPauseOnExceptionsState(
+ v8::debug::ExceptionBreakState newState) {
if (!m_inspector->debugger()->enabled()) return newState;
- v8::DebugInterface::ExceptionBreakState presentState =
+ v8::debug::ExceptionBreakState presentState =
m_inspector->debugger()->getPauseOnExceptionsState();
if (presentState != newState)
m_inspector->debugger()->setPauseOnExceptionsState(newState);
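
Several files in this commit replace the local wrapUnique() helper, defined in the protocol-platform.h header deleted later in this diff, with an explicit std::unique_ptr<T>(new T(...)). std::make_unique would be the obvious spelling, but it is C++14, which is presumably why the explicit form is used here. A minimal sketch of the substitution:

#include <memory>

struct Widget {
  explicit Widget(int id) : id(id) {}
  int id;
};

// Before: return wrapUnique(new Widget(7));   (helper from protocol-platform.h)
// After:  spell out the ownership transfer at the call site.
std::unique_ptr<Widget> makeWidget() {
  return std::unique_ptr<Widget>(new Widget(7));
}

int main() { return makeWidget()->id == 7 ? 0 : 1; }
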
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 6500f4dbb7..9e6680a7e3 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -120,15 +120,15 @@ class InjectedScript final {
private:
void cleanup();
- v8::DebugInterface::ExceptionBreakState setPauseOnExceptionsState(
- v8::DebugInterface::ExceptionBreakState);
+ v8::debug::ExceptionBreakState setPauseOnExceptionsState(
+ v8::debug::ExceptionBreakState);
v8::HandleScope m_handleScope;
v8::TryCatch m_tryCatch;
v8::Local<v8::Context> m_context;
std::unique_ptr<V8Console::CommandLineAPIScope> m_commandLineAPIScope;
bool m_ignoreExceptionsAndMuteConsole;
- v8::DebugInterface::ExceptionBreakState m_previousPauseOnExceptionsState;
+ v8::debug::ExceptionBreakState m_previousPauseOnExceptionsState;
bool m_userGesture;
};
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index dab3bba050..6d9f51ed3f 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -41,10 +41,12 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
m_humanReadableName(toString16(info.humanReadableName)),
m_auxData(toString16(info.auxData)),
m_reported(false) {
+ v8::Isolate* isolate = m_inspector->isolate();
+ info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
+ v8::Int32::New(isolate, contextId));
m_context.SetWeak(&m_context, &clearContext,
v8::WeakCallbackType::kParameter);
- v8::Isolate* isolate = m_inspector->isolate();
v8::Local<v8::Object> global = info.context->Global();
v8::Local<v8::Object> console =
V8Console::createConsole(this, info.hasMemoryOnConsole);
@@ -65,6 +67,14 @@ InspectedContext::~InspectedContext() {
}
}
+// static
+int InspectedContext::contextId(v8::Local<v8::Context> context) {
+ v8::Local<v8::Value> data =
+ context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
+ if (data.IsEmpty() || !data->IsInt32()) return 0;
+ return static_cast<int>(data.As<v8::Int32>()->Value());
+}
+
v8::Local<v8::Context> InspectedContext::context() const {
return m_context.Get(isolate());
}
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index f31eb76419..f8d97e9b94 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -21,6 +21,8 @@ class InspectedContext {
public:
~InspectedContext();
+ static int contextId(v8::Local<v8::Context>);
+
v8::Local<v8::Context> context() const;
int contextId() const { return m_contextId; }
int contextGroupId() const { return m_contextGroupId; }
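
The SetEmbedderData/GetEmbedderData pair above is the key mechanism: the inspector tags every inspected context with its id in the kDebugIdIndex embedder-data slot at construction time, so the new static InspectedContext::contextId(context) can recover the id from nothing but a v8::Local<v8::Context>, with no side table. A minimal sketch of both halves, using the same V8 calls as the hunks (error handling trimmed):

#include "include/v8.h"

// Write side: run once when the context is registered with the inspector.
void TagContext(v8::Isolate* isolate, v8::Local<v8::Context> context, int id) {
  context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
                           v8::Int32::New(isolate, id));
}

// Read side: any code holding the context can look the id up locally.
int ReadContextId(v8::Local<v8::Context> context) {
  v8::Local<v8::Value> data =
      context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
  if (data.IsEmpty() || !data->IsInt32()) return 0;  // 0 == unknown context
  return static_cast<int>(data.As<v8::Int32>()->Value());
}
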
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
index 91507bd579..c70722f852 100644
--- a/deps/v8/src/inspector/inspector.gyp
+++ b/deps/v8/src/inspector/inspector.gyp
@@ -13,13 +13,6 @@
'targets': [
{ 'target_name': 'inspector_injected_script',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
'actions': [
{
'action_name': 'convert_js_to_cpp_char_array',
@@ -44,13 +37,6 @@
},
{ 'target_name': 'inspector_debugger_script',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
'actions': [
{
'action_name': 'convert_js_to_cpp_char_array',
@@ -75,13 +61,6 @@
},
{ 'target_name': 'protocol_compatibility',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
'actions': [
{
'action_name': 'protocol_compatibility',
@@ -104,13 +83,6 @@
{ 'target_name': 'protocol_generated_sources',
'type': 'none',
'dependencies': [ 'protocol_compatibility' ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
'actions': [
{
'action_name': 'protocol_generated_sources',
diff --git a/deps/v8/src/inspector/inspector.gypi b/deps/v8/src/inspector/inspector.gypi
index 863c038d6a..8aff49d0ea 100644
--- a/deps/v8/src/inspector/inspector.gypi
+++ b/deps/v8/src/inspector/inspector.gypi
@@ -44,7 +44,6 @@
'inspector/inspected-context.h',
'inspector/java-script-call-frame.cc',
'inspector/java-script-call-frame.h',
- 'inspector/protocol-platform.h',
'inspector/remote-object-id.cc',
'inspector/remote-object-id.h',
'inspector/script-breakpoint.h',
@@ -54,6 +53,8 @@
'inspector/string-16.h',
'inspector/string-util.cc',
'inspector/string-util.h',
+ 'inspector/test-interface.cc',
+ 'inspector/test-interface.h',
'inspector/v8-console.cc',
'inspector/v8-console.h',
'inspector/v8-console-agent-impl.cc',
@@ -90,6 +91,8 @@
'inspector/v8-stack-trace-impl.h',
'inspector/v8-value-copier.cc',
'inspector/v8-value-copier.h',
+ 'inspector/wasm-translation.cc',
+ 'inspector/wasm-translation.h',
]
}
}
diff --git a/deps/v8/src/inspector/inspector_protocol_config.json b/deps/v8/src/inspector/inspector_protocol_config.json
index cb9e6698d1..22e2cf5636 100644
--- a/deps/v8/src/inspector/inspector_protocol_config.json
+++ b/deps/v8/src/inspector/inspector_protocol_config.json
@@ -3,7 +3,31 @@
"path": "js_protocol.json",
"package": "src/inspector/protocol",
"output": "protocol",
- "namespace": ["v8_inspector", "protocol"]
+ "namespace": ["v8_inspector", "protocol"],
+ "options": [
+ {
+ "domain": "Schema",
+ "exported": ["Domain"]
+ },
+ {
+ "domain": "Runtime",
+ "async": ["evaluate", "awaitPromise", "callFunctionOn", "runScript"],
+ "exported": ["StackTrace", "RemoteObject"]
+ },
+ {
+ "domain": "Debugger",
+ "exported": ["SearchMatch", "paused.reason"]
+ },
+ {
+ "domain": "Console"
+ },
+ {
+ "domain": "Profiler"
+ },
+ {
+ "domain": "HeapProfiler"
+ }
+ ]
},
"exported": {
@@ -19,7 +43,6 @@
"lib": {
"package": "src/inspector/protocol",
"output": "protocol",
- "string_header": "src/inspector/string-util.h",
- "platform_header": "src/inspector/protocol-platform.h"
+ "string_header": "src/inspector/string-util.h"
}
}
diff --git a/deps/v8/src/inspector/java-script-call-frame.cc b/deps/v8/src/inspector/java-script-call-frame.cc
index 2da4f04249..f9d0585a8e 100644
--- a/deps/v8/src/inspector/java-script-call-frame.cc
+++ b/deps/v8/src/inspector/java-script-call-frame.cc
@@ -91,7 +91,7 @@ bool JavaScriptCallFrame::isAtReturn() const {
return result.As<v8::Boolean>()->BooleanValue(context).FromMaybe(false);
}
-v8::Local<v8::Object> JavaScriptCallFrame::details() const {
+v8::MaybeLocal<v8::Object> JavaScriptCallFrame::details() const {
v8::MicrotasksScope microtasks(m_isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::Local<v8::Context> context =
@@ -101,8 +101,12 @@ v8::Local<v8::Object> JavaScriptCallFrame::details() const {
v8::Local<v8::Function> func = v8::Local<v8::Function>::Cast(
callFrame->Get(context, toV8StringInternalized(m_isolate, "details"))
.ToLocalChecked());
- return v8::Local<v8::Object>::Cast(
- func->Call(context, callFrame, 0, nullptr).ToLocalChecked());
+ v8::TryCatch try_catch(m_isolate);
+ v8::Local<v8::Value> details;
+ if (func->Call(context, callFrame, 0, nullptr).ToLocal(&details)) {
+ return v8::Local<v8::Object>::Cast(details);
+ }
+ return v8::MaybeLocal<v8::Object>();
}
v8::MaybeLocal<v8::Value> JavaScriptCallFrame::evaluate(
@@ -129,10 +133,11 @@ v8::MaybeLocal<v8::Value> JavaScriptCallFrame::restart() {
v8::Local<v8::Function> restartFunction = v8::Local<v8::Function>::Cast(
callFrame->Get(context, toV8StringInternalized(m_isolate, "restart"))
.ToLocalChecked());
- v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
+ v8::TryCatch try_catch(m_isolate);
+ v8::debug::SetLiveEditEnabled(m_isolate, true);
v8::MaybeLocal<v8::Value> result = restartFunction->Call(
m_debuggerContext.Get(m_isolate), callFrame, 0, nullptr);
- v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
+ v8::debug::SetLiveEditEnabled(m_isolate, false);
return result;
}
@@ -154,6 +159,7 @@ v8::MaybeLocal<v8::Value> JavaScriptCallFrame::setVariableValue(
v8::Local<v8::Value> argv[] = {
v8::Local<v8::Value>(v8::Integer::New(m_isolate, scopeNumber)),
variableName, newValue};
+ v8::TryCatch try_catch(m_isolate);
return setVariableValueFunction->Call(context, callFrame, arraysize(argv),
argv);
}
diff --git a/deps/v8/src/inspector/java-script-call-frame.h b/deps/v8/src/inspector/java-script-call-frame.h
index 5a4ce19cc2..6b73abf0ad 100644
--- a/deps/v8/src/inspector/java-script-call-frame.h
+++ b/deps/v8/src/inspector/java-script-call-frame.h
@@ -31,10 +31,10 @@
#ifndef V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
#define V8_INSPECTOR_JAVASCRIPTCALLFRAME_H_
+#include <memory>
#include <vector>
#include "src/base/macros.h"
-#include "src/inspector/protocol-platform.h"
#include "include/v8.h"
@@ -44,7 +44,8 @@ class JavaScriptCallFrame {
public:
static std::unique_ptr<JavaScriptCallFrame> create(
v8::Local<v8::Context> debuggerContext, v8::Local<v8::Object> callFrame) {
- return wrapUnique(new JavaScriptCallFrame(debuggerContext, callFrame));
+ return std::unique_ptr<JavaScriptCallFrame>(
+ new JavaScriptCallFrame(debuggerContext, callFrame));
}
~JavaScriptCallFrame();
@@ -54,7 +55,7 @@ class JavaScriptCallFrame {
int contextId() const;
bool isAtReturn() const;
- v8::Local<v8::Object> details() const;
+ v8::MaybeLocal<v8::Object> details() const;
v8::MaybeLocal<v8::Value> evaluate(v8::Local<v8::Value> expression);
v8::MaybeLocal<v8::Value> restart();
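
Changing details() to return v8::MaybeLocal<v8::Object>, and guarding the Call with a v8::TryCatch, converts a crash path into a reportable failure: the old code called ToLocalChecked(), which aborts the process if the callee threw. A minimal sketch of the producer shape under those assumptions (real signatures are in the hunks above; the consumer side appears later in this diff, in V8DebuggerAgentImpl::currentCallFrames):

#include "include/v8.h"

v8::MaybeLocal<v8::Object> CallForDetails(v8::Isolate* isolate,
                                          v8::Local<v8::Context> context,
                                          v8::Local<v8::Function> fn,
                                          v8::Local<v8::Object> receiver) {
  v8::TryCatch try_catch(isolate);  // absorb a JS exception from the callee
  v8::Local<v8::Value> result;
  if (fn->Call(context, receiver, 0, nullptr).ToLocal(&result) &&
      result->IsObject()) {
    return result.As<v8::Object>();
  }
  return v8::MaybeLocal<v8::Object>();  // empty handle signals failure
}
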
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index c1ac585ed1..d0af43ded5 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -9,7 +9,6 @@
"id": "Domain",
"type": "object",
"description": "Description of the protocol domain.",
- "exported": true,
"properties": [
{ "name": "name", "type": "string", "description": "Domain name." },
{ "name": "version", "type": "string", "description": "Domain version." }
@@ -51,7 +50,6 @@
"id": "RemoteObject",
"type": "object",
"description": "Mirror object referencing original JavaScript object.",
- "exported": true,
"properties": [
{ "name": "type", "type": "string", "enum": ["object", "function", "undefined", "string", "number", "boolean", "symbol"], "description": "Object type." },
{ "name": "subtype", "type": "string", "optional": true, "enum": ["array", "null", "node", "regexp", "date", "map", "set", "iterator", "generator", "error", "proxy", "promise", "typedarray"], "description": "Object subtype hint. Specified for <code>object</code> type values only." },
@@ -200,7 +198,6 @@
"id": "StackTrace",
"type": "object",
"description": "Call frames for assertions or error messages.",
- "exported": true,
"properties": [
{ "name": "description", "type": "string", "optional": true, "description": "String label of this stack trace. For async traces this may be a name of the function that initiated the async call." },
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "JavaScript function name." },
@@ -211,7 +208,6 @@
"commands": [
{
"name": "evaluate",
- "async": true,
"parameters": [
{ "name": "expression", "type": "string", "description": "Expression to evaluate." },
{ "name": "objectGroup", "type": "string", "optional": true, "description": "Symbolic group name that can be used to release multiple objects." },
@@ -231,7 +227,6 @@
},
{
"name": "awaitPromise",
- "async": true,
"parameters": [
{ "name": "promiseObjectId", "$ref": "RemoteObjectId", "description": "Identifier of the promise." },
{ "name": "returnByValue", "type": "boolean", "optional": true, "description": "Whether the result is expected to be a JSON object that should be sent by value." },
@@ -245,7 +240,6 @@
},
{
"name": "callFunctionOn",
- "async": true,
"parameters": [
{ "name": "objectId", "$ref": "RemoteObjectId", "description": "Identifier of the object to call function on." },
{ "name": "functionDeclaration", "type": "string", "description": "Declaration of the function to call." },
@@ -333,7 +327,6 @@
},
{
"name": "runScript",
- "async": true,
"parameters": [
{ "name": "scriptId", "$ref": "ScriptId", "description": "Id of the script to run." },
{ "name": "executionContextId", "$ref": "ExecutionContextId", "optional": true, "description": "Specifies in which execution context to perform script run. If the parameter is omitted the evaluation will be performed in the context of the inspected page." },
@@ -460,7 +453,7 @@
"id": "Scope",
"type": "object",
"properties": [
- { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script"], "description": "Scope type." },
+ { "name": "type", "type": "string", "enum": ["global", "local", "with", "closure", "catch", "block", "script", "eval", "module"], "description": "Scope type." },
{ "name": "object", "$ref": "Runtime.RemoteObject", "description": "Object representing the scope. For <code>global</code> and <code>with</code> scopes it represents the actual object; for the rest of the scopes, it is artificial transient object enumerating scope variables as its properties." },
{ "name": "name", "type": "string", "optional": true },
{ "name": "startLocation", "$ref": "Location", "optional": true, "description": "Location in the source code where scope starts" },
@@ -472,7 +465,6 @@
"id": "SearchMatch",
"type": "object",
"description": "Search match for resource.",
- "exported": true,
"properties": [
{ "name": "lineNumber", "type": "number", "description": "Line number in resource content." },
{ "name": "lineContent", "type": "string", "description": "Line with match content." }
@@ -733,7 +725,7 @@
"name": "paused",
"parameters": [
{ "name": "callFrames", "type": "array", "items": { "$ref": "CallFrame" }, "description": "Call stack the virtual machine stopped on." },
- { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason.", "exported": true },
+ { "name": "reason", "type": "string", "enum": [ "XHR", "DOM", "EventListener", "exception", "assert", "debugCommand", "promiseRejection", "other" ], "description": "Pause reason." },
{ "name": "data", "type": "object", "optional": true, "description": "Object containing break-specific auxiliary properties." },
{ "name": "hitBreakpoints", "type": "array", "optional": true, "items": { "type": "string" }, "description": "Hit breakpoints IDs" },
{ "name": "asyncStackTrace", "$ref": "Runtime.StackTrace", "optional": true, "description": "Async stack trace, if any." }
diff --git a/deps/v8/src/inspector/protocol-platform.h b/deps/v8/src/inspector/protocol-platform.h
deleted file mode 100644
index c7723932b4..0000000000
--- a/deps/v8/src/inspector/protocol-platform.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INSPECTOR_PROTOCOLPLATFORM_H_
-#define V8_INSPECTOR_PROTOCOLPLATFORM_H_
-
-#include <memory>
-
-#include "src/base/logging.h"
-
-namespace v8_inspector {
-
-template <typename T>
-std::unique_ptr<T> wrapUnique(T* ptr) {
- return std::unique_ptr<T>(ptr);
-}
-
-} // namespace v8_inspector
-
-#endif // V8_INSPECTOR_PROTOCOLPLATFORM_H_
diff --git a/deps/v8/src/inspector/remote-object-id.cc b/deps/v8/src/inspector/remote-object-id.cc
index aac6724498..2f5f051816 100644
--- a/deps/v8/src/inspector/remote-object-id.cc
+++ b/deps/v8/src/inspector/remote-object-id.cc
@@ -13,7 +13,8 @@ RemoteObjectIdBase::RemoteObjectIdBase() : m_injectedScriptId(0) {}
std::unique_ptr<protocol::DictionaryValue>
RemoteObjectIdBase::parseInjectedScriptId(const String16& objectId) {
- std::unique_ptr<protocol::Value> parsedValue = protocol::parseJSON(objectId);
+ std::unique_ptr<protocol::Value> parsedValue =
+ protocol::StringUtil::parseJSON(objectId);
if (!parsedValue || parsedValue->type() != protocol::Value::TypeObject)
return nullptr;
diff --git a/deps/v8/src/inspector/script-breakpoint.h b/deps/v8/src/inspector/script-breakpoint.h
index 025233dd19..a981b1626c 100644
--- a/deps/v8/src/inspector/script-breakpoint.h
+++ b/deps/v8/src/inspector/script-breakpoint.h
@@ -35,15 +35,18 @@
namespace v8_inspector {
struct ScriptBreakpoint {
- ScriptBreakpoint() : ScriptBreakpoint(0, 0, String16()) {}
-
- ScriptBreakpoint(int lineNumber, int columnNumber, const String16& condition)
- : lineNumber(lineNumber),
- columnNumber(columnNumber),
- condition(condition) {}
-
- int lineNumber;
- int columnNumber;
+ ScriptBreakpoint() {}
+
+ ScriptBreakpoint(String16 script_id, int line_number, int column_number,
+ String16 condition)
+ : script_id(std::move(script_id)),
+ line_number(line_number),
+ column_number(column_number),
+ condition(std::move(condition)) {}
+
+ String16 script_id;
+ int line_number = 0;
+ int column_number = 0;
String16 condition;
};
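
ScriptBreakpoint grows a script_id field, making a breakpoint self-describing (the resolveBreakpoint() signature change later in this diff drops the separate scriptId parameter), takes its strings by value and moves them, and replaces the delegating constructor with in-class default initializers. A minimal sketch of the by-value-plus-move idiom, with std::string standing in for String16:

#include <string>
#include <utility>

struct Breakpoint {
  Breakpoint() {}  // members below carry in-class defaults
  Breakpoint(std::string script_id, int line, int column,
             std::string condition)
      : script_id(std::move(script_id)),
        line_number(line),
        column_number(column),
        condition(std::move(condition)) {}

  std::string script_id;
  int line_number = 0;
  int column_number = 0;
  std::string condition;
};

int main() {
  // rvalue arguments are moved all the way in; lvalues cost one copy.
  Breakpoint bp("42", 10, 0, "x > 1");
  return bp.line_number == 10 ? 0 : 1;
}
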
diff --git a/deps/v8/src/inspector/search-util.cc b/deps/v8/src/inspector/search-util.cc
index a6fba06c11..b05d7a07ec 100644
--- a/deps/v8/src/inspector/search-util.cc
+++ b/deps/v8/src/inspector/search-util.cc
@@ -132,7 +132,8 @@ std::unique_ptr<V8Regex> createSearchRegex(V8InspectorImpl* inspector,
const String16& query,
bool caseSensitive, bool isRegex) {
String16 regexSource = isRegex ? query : createSearchRegexSource(query);
- return wrapUnique(new V8Regex(inspector, regexSource, caseSensitive));
+ return std::unique_ptr<V8Regex>(
+ new V8Regex(inspector, regexSource, caseSensitive));
}
} // namespace
diff --git a/deps/v8/src/inspector/string-16.cc b/deps/v8/src/inspector/string-16.cc
index 09909a911b..6544646d71 100644
--- a/deps/v8/src/inspector/string-16.cc
+++ b/deps/v8/src/inspector/string-16.cc
@@ -8,14 +8,11 @@
#include <cctype>
#include <cstdlib>
#include <cstring>
-#include <iomanip>
#include <limits>
-#include <locale>
-#include <sstream>
#include <string>
#include "src/base/platform/platform.h"
-#include "src/inspector/protocol-platform.h"
+#include "src/conversions.h"
namespace v8_inspector {
@@ -367,10 +364,9 @@ static inline void putUTF8Triple(char*& buffer, UChar ch) {
// static
String16 String16::fromInteger(int number) {
- const size_t kBufferSize = 50;
- char buffer[kBufferSize];
- v8::base::OS::SNPrintF(buffer, kBufferSize, "%d", number);
- return String16(buffer);
+ char arr[50];
+ v8::internal::Vector<char> buffer(arr, arraysize(arr));
+ return String16(IntToCString(number, buffer));
}
// static
@@ -387,19 +383,16 @@ String16 String16::fromInteger(size_t number) {
// static
String16 String16::fromDouble(double number) {
- std::ostringstream s;
- s.imbue(std::locale("C"));
- s << std::fixed << std::setprecision(std::numeric_limits<double>::digits10)
- << number;
- return String16(s.str().c_str());
+ char arr[50];
+ v8::internal::Vector<char> buffer(arr, arraysize(arr));
+ return String16(DoubleToCString(number, buffer));
}
// static
String16 String16::fromDouble(double number, int precision) {
- std::ostringstream s;
- s.imbue(std::locale("C"));
- s << std::fixed << std::setprecision(precision) << number;
- return String16(s.str().c_str());
+ std::unique_ptr<char[]> str(
+ v8::internal::DoubleToPrecisionCString(number, precision));
+ return String16(str.get());
}
int String16::toInteger(bool* ok) const {
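
fromInteger/fromDouble switch from std::ostringstream, with its locale imbuing and fixed precision, to V8's own conversion helpers writing into a small stack buffer, which is faster and locale-independent; this is also what the "+src/conversions.h" DEPS entry earlier in this diff permits. A rough sketch of the shape of the pattern, with snprintf("%.17g") standing in for DoubleToCString (the real helper produces the shortest string that round-trips, which %.17g does not guarantee):

#include <cstdio>
#include <string>

std::string FromDouble(double number) {
  char buffer[50];  // ample for any double formatted as %.17g
  std::snprintf(buffer, sizeof buffer, "%.17g", number);
  return std::string(buffer);
}

int main() { return FromDouble(0.5) == "0.5" ? 0 : 1; }
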
diff --git a/deps/v8/src/inspector/string-16.h b/deps/v8/src/inspector/string-16.h
index 360ec93864..0270f5117a 100644
--- a/deps/v8/src/inspector/string-16.h
+++ b/deps/v8/src/inspector/string-16.h
@@ -23,7 +23,7 @@ class String16 {
String16() {}
String16(const String16& other)
: m_impl(other.m_impl), hash_code(other.hash_code) {}
- String16(const String16&& other)
+ String16(String16&& other)
: m_impl(std::move(other.m_impl)), hash_code(other.hash_code) {}
String16(const UChar* characters, size_t size) : m_impl(characters, size) {}
String16(const UChar* characters) // NOLINT(runtime/explicit)
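
The one-character-looking change above fixes a real bug: `const String16&&` binds rvalues, but the const blocks the move, so `std::move(other.m_impl)` resolved to the copy constructor and every "move" of a String16 silently copied the backing buffer. A minimal demonstration with std::string:

#include <string>
#include <utility>

struct Holder {
  std::string s = std::string(1000, 'x');
  Holder() {}
  // Broken variant: const rvalue reference. std::move(other.s) yields a
  // `const std::string&&`, which can only bind to the *copy* constructor.
  // Holder(const Holder&& other) : s(std::move(other.s)) {}
  // Fixed variant: a mutable rvalue reference actually steals the buffer.
  Holder(Holder&& other) : s(std::move(other.s)) {}
};

int main() {
  Holder a;
  Holder b(std::move(a));  // a.s is left valid but unspecified (typically empty)
  return b.s.size() == 1000 ? 0 : 1;
}
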
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index e6ad5d0c5b..31b2db572d 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -50,8 +50,7 @@ v8::Local<v8::String> toV8String(v8::Isolate* isolate,
}
String16 toProtocolString(v8::Local<v8::String> value) {
- if (value.IsEmpty() || value->IsNull() || value->IsUndefined())
- return String16();
+ if (value.IsEmpty() || value->IsNullOrUndefined()) return String16();
std::unique_ptr<UChar[]> buffer(new UChar[value->Length()]);
value->Write(reinterpret_cast<uint16_t*>(buffer.get()), 0, value->Length());
return String16(buffer.get(), value->Length());
@@ -93,19 +92,20 @@ bool stringViewStartsWith(const StringView& string, const char* prefix) {
namespace protocol {
-std::unique_ptr<protocol::Value> parseJSON(const StringView& string) {
+std::unique_ptr<protocol::Value> StringUtil::parseJSON(
+ const StringView& string) {
if (!string.length()) return nullptr;
if (string.is8Bit()) {
- return protocol::parseJSON(string.characters8(),
+ return parseJSONCharacters(string.characters8(),
static_cast<int>(string.length()));
}
- return protocol::parseJSON(string.characters16(),
+ return parseJSONCharacters(string.characters16(),
static_cast<int>(string.length()));
}
-std::unique_ptr<protocol::Value> parseJSON(const String16& string) {
+std::unique_ptr<protocol::Value> StringUtil::parseJSON(const String16& string) {
if (!string.length()) return nullptr;
- return protocol::parseJSON(string.characters16(),
+ return parseJSONCharacters(string.characters16(),
static_cast<int>(string.length()));
}
@@ -119,7 +119,7 @@ std::unique_ptr<StringBuffer> StringBuffer::create(const StringView& string) {
// static
std::unique_ptr<StringBufferImpl> StringBufferImpl::adopt(String16& string) {
- return wrapUnique(new StringBufferImpl(string));
+ return std::unique_ptr<StringBufferImpl>(new StringBufferImpl(string));
}
StringBufferImpl::StringBufferImpl(String16& string) {
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index e1a69e8906..c484aab2ed 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -5,6 +5,9 @@
#ifndef V8_INSPECTOR_STRINGUTIL_H_
#define V8_INSPECTOR_STRINGUTIL_H_
+#include <memory>
+
+#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
@@ -33,11 +36,10 @@ class StringUtil {
static void builderReserve(StringBuilder& builder, size_t capacity) {
builder.reserveCapacity(capacity);
}
+ static std::unique_ptr<protocol::Value> parseJSON(const String16& json);
+ static std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
};
-std::unique_ptr<protocol::Value> parseJSON(const StringView& json);
-std::unique_ptr<protocol::Value> parseJSON(const String16& json);
-
} // namespace protocol
v8::Local<v8::String> toV8String(v8::Isolate*, const String16&);
diff --git a/deps/v8/src/inspector/test-interface.cc b/deps/v8/src/inspector/test-interface.cc
new file mode 100644
index 0000000000..ead1dc3b81
--- /dev/null
+++ b/deps/v8/src/inspector/test-interface.cc
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/test-interface.h"
+
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+namespace v8_inspector {
+
+void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit) {
+ static_cast<V8InspectorImpl*>(inspector)
+ ->debugger()
+ ->setMaxAsyncTaskStacksForTest(limit);
+}
+
+}  // namespace v8_inspector
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
new file mode 100644
index 0000000000..98bedc2786
--- /dev/null
+++ b/deps/v8/src/inspector/test-interface.h
@@ -0,0 +1,18 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_TEST_INTERFACE_H_
+#define V8_INSPECTOR_TEST_INTERFACE_H_
+
+#include "include/v8.h"
+
+namespace v8_inspector {
+
+class V8Inspector;
+
+V8_EXPORT void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit);
+
+}  // namespace v8_inspector
+
+#endif // V8_INSPECTOR_TEST_INTERFACE_H_
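
test-interface.{h,cc} add a deliberately tiny test-only surface: one exported free function that downcasts the public V8Inspector to V8InspectorImpl and pokes an internal limit, so tests never include implementation headers directly. A minimal sketch of the pattern (Engine/EngineImpl are stand-ins, not inspector types):

// Public header side: only the interface and the test hook are visible.
class Engine {
 public:
  virtual ~Engine() {}
};
void SetLimitForTest(Engine* engine, int limit);

// Implementation side: the hook is the single place allowed to downcast.
class EngineImpl : public Engine {
 public:
  void setLimitForTest(int limit) { limit_ = limit; }

 private:
  int limit_ = 128;
};

void SetLimitForTest(Engine* engine, int limit) {
  static_cast<EngineImpl*>(engine)->setLimitForTest(limit);
}

int main() {
  EngineImpl engine;
  SetLimitForTest(&engine, 1);  // a test dials the knob through the seam
  return 0;
}
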
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 63f1d49faf..281a0b1d90 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-console-message.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
@@ -58,6 +59,7 @@ String16 consoleAPITypeValue(ConsoleAPIType type) {
}
const unsigned maxConsoleMessageCount = 1000;
+const int maxConsoleMessageV8Size = 10 * 1024 * 1024;
const unsigned maxArrayItemsLimit = 10000;
const unsigned maxStackDepthLimit = 32;
@@ -361,7 +363,7 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
V8InspectorImpl* inspector = inspectedContext->inspector();
v8::Local<v8::Context> context = inspectedContext->context();
- std::unique_ptr<V8ConsoleMessage> message = wrapUnique(
+ std::unique_ptr<V8ConsoleMessage> message(
new V8ConsoleMessage(V8MessageOrigin::kConsole, timestamp, String16()));
if (stackTrace && !stackTrace->isEmpty()) {
message->m_url = toString16(stackTrace->topSourceURL());
@@ -371,9 +373,12 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForConsoleAPI(
message->m_stackTrace = std::move(stackTrace);
message->m_type = type;
message->m_contextId = contextId;
- for (size_t i = 0; i < arguments.size(); ++i)
- message->m_arguments.push_back(
- wrapUnique(new v8::Global<v8::Value>(isolate, arguments.at(i))));
+ for (size_t i = 0; i < arguments.size(); ++i) {
+ message->m_arguments.push_back(std::unique_ptr<v8::Global<v8::Value>>(
+ new v8::Global<v8::Value>(isolate, arguments.at(i))));
+ message->m_v8Size +=
+ v8::debug::EstimatedValueSize(isolate, arguments.at(i));
+ }
if (arguments.size())
message->m_message = V8ValueStringBuilder::toString(arguments[0], context);
@@ -404,7 +409,7 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForException(
std::unique_ptr<V8StackTraceImpl> stackTrace, int scriptId,
v8::Isolate* isolate, const String16& message, int contextId,
v8::Local<v8::Value> exception, unsigned exceptionId) {
- std::unique_ptr<V8ConsoleMessage> consoleMessage = wrapUnique(
+ std::unique_ptr<V8ConsoleMessage> consoleMessage(
new V8ConsoleMessage(V8MessageOrigin::kException, timestamp, message));
consoleMessage->setLocation(url, lineNumber, columnNumber,
std::move(stackTrace), scriptId);
@@ -413,7 +418,10 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForException(
if (contextId && !exception.IsEmpty()) {
consoleMessage->m_contextId = contextId;
consoleMessage->m_arguments.push_back(
- wrapUnique(new v8::Global<v8::Value>(isolate, exception)));
+ std::unique_ptr<v8::Global<v8::Value>>(
+ new v8::Global<v8::Value>(isolate, exception)));
+ consoleMessage->m_v8Size +=
+ v8::debug::EstimatedValueSize(isolate, exception);
}
return consoleMessage;
}
@@ -422,7 +430,7 @@ std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForException(
std::unique_ptr<V8ConsoleMessage> V8ConsoleMessage::createForRevokedException(
double timestamp, const String16& messageText,
unsigned revokedExceptionId) {
- std::unique_ptr<V8ConsoleMessage> message = wrapUnique(new V8ConsoleMessage(
+ std::unique_ptr<V8ConsoleMessage> message(new V8ConsoleMessage(
V8MessageOrigin::kRevokedException, timestamp, messageText));
message->m_revokedExceptionId = revokedExceptionId;
return message;
@@ -434,15 +442,14 @@ void V8ConsoleMessage::contextDestroyed(int contextId) {
if (m_message.isEmpty()) m_message = "<message collected>";
Arguments empty;
m_arguments.swap(empty);
+ m_v8Size = 0;
}
// ------------------------ V8ConsoleMessageStorage ----------------------------
V8ConsoleMessageStorage::V8ConsoleMessageStorage(V8InspectorImpl* inspector,
int contextGroupId)
- : m_inspector(inspector),
- m_contextGroupId(contextGroupId),
- m_expiredCount(0) {}
+ : m_inspector(inspector), m_contextGroupId(contextGroupId) {}
V8ConsoleMessageStorage::~V8ConsoleMessageStorage() { clear(); }
@@ -463,23 +470,33 @@ void V8ConsoleMessageStorage::addMessage(
DCHECK(m_messages.size() <= maxConsoleMessageCount);
if (m_messages.size() == maxConsoleMessageCount) {
- ++m_expiredCount;
+ m_estimatedSize -= m_messages.front()->estimatedSize();
+ m_messages.pop_front();
+ }
+ while (m_estimatedSize + message->estimatedSize() > maxConsoleMessageV8Size &&
+ !m_messages.empty()) {
+ m_estimatedSize -= m_messages.front()->estimatedSize();
m_messages.pop_front();
}
+
m_messages.push_back(std::move(message));
+ m_estimatedSize += m_messages.back()->estimatedSize();
}
void V8ConsoleMessageStorage::clear() {
m_messages.clear();
- m_expiredCount = 0;
+ m_estimatedSize = 0;
if (V8InspectorSessionImpl* session =
m_inspector->sessionForContextGroup(m_contextGroupId))
session->releaseObjectGroup("console");
}
void V8ConsoleMessageStorage::contextDestroyed(int contextId) {
- for (size_t i = 0; i < m_messages.size(); ++i)
+ m_estimatedSize = 0;
+ for (size_t i = 0; i < m_messages.size(); ++i) {
m_messages[i]->contextDestroyed(contextId);
+ m_estimatedSize += m_messages[i]->estimatedSize();
+ }
}
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index a6e9eafe2d..8ab81f4dcb 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -65,6 +65,10 @@ class V8ConsoleMessage {
ConsoleAPIType type() const;
void contextDestroyed(int contextId);
+ int estimatedSize() const {
+ return m_v8Size + static_cast<int>(m_message.length() * sizeof(UChar));
+ }
+
private:
V8ConsoleMessage(V8MessageOrigin, double timestamp, const String16& message);
@@ -89,6 +93,7 @@ class V8ConsoleMessage {
ConsoleAPIType m_type;
unsigned m_exceptionId;
unsigned m_revokedExceptionId;
+ int m_v8Size = 0;
Arguments m_arguments;
String16 m_detailedMessage;
};
@@ -99,7 +104,6 @@ class V8ConsoleMessageStorage {
~V8ConsoleMessageStorage();
int contextGroupId() { return m_contextGroupId; }
- int expiredCount() { return m_expiredCount; }
const std::deque<std::unique_ptr<V8ConsoleMessage>>& messages() const {
return m_messages;
}
@@ -111,7 +115,7 @@ class V8ConsoleMessageStorage {
private:
V8InspectorImpl* m_inspector;
int m_contextGroupId;
- int m_expiredCount;
+ int m_estimatedSize = 0;
std::deque<std::unique_ptr<V8ConsoleMessage>> m_messages;
};
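
The storage policy changes from counting evicted messages (m_expiredCount) to accounting for their size: each message reports estimatedSize(), its retained V8 values as estimated by v8::debug::EstimatedValueSize plus its text, and addMessage evicts from the front until both the 1000-message cap and the 10 MB byte budget hold. A minimal sketch of the byte-budget half with plain strings:

#include <deque>
#include <string>
#include <utility>

class BoundedLog {
 public:
  void add(std::string message) {
    // Evict oldest entries until the newcomer fits under the budget.
    while (!messages_.empty() &&
           estimated_ + static_cast<int>(message.size()) > kMaxBytes) {
      estimated_ -= static_cast<int>(messages_.front().size());
      messages_.pop_front();
    }
    estimated_ += static_cast<int>(message.size());
    messages_.push_back(std::move(message));
  }

 private:
  static const int kMaxBytes = 10 * 1024 * 1024;  // mirrors maxConsoleMessageV8Size
  std::deque<std::string> messages_;
  int estimated_ = 0;
};

int main() {
  BoundedLog log;
  log.add(std::string(6 * 1024 * 1024, 'a'));
  log.add(std::string(6 * 1024 * 1024, 'b'));  // evicts the first message
  return 0;
}

Note that contextDestroyed() above recomputes the running total rather than adjusting it, since dropping a destroyed context's arguments shrinks the affected messages in place.
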
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index fee61177e7..3b47d2f6b4 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -714,6 +714,29 @@ v8::Local<v8::Object> V8Console::createConsole(
createBoundFunctionProperty(context, console, "timeStamp",
V8Console::timeStampCallback);
+ const char* jsConsoleAssert =
+ "(function(){\n"
+ " var originAssert = this.assert;\n"
+ " originAssert.apply = Function.prototype.apply;\n"
+ " this.assert = assertWrapper;\n"
+ " assertWrapper.toString = () => originAssert.toString();\n"
+ " function assertWrapper(){\n"
+ " if (!!arguments[0]) return;\n"
+ " originAssert.apply(null, arguments);\n"
+ " }\n"
+ "})";
+
+ v8::Local<v8::String> assertSource = toV8String(isolate, jsConsoleAssert);
+ V8InspectorImpl* inspector = inspectedContext->inspector();
+ v8::Local<v8::Value> setupFunction;
+ if (inspector->compileAndRunInternalScript(context, assertSource)
+ .ToLocal(&setupFunction) &&
+ setupFunction->IsFunction()) {
+ inspector->callInternalFunction(
+ v8::Local<v8::Function>::Cast(setupFunction), context, console, 0,
+ nullptr);
+ }
+
if (hasMemoryAttribute)
console->SetAccessorProperty(
toV8StringInternalized(isolate, "memory"),
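
The jsConsoleAssert snippet wraps console.assert in JavaScript so the overwhelmingly common passing case (`!!arguments[0]`) returns without ever crossing into the C++ assert callback; only failing asserts pay for the native call. Installing it is a compile-then-call dance; a minimal sketch with the plain V8 API standing in for the inspector's compileAndRunInternalScript/callInternalFunction helpers:

#include "include/v8.h"

void InstallWrapper(v8::Local<v8::Context> context,
                    v8::Local<v8::Object> console,
                    v8::Local<v8::String> wrapperSource) {
  v8::Local<v8::Script> script;
  v8::Local<v8::Value> setup;
  // Compiling the source yields "(function(){...})"; running it yields that
  // function, which is then invoked with `console` as the receiver.
  if (v8::Script::Compile(context, wrapperSource).ToLocal(&script) &&
      script->Run(context).ToLocal(&setup) && setup->IsFunction()) {
    v8::Local<v8::Value> unused;
    (void)setup.As<v8::Function>()
        ->Call(context, console, 0, nullptr)
        .ToLocal(&unused);
  }
}
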
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 224ae282c4..b287d1c082 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -60,8 +60,28 @@ static const char kDebuggerNotEnabled[] = "Debugger agent is not enabled";
static const char kDebuggerNotPaused[] =
"Can only perform operation while paused.";
-static String16 breakpointIdSuffix(
- V8DebuggerAgentImpl::BreakpointSource source) {
+namespace {
+
+void TranslateWasmStackTraceLocations(Array<CallFrame>* stackTrace,
+ WasmTranslation* wasmTranslation) {
+ for (size_t i = 0, e = stackTrace->length(); i != e; ++i) {
+ protocol::Debugger::Location* location = stackTrace->get(i)->getLocation();
+ String16 scriptId = location->getScriptId();
+ int lineNumber = location->getLineNumber();
+ int columnNumber = location->getColumnNumber(-1);
+
+ if (!wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+ &scriptId, &lineNumber, &columnNumber)) {
+ continue;
+ }
+
+ location->setScriptId(std::move(scriptId));
+ location->setLineNumber(lineNumber);
+ location->setColumnNumber(columnNumber);
+ }
+}
+
+String16 breakpointIdSuffix(V8DebuggerAgentImpl::BreakpointSource source) {
switch (source) {
case V8DebuggerAgentImpl::UserBreakpointSource:
break;
@@ -73,26 +93,25 @@ static String16 breakpointIdSuffix(
return String16();
}
-static String16 generateBreakpointId(
- const String16& scriptId, int lineNumber, int columnNumber,
- V8DebuggerAgentImpl::BreakpointSource source) {
+String16 generateBreakpointId(const ScriptBreakpoint& breakpoint,
+ V8DebuggerAgentImpl::BreakpointSource source) {
String16Builder builder;
- builder.append(scriptId);
+ builder.append(breakpoint.script_id);
builder.append(':');
- builder.appendNumber(lineNumber);
+ builder.appendNumber(breakpoint.line_number);
builder.append(':');
- builder.appendNumber(columnNumber);
+ builder.appendNumber(breakpoint.column_number);
builder.append(breakpointIdSuffix(source));
return builder.toString();
}
-static bool positionComparator(const std::pair<int, int>& a,
- const std::pair<int, int>& b) {
+bool positionComparator(const std::pair<int, int>& a,
+ const std::pair<int, int>& b) {
if (a.first != b.first) return a.first < b.first;
return a.second < b.second;
}
-static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
+std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
const String16& scriptId, int lineNumber, int columnNumber) {
return protocol::Debugger::Location::create()
.setScriptId(scriptId)
@@ -101,6 +120,8 @@ static std::unique_ptr<protocol::Debugger::Location> buildProtocolLocation(
.build();
}
+} // namespace
+
V8DebuggerAgentImpl::V8DebuggerAgentImpl(
V8InspectorSessionImpl* session, protocol::FrontendChannel* frontendChannel,
protocol::DictionaryValue* state)
@@ -161,7 +182,7 @@ Response V8DebuggerAgentImpl::disable() {
m_state->setObject(DebuggerAgentState::javaScriptBreakpoints,
protocol::DictionaryValue::create());
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState,
- v8::DebugInterface::NoBreakOnException);
+ v8::debug::NoBreakOnException);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
if (!m_pausedContext.IsEmpty()) m_debugger->continueProgram();
@@ -199,7 +220,7 @@ void V8DebuggerAgentImpl::restore() {
enableImpl();
- int pauseState = v8::DebugInterface::NoBreakOnException;
+ int pauseState = v8::debug::NoBreakOnException;
m_state->getInteger(DebuggerAgentState::pauseOnExceptionsState, &pauseState);
setPauseOnExceptionsImpl(pauseState);
@@ -291,12 +312,13 @@ Response V8DebuggerAgentImpl::setBreakpointByUrl(
breakpointId, buildObjectForBreakpointCookie(
url, lineNumber, columnNumber, condition, isRegex));
- ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
+ ScriptBreakpoint breakpoint(String16(), lineNumber, columnNumber, condition);
for (const auto& script : m_scripts) {
if (!matches(m_inspector, script.second->sourceURL(), url, isRegex))
continue;
- std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
- breakpointId, script.first, breakpoint, UserBreakpointSource);
+ breakpoint.script_id = script.first;
+ std::unique_ptr<protocol::Debugger::Location> location =
+ resolveBreakpoint(breakpointId, breakpoint, UserBreakpointSource);
if (location) (*locations)->addItem(std::move(location));
}
@@ -308,21 +330,18 @@ Response V8DebuggerAgentImpl::setBreakpoint(
std::unique_ptr<protocol::Debugger::Location> location,
Maybe<String16> optionalCondition, String16* outBreakpointId,
std::unique_ptr<protocol::Debugger::Location>* actualLocation) {
- String16 scriptId = location->getScriptId();
- int lineNumber = location->getLineNumber();
- int columnNumber = location->getColumnNumber(0);
-
- String16 condition = optionalCondition.fromMaybe("");
+ ScriptBreakpoint breakpoint(
+ location->getScriptId(), location->getLineNumber(),
+ location->getColumnNumber(0), optionalCondition.fromMaybe(String16()));
- String16 breakpointId = generateBreakpointId(
- scriptId, lineNumber, columnNumber, UserBreakpointSource);
+ String16 breakpointId =
+ generateBreakpointId(breakpoint, UserBreakpointSource);
if (m_breakpointIdToDebuggerBreakpointIds.find(breakpointId) !=
m_breakpointIdToDebuggerBreakpointIds.end()) {
return Response::Error("Breakpoint at specified location already exists.");
}
- ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
- *actualLocation = resolveBreakpoint(breakpointId, scriptId, breakpoint,
- UserBreakpointSource);
+ *actualLocation =
+ resolveBreakpoint(breakpointId, breakpoint, UserBreakpointSource);
if (!*actualLocation) return Response::Error("Could not resolve breakpoint");
*outBreakpointId = breakpointId;
return Response::OK();
@@ -365,9 +384,9 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
return Response::Error(
"start.lineNumber and start.columnNumber should be >= 0");
- v8::DebugInterface::Location v8Start(start->getLineNumber(),
- start->getColumnNumber(0));
- v8::DebugInterface::Location v8End;
+ v8::debug::Location v8Start(start->getLineNumber(),
+ start->getColumnNumber(0));
+ v8::debug::Location v8End;
if (end.isJust()) {
if (end.fromJust()->getScriptId() != scriptId)
return Response::Error("Locations should contain the same scriptId");
@@ -376,12 +395,12 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
if (line < 0 || column < 0)
return Response::Error(
"end.lineNumber and end.columnNumber should be >= 0");
- v8End = v8::DebugInterface::Location(line, column);
+ v8End = v8::debug::Location(line, column);
}
auto it = m_scripts.find(scriptId);
if (it == m_scripts.end()) return Response::Error("Script not found");
- std::vector<v8::DebugInterface::Location> v8Locations;
+ std::vector<v8::debug::Location> v8Locations;
if (!it->second->getPossibleBreakpoints(v8Start, v8End, &v8Locations))
return Response::InternalError();
@@ -405,13 +424,13 @@ Response V8DebuggerAgentImpl::continueToLocation(
m_continueToLocationBreakpointId = "";
}
- String16 scriptId = location->getScriptId();
- int lineNumber = location->getLineNumber();
- int columnNumber = location->getColumnNumber(0);
+ ScriptBreakpoint breakpoint(location->getScriptId(),
+ location->getLineNumber(),
+ location->getColumnNumber(0), String16());
- ScriptBreakpoint breakpoint(lineNumber, columnNumber, "");
m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
- scriptId, breakpoint, &lineNumber, &columnNumber);
+ breakpoint, &breakpoint.line_number, &breakpoint.column_number);
+ // TODO(kozyatinskiy): Return actual line and column number.
return resume();
}
@@ -493,23 +512,28 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::shouldSkipStepPause(
std::unique_ptr<protocol::Debugger::Location>
V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
- const String16& scriptId,
const ScriptBreakpoint& breakpoint,
BreakpointSource source) {
+ v8::HandleScope handles(m_isolate);
DCHECK(enabled());
// FIXME: remove these checks once crbug.com/520702 is resolved.
CHECK(!breakpointId.isEmpty());
- CHECK(!scriptId.isEmpty());
- ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+ CHECK(!breakpoint.script_id.isEmpty());
+ ScriptsMap::iterator scriptIterator = m_scripts.find(breakpoint.script_id);
if (scriptIterator == m_scripts.end()) return nullptr;
- if (breakpoint.lineNumber < scriptIterator->second->startLine() ||
- scriptIterator->second->endLine() < breakpoint.lineNumber)
+ if (breakpoint.line_number < scriptIterator->second->startLine() ||
+ scriptIterator->second->endLine() < breakpoint.line_number)
return nullptr;
+ ScriptBreakpoint translatedBreakpoint = breakpoint;
+ m_debugger->wasmTranslation()->TranslateProtocolLocationToWasmScriptLocation(
+ &translatedBreakpoint.script_id, &translatedBreakpoint.line_number,
+ &translatedBreakpoint.column_number);
+
int actualLineNumber;
int actualColumnNumber;
String16 debuggerBreakpointId = m_debugger->setBreakpoint(
- scriptId, breakpoint, &actualLineNumber, &actualColumnNumber);
+ translatedBreakpoint, &actualLineNumber, &actualColumnNumber);
if (debuggerBreakpointId.isEmpty()) return nullptr;
m_serverBreakpoints[debuggerBreakpointId] =
@@ -518,7 +542,8 @@ V8DebuggerAgentImpl::resolveBreakpoint(const String16& breakpointId,
m_breakpointIdToDebuggerBreakpointIds[breakpointId].push_back(
debuggerBreakpointId);
- return buildProtocolLocation(scriptId, actualLineNumber, actualColumnNumber);
+ return buildProtocolLocation(translatedBreakpoint.script_id, actualLineNumber,
+ actualColumnNumber);
}
Response V8DebuggerAgentImpl::searchInContent(
@@ -531,9 +556,8 @@ Response V8DebuggerAgentImpl::searchInContent(
return Response::Error("No script for id: " + scriptId);
std::vector<std::unique_ptr<protocol::Debugger::SearchMatch>> matches =
- searchInTextByLinesImpl(m_session,
- toProtocolString(it->second->source(m_isolate)),
- query, optionalCaseSensitive.fromMaybe(false),
+ searchInTextByLinesImpl(m_session, it->second->source(m_isolate), query,
+ optionalCaseSensitive.fromMaybe(false),
optionalIsRegex.fromMaybe(false));
*results = protocol::Array<protocol::Debugger::SearchMatch>::create();
for (size_t i = 0; i < matches.size(); ++i)
@@ -604,7 +628,7 @@ Response V8DebuggerAgentImpl::getScriptSource(const String16& scriptId,
if (it == m_scripts.end())
return Response::Error("No script for id: " + scriptId);
v8::HandleScope handles(m_isolate);
- *scriptSource = toProtocolString(it->second->source(m_isolate));
+ *scriptSource = it->second->source(m_isolate);
return Response::OK();
}
@@ -699,13 +723,13 @@ Response V8DebuggerAgentImpl::stepOut() {
Response V8DebuggerAgentImpl::setPauseOnExceptions(
const String16& stringPauseState) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- v8::DebugInterface::ExceptionBreakState pauseState;
+ v8::debug::ExceptionBreakState pauseState;
if (stringPauseState == "none") {
- pauseState = v8::DebugInterface::NoBreakOnException;
+ pauseState = v8::debug::NoBreakOnException;
} else if (stringPauseState == "all") {
- pauseState = v8::DebugInterface::BreakOnAnyException;
+ pauseState = v8::debug::BreakOnAnyException;
} else if (stringPauseState == "uncaught") {
- pauseState = v8::DebugInterface::BreakOnUncaughtException;
+ pauseState = v8::debug::BreakOnUncaughtException;
} else {
return Response::Error("Unknown pause on exceptions mode: " +
stringPauseState);
@@ -716,7 +740,7 @@ Response V8DebuggerAgentImpl::setPauseOnExceptions(
void V8DebuggerAgentImpl::setPauseOnExceptionsImpl(int pauseState) {
m_debugger->setPauseOnExceptionsState(
- static_cast<v8::DebugInterface::ExceptionBreakState>(pauseState));
+ static_cast<v8::debug::ExceptionBreakState>(pauseState));
m_state->setInteger(DebuggerAgentState::pauseOnExceptionsState, pauseState);
}
@@ -910,7 +934,7 @@ Response V8DebuggerAgentImpl::currentCallFrames(
}
v8::HandleScope handles(m_isolate);
v8::Local<v8::Context> debuggerContext =
- v8::DebugInterface::GetDebugContext(m_isolate);
+ v8::debug::GetDebugContext(m_isolate);
v8::Context::Scope contextScope(debuggerContext);
v8::Local<v8::Array> objects = v8::Array::New(m_isolate);
@@ -920,8 +944,9 @@ Response V8DebuggerAgentImpl::currentCallFrames(
const std::unique_ptr<JavaScriptCallFrame>& currentCallFrame =
m_pausedCallFrames[frameOrdinal];
- v8::Local<v8::Object> details = currentCallFrame->details();
- if (details.IsEmpty()) return Response::InternalError();
+ v8::Local<v8::Object> details;
+ if (!currentCallFrame->details().ToLocal(&details))
+ return Response::InternalError();
int contextId = currentCallFrame->contextId();
@@ -1004,8 +1029,10 @@ Response V8DebuggerAgentImpl::currentCallFrames(
Response response = toProtocolValue(debuggerContext, objects, &protocolValue);
if (!response.isSuccess()) return response;
protocol::ErrorSupport errorSupport;
- *result = Array<CallFrame>::parse(protocolValue.get(), &errorSupport);
+ *result = Array<CallFrame>::fromValue(protocolValue.get(), &errorSupport);
if (!*result) return Response::Error(errorSupport.errors());
+ TranslateWasmStackTraceLocations(result->get(),
+ m_debugger->wasmTranslation());
return Response::OK();
}
@@ -1019,40 +1046,51 @@ std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
void V8DebuggerAgentImpl::didParseSource(
std::unique_ptr<V8DebuggerScript> script, bool success) {
v8::HandleScope handles(m_isolate);
- String16 scriptSource = toProtocolString(script->source(m_isolate));
+ String16 scriptSource = script->source(m_isolate);
if (!success) script->setSourceURL(findSourceURL(scriptSource, false));
if (!success)
script->setSourceMappingURL(findSourceMapURL(scriptSource, false));
+ int contextId = script->executionContextId();
+ int contextGroupId = m_inspector->contextGroupId(contextId);
+ InspectedContext* inspected =
+ m_inspector->getContext(contextGroupId, contextId);
std::unique_ptr<protocol::DictionaryValue> executionContextAuxData;
- if (!script->executionContextAuxData().isEmpty())
+ if (inspected) {
+ // A script reused between different groups/sessions can have a stale
+ // execution context id.
executionContextAuxData = protocol::DictionaryValue::cast(
- protocol::parseJSON(script->executionContextAuxData()));
+ protocol::StringUtil::parseJSON(inspected->auxData()));
+ }
bool isLiveEdit = script->isLiveEdit();
bool hasSourceURL = script->hasSourceURL();
String16 scriptId = script->scriptId();
String16 scriptURL = script->sourceURL();
- Maybe<String16> sourceMapURLParam = script->sourceMappingURL();
+ m_scripts[scriptId] = std::move(script);
+
+ ScriptsMap::iterator scriptIterator = m_scripts.find(scriptId);
+ DCHECK(scriptIterator != m_scripts.end());
+ V8DebuggerScript* scriptRef = scriptIterator->second.get();
+
+ Maybe<String16> sourceMapURLParam = scriptRef->sourceMappingURL();
Maybe<protocol::DictionaryValue> executionContextAuxDataParam(
std::move(executionContextAuxData));
const bool* isLiveEditParam = isLiveEdit ? &isLiveEdit : nullptr;
const bool* hasSourceURLParam = hasSourceURL ? &hasSourceURL : nullptr;
if (success)
m_frontend.scriptParsed(
- scriptId, scriptURL, script->startLine(), script->startColumn(),
- script->endLine(), script->endColumn(), script->executionContextId(),
- script->hash(), std::move(executionContextAuxDataParam),
+ scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+ scriptRef->endLine(), scriptRef->endColumn(), contextId,
+ scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
isLiveEditParam, std::move(sourceMapURLParam), hasSourceURLParam);
else
m_frontend.scriptFailedToParse(
- scriptId, scriptURL, script->startLine(), script->startColumn(),
- script->endLine(), script->endColumn(), script->executionContextId(),
- script->hash(), std::move(executionContextAuxDataParam),
+ scriptId, scriptURL, scriptRef->startLine(), scriptRef->startColumn(),
+ scriptRef->endLine(), scriptRef->endColumn(), contextId,
+ scriptRef->hash(m_isolate), std::move(executionContextAuxDataParam),
std::move(sourceMapURLParam), hasSourceURLParam);
- m_scripts[scriptId] = std::move(script);
-
if (scriptURL.isEmpty() || !success) return;
protocol::DictionaryValue* breakpointsCookie =
@@ -1069,14 +1107,15 @@ void V8DebuggerAgentImpl::didParseSource(
breakpointObject->getString(DebuggerAgentState::url, &url);
if (!matches(m_inspector, scriptURL, url, isRegex)) continue;
ScriptBreakpoint breakpoint;
+ breakpoint.script_id = scriptId;
breakpointObject->getInteger(DebuggerAgentState::lineNumber,
- &breakpoint.lineNumber);
+ &breakpoint.line_number);
breakpointObject->getInteger(DebuggerAgentState::columnNumber,
- &breakpoint.columnNumber);
+ &breakpoint.column_number);
breakpointObject->getString(DebuggerAgentState::condition,
&breakpoint.condition);
- std::unique_ptr<protocol::Debugger::Location> location = resolveBreakpoint(
- cookie.first, scriptId, breakpoint, UserBreakpointSource);
+ std::unique_ptr<protocol::Debugger::Location> location =
+ resolveBreakpoint(cookie.first, breakpoint, UserBreakpointSource);
if (location)
m_frontend.breakpointResolved(cookie.first, std::move(location));
}
@@ -1117,7 +1156,7 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
if (!exception.IsEmpty()) {
InjectedScript* injectedScript = nullptr;
- m_session->findInjectedScript(V8Debugger::contextId(context),
+ m_session->findInjectedScript(InspectedContext::contextId(context),
injectedScript);
if (injectedScript) {
m_breakReason =
@@ -1128,7 +1167,7 @@ V8DebuggerAgentImpl::SkipPauseRequest V8DebuggerAgentImpl::didPause(
injectedScript->wrapObject(exception, kBacktraceObjectGroup, false, false,
&obj);
if (obj) {
- m_breakAuxData = obj->serialize();
+ m_breakAuxData = obj->toValue();
m_breakAuxData->setBoolean("uncaught", isUncaught);
} else {
m_breakAuxData = nullptr;
@@ -1200,8 +1239,7 @@ void V8DebuggerAgentImpl::breakProgramOnException(
const String16& breakReason,
std::unique_ptr<protocol::DictionaryValue> data) {
if (!enabled() ||
- m_debugger->getPauseOnExceptionsState() ==
- v8::DebugInterface::NoBreakOnException)
+ m_debugger->getPauseOnExceptionsState() == v8::debug::NoBreakOnException)
return;
breakProgram(breakReason, std::move(data));
}
@@ -1215,17 +1253,17 @@ void V8DebuggerAgentImpl::setBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source,
const String16& condition) {
- String16 breakpointId =
- generateBreakpointId(scriptId, lineNumber, columnNumber, source);
- ScriptBreakpoint breakpoint(lineNumber, columnNumber, condition);
- resolveBreakpoint(breakpointId, scriptId, breakpoint, source);
+ ScriptBreakpoint breakpoint(scriptId, lineNumber, columnNumber, condition);
+ String16 breakpointId = generateBreakpointId(breakpoint, source);
+ resolveBreakpoint(breakpointId, breakpoint, source);
}
void V8DebuggerAgentImpl::removeBreakpointAt(const String16& scriptId,
int lineNumber, int columnNumber,
BreakpointSource source) {
- removeBreakpointImpl(
- generateBreakpointId(scriptId, lineNumber, columnNumber, source));
+ removeBreakpointImpl(generateBreakpointId(
+ ScriptBreakpoint(scriptId, lineNumber, columnNumber, String16()),
+ source));
}
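Both helpers above now funnel through the ScriptBreakpoint struct. For removal the condition is irrelevant, so a throwaway breakpoint with an empty condition suffices, which suggests (an assumption, not confirmed by this patch) that generateBreakpointId derives the id from script id, line, column and source only:

    // Sketch; String16() stands in for "no condition".
    ScriptBreakpoint key(scriptId, lineNumber, columnNumber, String16());
    removeBreakpointImpl(generateBreakpointId(key, source));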
void V8DebuggerAgentImpl::reset() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index e5285f4cc3..4e8e336545 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -162,8 +162,7 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
void setPauseOnExceptionsImpl(int);
std::unique_ptr<protocol::Debugger::Location> resolveBreakpoint(
- const String16& breakpointId, const String16& scriptId,
- const ScriptBreakpoint&, BreakpointSource);
+ const String16& breakpointId, const ScriptBreakpoint&, BreakpointSource);
void removeBreakpointImpl(const String16& breakpointId);
void clearBreakDetails();
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index ed0c0d63de..d6d15e5ae6 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -4,14 +4,16 @@
#include "src/inspector/v8-debugger-script.h"
-#include "src/inspector/protocol-platform.h"
+#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
namespace v8_inspector {
-static const char hexDigits[17] = "0123456789ABCDEF";
+namespace {
-static void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
+const char hexDigits[17] = "0123456789ABCDEF";
+
+void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
for (size_t i = 0; i < 8; ++i) {
UChar c = hexDigits[number & 0xF];
destination->append(c);
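The hunk cuts the loop off here; the elided remainder presumably shifts number right by four bits each pass (as in upstream V8), so the eight emitted nibbles run least-significant first and cover only the low 32 bits. A self-contained sketch of that behaviour, with std::string standing in for String16Builder:

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Emits 8 hex digits, low nibble first, i.e. the low 32 bits reversed.
    std::string appendUnsignedAsHex(uint64_t number) {
      static const char hexDigits[17] = "0123456789ABCDEF";
      std::string out;
      for (int i = 0; i < 8; ++i) {
        out += hexDigits[number & 0xF];
        number >>= 4;  // assumed elided step
      }
      return out;
    }

    int main() {
      assert(appendUnsignedAsHex(0x1A2B3C4Du) == "D4C3B2A1");
      return 0;
    }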
@@ -23,7 +25,7 @@ static void appendUnsignedAsHex(uint64_t number, String16Builder* destination) {
// Multiplikation in
// eingeschränkten Branchingprogrammmodellen" by Woelfel.
// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
-static String16 calculateHash(const String16& str) {
+String16 calculateHash(const String16& str) {
static uint64_t prime[] = {0x3FB75161, 0xAB1F4E4F, 0x82675BC5, 0xCD924D35,
0x81ABE279};
static uint64_t random[] = {0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476,
@@ -67,98 +69,178 @@ static String16 calculateHash(const String16& str) {
return hash.toString();
}
-V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate,
- v8::Local<v8::DebugInterface::Script> script,
- bool isLiveEdit) {
- m_isolate = script->GetIsolate();
- m_id = String16::fromInteger(script->Id());
- v8::Local<v8::String> tmp;
- if (script->Name().ToLocal(&tmp)) m_url = toProtocolString(tmp);
- if (script->SourceURL().ToLocal(&tmp)) {
- m_sourceURL = toProtocolString(tmp);
- if (m_url.isEmpty()) m_url = toProtocolString(tmp);
- }
- if (script->SourceMappingURL().ToLocal(&tmp))
- m_sourceMappingURL = toProtocolString(tmp);
- m_startLine = script->LineOffset();
- m_startColumn = script->ColumnOffset();
- std::vector<int> lineEnds = script->LineEnds();
- CHECK(lineEnds.size());
- int source_length = lineEnds[lineEnds.size() - 1];
- if (lineEnds.size()) {
- m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
- if (lineEnds.size() > 1) {
- m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+class ActualScript : public V8DebuggerScript {
+ friend class V8DebuggerScript;
+
+ public:
+ ActualScript(v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
+ bool isLiveEdit)
+ : V8DebuggerScript(isolate, String16::fromInteger(script->Id()),
+ GetNameOrSourceUrl(script)),
+ m_isLiveEdit(isLiveEdit) {
+ v8::Local<v8::String> tmp;
+ if (script->SourceURL().ToLocal(&tmp)) m_sourceURL = toProtocolString(tmp);
+ if (script->SourceMappingURL().ToLocal(&tmp))
+ m_sourceMappingURL = toProtocolString(tmp);
+ m_startLine = script->LineOffset();
+ m_startColumn = script->ColumnOffset();
+ std::vector<int> lineEnds = script->LineEnds();
+ CHECK(lineEnds.size());
+ int source_length = lineEnds[lineEnds.size() - 1];
+ if (lineEnds.size()) {
+ m_endLine = static_cast<int>(lineEnds.size()) + m_startLine - 1;
+ if (lineEnds.size() > 1) {
+ m_endColumn = source_length - lineEnds[lineEnds.size() - 2] - 1;
+ } else {
+ m_endColumn = source_length + m_startColumn;
+ }
} else {
- m_endColumn = source_length + m_startColumn;
+ m_endLine = m_startLine;
+ m_endColumn = m_startColumn;
+ }
+
+ v8::Local<v8::Value> contextData;
+ if (script->ContextData().ToLocal(&contextData) && contextData->IsInt32()) {
+ m_executionContextId =
+ static_cast<int>(contextData.As<v8::Int32>()->Value());
}
- } else {
- m_endLine = m_startLine;
- m_endColumn = m_startColumn;
- }
- if (script->ContextData().ToLocal(&tmp)) {
- String16 contextData = toProtocolString(tmp);
- size_t firstComma = contextData.find(",", 0);
- size_t secondComma = firstComma != String16::kNotFound
- ? contextData.find(",", firstComma + 1)
- : String16::kNotFound;
- if (secondComma != String16::kNotFound) {
- String16 executionContextId =
- contextData.substring(firstComma + 1, secondComma - firstComma - 1);
- bool isOk = false;
- m_executionContextId = executionContextId.toInteger(&isOk);
- if (!isOk) m_executionContextId = 0;
- m_executionContextAuxData = contextData.substring(secondComma + 1);
+ if (script->Source().ToLocal(&tmp)) {
+ m_sourceObj.Reset(m_isolate, tmp);
+ String16 source = toProtocolString(tmp);
+ // V8 will not count last line if script source ends with \n.
+ if (source.length() > 1 && source[source.length() - 1] == '\n') {
+ m_endLine++;
+ m_endColumn = 0;
+ }
}
+
+ m_script.Reset(m_isolate, script);
}
- m_isLiveEdit = isLiveEdit;
+ bool isLiveEdit() const override { return m_isLiveEdit; }
+
+ const String16& sourceMappingURL() const override {
+ return m_sourceMappingURL;
+ }
- if (script->Source().ToLocal(&tmp)) {
- m_source.Reset(m_isolate, tmp);
- String16 source = toProtocolString(tmp);
- m_hash = calculateHash(source);
- // V8 will not count last line if script source ends with \n.
- if (source.length() > 1 && source[source.length() - 1] == '\n') {
- m_endLine++;
- m_endColumn = 0;
+ String16 source(v8::Isolate* isolate) const override {
+ if (!m_sourceObj.IsEmpty())
+ return toProtocolString(m_sourceObj.Get(isolate));
+ return V8DebuggerScript::source(isolate);
+ }
+
+ void setSourceMappingURL(const String16& sourceMappingURL) override {
+ m_sourceMappingURL = sourceMappingURL;
+ }
+
+ void setSource(v8::Local<v8::String> source) override {
+ m_source = String16();
+ m_sourceObj.Reset(m_isolate, source);
+ m_hash = String16();
+ }
+
+ bool getPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::Location>* locations) override {
+ v8::HandleScope scope(m_isolate);
+ v8::Local<v8::debug::Script> script = m_script.Get(m_isolate);
+ return script->GetPossibleBreakpoints(start, end, locations);
+ }
+
+ private:
+ String16 GetNameOrSourceUrl(v8::Local<v8::debug::Script> script) {
+ v8::Local<v8::String> name;
+ if (script->Name().ToLocal(&name) || script->SourceURL().ToLocal(&name))
+ return toProtocolString(name);
+ return String16();
+ }
+
+ String16 m_sourceMappingURL;
+ v8::Global<v8::String> m_sourceObj;
+ bool m_isLiveEdit = false;
+ v8::Global<v8::debug::Script> m_script;
+};
+
+class WasmVirtualScript : public V8DebuggerScript {
+ friend class V8DebuggerScript;
+
+ public:
+ WasmVirtualScript(v8::Isolate* isolate,
+ v8::Local<v8::debug::WasmScript> script, String16 id,
+ String16 url, String16 source)
+ : V8DebuggerScript(isolate, std::move(id), std::move(url)),
+ m_script(isolate, script) {
+ int num_lines = 0;
+ int last_newline = -1;
+ size_t next_newline = source.find('\n', last_newline + 1);
+ while (next_newline != String16::kNotFound) {
+ last_newline = static_cast<int>(next_newline);
+ next_newline = source.find('\n', last_newline + 1);
+ ++num_lines;
}
+ m_endLine = num_lines;
+ m_endColumn = static_cast<int>(source.length()) - last_newline - 1;
+ m_source = std::move(source);
+ }
+
+ const String16& sourceMappingURL() const override { return emptyString(); }
+ bool isLiveEdit() const override { return false; }
+ void setSourceMappingURL(const String16&) override {}
+
+ bool getPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::Location>* locations) override {
+ // TODO(clemensh): Returning false produces the protocol error "Internal
+ // error". Implement and fix expected output of
+ // wasm-get-breakable-locations.js.
+ return false;
+ }
+
+ private:
+ static const String16& emptyString() {
+ static const String16 singleEmptyString;
+ return singleEmptyString;
}
- m_script.Reset(m_isolate, script);
+ v8::Global<v8::debug::WasmScript> m_script;
+};
+
+} // namespace
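The WasmVirtualScript constructor derives endLine/endColumn by scanning the disassembly text for newlines: after the loop, num_lines is the number of newlines (hence the 0-based index of the last line) and everything after the final newline is the last line's width. A worked example of the same loop over std::string (the wasm text is hypothetical):

    #include <cassert>
    #include <string>

    int main() {
      std::string source = "func $add\n  i32.add\nend";  // hypothetical text
      int num_lines = 0;
      int last_newline = -1;
      size_t next_newline = source.find('\n', last_newline + 1);
      while (next_newline != std::string::npos) {
        last_newline = static_cast<int>(next_newline);
        next_newline = source.find('\n', last_newline + 1);
        ++num_lines;
      }
      assert(num_lines == 2);  // endLine: last line is line 2 (0-based)
      // endColumn: characters after the final newline ("end" -> 3).
      assert(static_cast<int>(source.length()) - last_newline - 1 == 3);
      return 0;
    }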
+
+std::unique_ptr<V8DebuggerScript> V8DebuggerScript::Create(
+ v8::Isolate* isolate, v8::Local<v8::debug::Script> scriptObj,
+ bool isLiveEdit) {
+ return std::unique_ptr<ActualScript>(
+ new ActualScript(isolate, scriptObj, isLiveEdit));
+}
+
+std::unique_ptr<V8DebuggerScript> V8DebuggerScript::CreateWasm(
+ v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> underlyingScript,
+ String16 id, String16 url, String16 source) {
+ return std::unique_ptr<WasmVirtualScript>(
+ new WasmVirtualScript(isolate, underlyingScript, std::move(id),
+ std::move(url), std::move(source)));
}
+V8DebuggerScript::V8DebuggerScript(v8::Isolate* isolate, String16 id,
+ String16 url)
+ : m_id(std::move(id)), m_url(std::move(url)), m_isolate(isolate) {}
+
V8DebuggerScript::~V8DebuggerScript() {}
const String16& V8DebuggerScript::sourceURL() const {
return m_sourceURL.isEmpty() ? m_url : m_sourceURL;
}
-v8::Local<v8::String> V8DebuggerScript::source(v8::Isolate* isolate) const {
- return m_source.Get(isolate);
+const String16& V8DebuggerScript::hash(v8::Isolate* isolate) const {
+ if (m_hash.isEmpty()) m_hash = calculateHash(source(isolate));
+ DCHECK(!m_hash.isEmpty());
+ return m_hash;
}
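hash() is now computed on demand and memoized in the mutable m_hash; ActualScript::setSource clears the cache so the next call recomputes against the new source. A minimal sketch of the pattern with illustrative names (the stand-in hash below is not V8's algorithm):

    #include <functional>
    #include <string>

    class Script {
     public:
      // Const accessor fills the cache on first use.
      const std::string& hash() const {
        if (m_hash.empty()) m_hash = calculateHash(m_source);
        return m_hash;
      }
      void setSource(std::string source) {
        m_source = std::move(source);
        m_hash.clear();  // invalidate; recomputed lazily
      }

     private:
      static std::string calculateHash(const std::string& s) {
        return std::to_string(std::hash<std::string>{}(s));  // stand-in
      }
      std::string m_source;
      mutable std::string m_hash;  // cache, hence mutable
    };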
void V8DebuggerScript::setSourceURL(const String16& sourceURL) {
m_sourceURL = sourceURL;
}
-void V8DebuggerScript::setSourceMappingURL(const String16& sourceMappingURL) {
- m_sourceMappingURL = sourceMappingURL;
-}
-
-void V8DebuggerScript::setSource(v8::Local<v8::String> source) {
- m_source.Reset(m_isolate, source);
- m_hash = calculateHash(toProtocolString(source));
-}
-
-bool V8DebuggerScript::getPossibleBreakpoints(
- const v8::DebugInterface::Location& start,
- const v8::DebugInterface::Location& end,
- std::vector<v8::DebugInterface::Location>* locations) {
- v8::HandleScope scope(m_isolate);
- v8::Local<v8::DebugInterface::Script> script = m_script.Get(m_isolate);
- return script->GetPossibleBreakpoints(start, end, locations);
-}
-
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index 97b5ba9e51..58beefe5ec 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -32,6 +32,7 @@
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
+#include "src/inspector/string-util.h"
#include "include/v8.h"
#include "src/debug/debug-interface.h"
@@ -40,55 +41,56 @@ namespace v8_inspector {
class V8DebuggerScript {
public:
- V8DebuggerScript(v8::Isolate* isolate,
- v8::Local<v8::DebugInterface::Script> script,
- bool isLiveEdit);
- ~V8DebuggerScript();
+ static std::unique_ptr<V8DebuggerScript> Create(
+ v8::Isolate* isolate, v8::Local<v8::debug::Script> script,
+ bool isLiveEdit);
+ static std::unique_ptr<V8DebuggerScript> CreateWasm(
+ v8::Isolate* isolate, v8::Local<v8::debug::WasmScript> underlyingScript,
+ String16 id, String16 url, String16 source);
+
+ virtual ~V8DebuggerScript();
const String16& scriptId() const { return m_id; }
const String16& url() const { return m_url; }
bool hasSourceURL() const { return !m_sourceURL.isEmpty(); }
const String16& sourceURL() const;
- const String16& sourceMappingURL() const { return m_sourceMappingURL; }
- v8::Local<v8::String> source(v8::Isolate*) const;
- const String16& hash() const { return m_hash; }
+ virtual const String16& sourceMappingURL() const = 0;
+ virtual String16 source(v8::Isolate*) const { return m_source; }
+ const String16& hash(v8::Isolate*) const;
int startLine() const { return m_startLine; }
int startColumn() const { return m_startColumn; }
int endLine() const { return m_endLine; }
int endColumn() const { return m_endColumn; }
int executionContextId() const { return m_executionContextId; }
- const String16& executionContextAuxData() const {
- return m_executionContextAuxData;
- }
- bool isLiveEdit() const { return m_isLiveEdit; }
+ virtual bool isLiveEdit() const = 0;
void setSourceURL(const String16&);
- void setSourceMappingURL(const String16&);
- void setSource(v8::Local<v8::String>);
+ virtual void setSourceMappingURL(const String16&) = 0;
+ virtual void setSource(v8::Local<v8::String> source) {
+ m_source = toProtocolString(source);
+ }
- bool getPossibleBreakpoints(
- const v8::DebugInterface::Location& start,
- const v8::DebugInterface::Location& end,
- std::vector<v8::DebugInterface::Location>* locations);
+ virtual bool getPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::Location>* locations) = 0;
+
+ protected:
+ V8DebuggerScript(v8::Isolate*, String16 id, String16 url);
- private:
String16 m_id;
String16 m_url;
String16 m_sourceURL;
- String16 m_sourceMappingURL;
- v8::Global<v8::String> m_source;
- String16 m_hash;
- int m_startLine;
- int m_startColumn;
- int m_endLine;
- int m_endColumn;
- int m_executionContextId;
- String16 m_executionContextAuxData;
- bool m_isLiveEdit;
+ String16 m_source;
+ mutable String16 m_hash;
+ int m_startLine = 0;
+ int m_startColumn = 0;
+ int m_endLine = 0;
+ int m_endColumn = 0;
+ int m_executionContextId = 0;
v8::Isolate* m_isolate;
- v8::Global<v8::DebugInterface::Script> m_script;
+ private:
DISALLOW_COPY_AND_ASSIGN(V8DebuggerScript);
};
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index b3657e577c..2563f4f36c 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -5,6 +5,7 @@
#include "src/inspector/v8-debugger.h"
#include "src/inspector/debugger-script.h"
+#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/script-breakpoint.h"
#include "src/inspector/string-util.h"
@@ -19,11 +20,11 @@
namespace v8_inspector {
namespace {
-static const char v8AsyncTaskEventEnqueue[] = "enqueue";
-static const char v8AsyncTaskEventEnqueueRecurring[] = "enqueueRecurring";
-static const char v8AsyncTaskEventWillHandle[] = "willHandle";
-static const char v8AsyncTaskEventDidHandle[] = "didHandle";
-static const char v8AsyncTaskEventCancel[] = "cancel";
+
+// Based on DevTools frontend measurements, with asyncCallStackDepth = 4 an
+// average async call stack tail requires ~1 KB. Reserve ~128 MB for async
+// stacks.
+static const int kMaxAsyncTaskStacks = 128 * 1024;
inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
return value ? v8::True(isolate) : v8::False(isolate);
@@ -34,7 +35,8 @@ inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
static bool inLiveEditScope = false;
v8::MaybeLocal<v8::Value> V8Debugger::callDebuggerMethod(
- const char* functionName, int argc, v8::Local<v8::Value> argv[]) {
+ const char* functionName, int argc, v8::Local<v8::Value> argv[],
+ bool catchExceptions) {
v8::MicrotasksScope microtasks(m_isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
DCHECK(m_isolate->InContext());
@@ -44,19 +46,25 @@ v8::MaybeLocal<v8::Value> V8Debugger::callDebuggerMethod(
debuggerScript
->Get(context, toV8StringInternalized(m_isolate, functionName))
.ToLocalChecked());
+ if (catchExceptions) {
+ v8::TryCatch try_catch(m_isolate);
+ return function->Call(context, debuggerScript, argc, argv);
+ }
return function->Call(context, debuggerScript, argc, argv);
}
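The new catchExceptions flag relies purely on v8::TryCatch scoping: with a handler alive across Call, a JavaScript throw surfaces as an empty MaybeLocal and is swallowed when the TryCatch goes out of scope, instead of propagating to an outer handler. A fragment-level sketch of that contract (embedder boilerplate omitted; standard V8 API semantics assumed):

    v8::MaybeLocal<v8::Value> guardedCall(v8::Isolate* isolate,
                                          v8::Local<v8::Context> context,
                                          v8::Local<v8::Function> fn,
                                          v8::Local<v8::Value> recv) {
      v8::TryCatch try_catch(isolate);             // absorbs a throw in Call
      return fn->Call(context, recv, 0, nullptr);  // empty MaybeLocal on throw
    }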
V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
: m_isolate(isolate),
m_inspector(inspector),
- m_lastContextId(0),
m_enableCount(0),
m_breakpointsActivated(true),
m_runningNestedMessageLoop(false),
m_ignoreScriptParsedEventsCounter(0),
+ m_maxAsyncCallStacks(kMaxAsyncTaskStacks),
+ m_lastTaskId(0),
m_maxAsyncCallStackDepth(0),
- m_pauseOnExceptionsState(v8::DebugInterface::NoBreakOnException) {}
+ m_pauseOnExceptionsState(v8::debug::NoBreakOnException),
+ m_wasmTranslation(isolate) {}
V8Debugger::~V8Debugger() {}
@@ -64,14 +72,13 @@ void V8Debugger::enable() {
if (m_enableCount++) return;
DCHECK(!enabled());
v8::HandleScope scope(m_isolate);
- v8::DebugInterface::SetDebugEventListener(m_isolate,
- &V8Debugger::v8DebugEventCallback,
- v8::External::New(m_isolate, this));
- m_debuggerContext.Reset(m_isolate,
- v8::DebugInterface::GetDebugContext(m_isolate));
- v8::DebugInterface::ChangeBreakOnException(
- m_isolate, v8::DebugInterface::NoBreakOnException);
- m_pauseOnExceptionsState = v8::DebugInterface::NoBreakOnException;
+ v8::debug::SetDebugEventListener(m_isolate, &V8Debugger::v8DebugEventCallback,
+ v8::External::New(m_isolate, this));
+ v8::debug::SetAsyncTaskListener(m_isolate, &V8Debugger::v8AsyncTaskListener,
+ this);
+ m_debuggerContext.Reset(m_isolate, v8::debug::GetDebugContext(m_isolate));
+ v8::debug::ChangeBreakOnException(m_isolate, v8::debug::NoBreakOnException);
+ m_pauseOnExceptionsState = v8::debug::NoBreakOnException;
compileDebuggerScript();
}
@@ -82,61 +89,32 @@ void V8Debugger::disable() {
m_debuggerScript.Reset();
m_debuggerContext.Reset();
allAsyncTasksCanceled();
- v8::DebugInterface::SetDebugEventListener(m_isolate, nullptr);
+ m_wasmTranslation.Clear();
+ v8::debug::SetDebugEventListener(m_isolate, nullptr);
+ v8::debug::SetAsyncTaskListener(m_isolate, nullptr, nullptr);
}
bool V8Debugger::enabled() const { return !m_debuggerScript.IsEmpty(); }
-// static
-int V8Debugger::contextId(v8::Local<v8::Context> context) {
- v8::Local<v8::Value> data =
- context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
- if (data.IsEmpty() || !data->IsString()) return 0;
- String16 dataString = toProtocolString(data.As<v8::String>());
- if (dataString.isEmpty()) return 0;
- size_t commaPos = dataString.find(",");
- if (commaPos == String16::kNotFound) return 0;
- size_t commaPos2 = dataString.find(",", commaPos + 1);
- if (commaPos2 == String16::kNotFound) return 0;
- return dataString.substring(commaPos + 1, commaPos2 - commaPos - 1)
- .toInteger();
-}
-
-// static
-int V8Debugger::getGroupId(v8::Local<v8::Context> context) {
- v8::Local<v8::Value> data =
- context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
- if (data.IsEmpty() || !data->IsString()) return 0;
- String16 dataString = toProtocolString(data.As<v8::String>());
- if (dataString.isEmpty()) return 0;
- size_t commaPos = dataString.find(",");
- if (commaPos == String16::kNotFound) return 0;
- return dataString.substring(0, commaPos).toInteger();
-}
-
void V8Debugger::getCompiledScripts(
int contextGroupId,
std::vector<std::unique_ptr<V8DebuggerScript>>& result) {
v8::HandleScope scope(m_isolate);
- v8::PersistentValueVector<v8::DebugInterface::Script> scripts(m_isolate);
- v8::DebugInterface::GetLoadedScripts(m_isolate, scripts);
- String16 contextPrefix = String16::fromInteger(contextGroupId) + ",";
+ v8::PersistentValueVector<v8::debug::Script> scripts(m_isolate);
+ v8::debug::GetLoadedScripts(m_isolate, scripts);
for (size_t i = 0; i < scripts.Size(); ++i) {
- v8::Local<v8::DebugInterface::Script> script = scripts.Get(i);
+ v8::Local<v8::debug::Script> script = scripts.Get(i);
if (!script->WasCompiled()) continue;
- v8::ScriptOriginOptions origin = script->OriginOptions();
- if (origin.IsEmbedderDebugScript()) continue;
- v8::Local<v8::String> v8ContextData;
- if (!script->ContextData().ToLocal(&v8ContextData)) continue;
- String16 contextData = toProtocolString(v8ContextData);
- if (contextData.find(contextPrefix) != 0) continue;
- result.push_back(
- wrapUnique(new V8DebuggerScript(m_isolate, script, false)));
+ v8::Local<v8::Value> contextData;
+ if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32())
+ continue;
+ int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+ if (m_inspector->contextGroupId(contextId) != contextGroupId) continue;
+ result.push_back(V8DebuggerScript::Create(m_isolate, script, false));
}
}
-String16 V8Debugger::setBreakpoint(const String16& sourceID,
- const ScriptBreakpoint& scriptBreakpoint,
+String16 V8Debugger::setBreakpoint(const ScriptBreakpoint& breakpoint,
int* actualLineNumber,
int* actualColumnNumber) {
v8::HandleScope scope(m_isolate);
@@ -146,20 +124,20 @@ String16 V8Debugger::setBreakpoint(const String16& sourceID,
v8::Local<v8::Object> info = v8::Object::New(m_isolate);
bool success = false;
success = info->Set(context, toV8StringInternalized(m_isolate, "sourceID"),
- toV8String(m_isolate, sourceID))
+ toV8String(m_isolate, breakpoint.script_id))
.FromMaybe(false);
DCHECK(success);
success = info->Set(context, toV8StringInternalized(m_isolate, "lineNumber"),
- v8::Integer::New(m_isolate, scriptBreakpoint.lineNumber))
+ v8::Integer::New(m_isolate, breakpoint.line_number))
.FromMaybe(false);
DCHECK(success);
success =
info->Set(context, toV8StringInternalized(m_isolate, "columnNumber"),
- v8::Integer::New(m_isolate, scriptBreakpoint.columnNumber))
+ v8::Integer::New(m_isolate, breakpoint.column_number))
.FromMaybe(false);
DCHECK(success);
success = info->Set(context, toV8StringInternalized(m_isolate, "condition"),
- toV8String(m_isolate, scriptBreakpoint.condition))
+ toV8String(m_isolate, breakpoint.condition))
.FromMaybe(false);
DCHECK(success);
@@ -168,7 +146,7 @@ String16 V8Debugger::setBreakpoint(const String16& sourceID,
->Get(context, toV8StringInternalized(m_isolate, "setBreakpoint"))
.ToLocalChecked());
v8::Local<v8::Value> breakpointId =
- v8::DebugInterface::Call(debuggerContext(), setBreakpointFunction, info)
+ v8::debug::Call(debuggerContext(), setBreakpointFunction, info)
.ToLocalChecked();
if (!breakpointId->IsString()) return "";
*actualLineNumber =
@@ -203,7 +181,7 @@ void V8Debugger::removeBreakpoint(const String16& breakpointId) {
->Get(context,
toV8StringInternalized(m_isolate, "removeBreakpoint"))
.ToLocalChecked());
- v8::DebugInterface::Call(debuggerContext(), removeBreakpointFunction, info)
+ v8::debug::Call(debuggerContext(), removeBreakpointFunction, info)
.ToLocalChecked();
}
@@ -216,8 +194,7 @@ void V8Debugger::clearBreakpoints() {
m_debuggerScript.Get(m_isolate)
->Get(context, toV8StringInternalized(m_isolate, "clearBreakpoints"))
.ToLocalChecked());
- v8::DebugInterface::Call(debuggerContext(), clearBreakpoints)
- .ToLocalChecked();
+ v8::debug::Call(debuggerContext(), clearBreakpoints).ToLocalChecked();
}
void V8Debugger::setBreakpointsActivated(bool activated) {
@@ -241,32 +218,31 @@ void V8Debugger::setBreakpointsActivated(bool activated) {
->Get(context, toV8StringInternalized(m_isolate,
"setBreakpointsActivated"))
.ToLocalChecked());
- v8::DebugInterface::Call(debuggerContext(), setBreakpointsActivated, info)
+ v8::debug::Call(debuggerContext(), setBreakpointsActivated, info)
.ToLocalChecked();
m_breakpointsActivated = activated;
}
-v8::DebugInterface::ExceptionBreakState
-V8Debugger::getPauseOnExceptionsState() {
+v8::debug::ExceptionBreakState V8Debugger::getPauseOnExceptionsState() {
DCHECK(enabled());
return m_pauseOnExceptionsState;
}
void V8Debugger::setPauseOnExceptionsState(
- v8::DebugInterface::ExceptionBreakState pauseOnExceptionsState) {
+ v8::debug::ExceptionBreakState pauseOnExceptionsState) {
DCHECK(enabled());
if (m_pauseOnExceptionsState == pauseOnExceptionsState) return;
- v8::DebugInterface::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
+ v8::debug::ChangeBreakOnException(m_isolate, pauseOnExceptionsState);
m_pauseOnExceptionsState = pauseOnExceptionsState;
}
void V8Debugger::setPauseOnNextStatement(bool pause) {
if (m_runningNestedMessageLoop) return;
if (pause)
- v8::DebugInterface::DebugBreak(m_isolate);
+ v8::debug::DebugBreak(m_isolate);
else
- v8::DebugInterface::CancelDebugBreak(m_isolate);
+ v8::debug::CancelDebugBreak(m_isolate);
}
bool V8Debugger::canBreakProgram() {
@@ -294,7 +270,7 @@ void V8Debugger::breakProgram() {
v8::ConstructorBehavior::kThrow)
.ToLocal(&breakFunction))
return;
- v8::DebugInterface::Call(debuggerContext(), breakFunction).ToLocalChecked();
+ v8::debug::Call(debuggerContext(), breakFunction).ToLocalChecked();
}
void V8Debugger::continueProgram() {
@@ -306,27 +282,27 @@ void V8Debugger::continueProgram() {
void V8Debugger::stepIntoStatement() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
continueProgram();
}
void V8Debugger::stepOverStatement() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepNext);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepNext);
continueProgram();
}
void V8Debugger::stepOutOfFunction() {
DCHECK(isPaused());
DCHECK(!m_executionState.IsEmpty());
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
continueProgram();
}
void V8Debugger::clearStepping() {
DCHECK(enabled());
- v8::DebugInterface::ClearStepping(m_isolate);
+ v8::debug::ClearStepping(m_isolate);
}
Response V8Debugger::setScriptSource(
@@ -337,11 +313,11 @@ Response V8Debugger::setScriptSource(
class EnableLiveEditScope {
public:
explicit EnableLiveEditScope(v8::Isolate* isolate) : m_isolate(isolate) {
- v8::DebugInterface::SetLiveEditEnabled(m_isolate, true);
+ v8::debug::SetLiveEditEnabled(m_isolate, true);
inLiveEditScope = true;
}
~EnableLiveEditScope() {
- v8::DebugInterface::SetLiveEditEnabled(m_isolate, false);
+ v8::debug::SetLiveEditEnabled(m_isolate, false);
inLiveEditScope = false;
}
@@ -355,7 +331,7 @@ Response V8Debugger::setScriptSource(
std::unique_ptr<v8::Context::Scope> contextScope;
if (!isPaused())
- contextScope = wrapUnique(new v8::Context::Scope(debuggerContext()));
+ contextScope.reset(new v8::Context::Scope(debuggerContext()));
v8::Local<v8::Value> argv[] = {toV8String(m_isolate, sourceID), newSource,
v8Boolean(dryRun, m_isolate)};
@@ -366,7 +342,7 @@ Response V8Debugger::setScriptSource(
v8::TryCatch tryCatch(m_isolate);
tryCatch.SetVerbose(false);
v8::MaybeLocal<v8::Value> maybeResult =
- callDebuggerMethod("liveEditScriptSource", 3, argv);
+ callDebuggerMethod("liveEditScriptSource", 3, argv, false);
if (tryCatch.HasCaught()) {
v8::Local<v8::Message> message = tryCatch.Message();
if (!message.IsEmpty())
@@ -436,16 +412,16 @@ JavaScriptCallFrames V8Debugger::currentCallFrames(int limit) {
->Get(debuggerContext(),
toV8StringInternalized(m_isolate, "currentCallFrames"))
.ToLocalChecked());
- currentCallFramesV8 =
- v8::DebugInterface::Call(debuggerContext(), currentCallFramesFunction,
- v8::Integer::New(m_isolate, limit))
- .ToLocalChecked();
+ if (!v8::debug::Call(debuggerContext(), currentCallFramesFunction,
+ v8::Integer::New(m_isolate, limit))
+ .ToLocal(&currentCallFramesV8))
+ return JavaScriptCallFrames();
} else {
v8::Local<v8::Value> argv[] = {m_executionState,
v8::Integer::New(m_isolate, limit)};
- currentCallFramesV8 =
- callDebuggerMethod("currentCallFrames", arraysize(argv), argv)
- .ToLocalChecked();
+ if (!callDebuggerMethod("currentCallFrames", arraysize(argv), argv, true)
+ .ToLocal(&currentCallFramesV8))
+ return JavaScriptCallFrames();
}
DCHECK(!currentCallFramesV8.IsEmpty());
if (!currentCallFramesV8->IsArray()) return JavaScriptCallFrames();
@@ -490,8 +466,8 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
// Don't allow nested breaks.
if (m_runningNestedMessageLoop) return;
- V8DebuggerAgentImpl* agent =
- m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+ V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
+ m_inspector->contextGroupId(pausedContext));
if (!agent) return;
std::vector<String16> breakpointIds;
@@ -512,12 +488,16 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
pausedContext, exception, breakpointIds, isPromiseRejection, isUncaught);
if (result == V8DebuggerAgentImpl::RequestNoSkip) {
m_runningNestedMessageLoop = true;
- int groupId = getGroupId(pausedContext);
+ int groupId = m_inspector->contextGroupId(pausedContext);
DCHECK(groupId);
+ v8::Context::Scope scope(pausedContext);
+ v8::Local<v8::Context> context = m_isolate->GetCurrentContext();
+ CHECK(!context.IsEmpty() &&
+ context != v8::debug::GetDebugContext(m_isolate));
m_inspector->client()->runMessageLoopOnPause(groupId);
// The agent may have been removed in the nested loop.
- agent =
- m_inspector->enabledDebuggerAgentForGroup(getGroupId(pausedContext));
+ agent = m_inspector->enabledDebuggerAgentForGroup(
+ m_inspector->contextGroupId(pausedContext));
if (agent) agent->didContinue();
m_runningNestedMessageLoop = false;
}
@@ -525,16 +505,16 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
m_executionState.Clear();
if (result == V8DebuggerAgentImpl::RequestStepFrame) {
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepFrame);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepFrame);
} else if (result == V8DebuggerAgentImpl::RequestStepInto) {
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepIn);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
} else if (result == V8DebuggerAgentImpl::RequestStepOut) {
- v8::DebugInterface::PrepareStep(m_isolate, v8::DebugInterface::StepOut);
+ v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
}
}
void V8Debugger::v8DebugEventCallback(
- const v8::DebugInterface::EventDetails& eventDetails) {
+ const v8::debug::EventDetails& eventDetails) {
V8Debugger* thisPtr = toV8Debugger(eventDetails.GetCallbackData());
thisPtr->handleV8DebugEvent(eventDetails);
}
@@ -555,100 +535,99 @@ v8::Local<v8::Value> V8Debugger::callInternalGetterFunction(
}
void V8Debugger::handleV8DebugEvent(
- const v8::DebugInterface::EventDetails& eventDetails) {
+ const v8::debug::EventDetails& eventDetails) {
if (!enabled()) return;
+ v8::HandleScope scope(m_isolate);
+
v8::DebugEvent event = eventDetails.GetEvent();
- if (event != v8::AsyncTaskEvent && event != v8::Break &&
- event != v8::Exception && event != v8::AfterCompile &&
- event != v8::BeforeCompile && event != v8::CompileError)
+ if (event != v8::Break && event != v8::Exception &&
+ event != v8::AfterCompile && event != v8::CompileError)
return;
v8::Local<v8::Context> eventContext = eventDetails.GetEventContext();
DCHECK(!eventContext.IsEmpty());
+ V8DebuggerAgentImpl* agent = m_inspector->enabledDebuggerAgentForGroup(
+ m_inspector->contextGroupId(eventContext));
+ if (!agent) return;
- if (event == v8::AsyncTaskEvent) {
- v8::HandleScope scope(m_isolate);
- handleV8AsyncTaskEvent(eventContext, eventDetails.GetExecutionState(),
- eventDetails.GetEventData());
- return;
- }
-
- V8DebuggerAgentImpl* agent =
- m_inspector->enabledDebuggerAgentForGroup(getGroupId(eventContext));
- if (agent) {
- v8::HandleScope scope(m_isolate);
- if (m_ignoreScriptParsedEventsCounter == 0 &&
- (event == v8::AfterCompile || event == v8::CompileError)) {
- v8::Local<v8::Context> context = debuggerContext();
- v8::Context::Scope contextScope(context);
- v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
- v8::Local<v8::Value> value =
- callDebuggerMethod("getAfterCompileScript", 1, argv).ToLocalChecked();
- if (value->IsNull()) return;
- DCHECK(value->IsObject());
- v8::Local<v8::Object> scriptObject = v8::Local<v8::Object>::Cast(value);
- v8::Local<v8::DebugInterface::Script> script;
- if (!v8::DebugInterface::Script::Wrap(m_isolate, scriptObject)
- .ToLocal(&script))
- return;
+ if (event == v8::AfterCompile || event == v8::CompileError) {
+ v8::Context::Scope contextScope(debuggerContext());
+ // Determine if the script is a wasm script.
+ v8::Local<v8::Value> scriptMirror =
+ callInternalGetterFunction(eventDetails.GetEventData(), "script");
+ DCHECK(scriptMirror->IsObject());
+ v8::Local<v8::Value> scriptWrapper =
+ callInternalGetterFunction(scriptMirror.As<v8::Object>(), "value");
+ DCHECK(scriptWrapper->IsObject());
+ v8::Local<v8::debug::Script> script;
+ if (!v8::debug::Script::Wrap(m_isolate, scriptWrapper.As<v8::Object>())
+ .ToLocal(&script)) {
+ return;
+ }
+ if (script->IsWasm()) {
+ m_wasmTranslation.AddScript(script.As<v8::debug::WasmScript>(), agent);
+ } else if (m_ignoreScriptParsedEventsCounter == 0) {
agent->didParseSource(
- wrapUnique(new V8DebuggerScript(m_isolate, script, inLiveEditScope)),
+ V8DebuggerScript::Create(m_isolate, script, inLiveEditScope),
event == v8::AfterCompile);
- } else if (event == v8::Exception) {
- v8::Local<v8::Context> context = debuggerContext();
- v8::Local<v8::Object> eventData = eventDetails.GetEventData();
- v8::Local<v8::Value> exception =
- callInternalGetterFunction(eventData, "exception");
- v8::Local<v8::Value> promise =
- callInternalGetterFunction(eventData, "promise");
- bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
- v8::Local<v8::Value> uncaught =
- callInternalGetterFunction(eventData, "uncaught");
- bool isUncaught = uncaught->BooleanValue(context).FromJust();
- handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
- exception, v8::Local<v8::Array>(), isPromiseRejection,
- isUncaught);
- } else if (event == v8::Break) {
- v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
- v8::Local<v8::Value> hitBreakpoints =
- callDebuggerMethod("getBreakpointNumbers", 1, argv).ToLocalChecked();
- DCHECK(hitBreakpoints->IsArray());
- handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
- v8::Local<v8::Value>(),
- hitBreakpoints.As<v8::Array>());
}
+ } else if (event == v8::Exception) {
+ v8::Local<v8::Context> context = debuggerContext();
+ v8::Local<v8::Object> eventData = eventDetails.GetEventData();
+ v8::Local<v8::Value> exception =
+ callInternalGetterFunction(eventData, "exception");
+ v8::Local<v8::Value> promise =
+ callInternalGetterFunction(eventData, "promise");
+ bool isPromiseRejection = !promise.IsEmpty() && promise->IsObject();
+ v8::Local<v8::Value> uncaught =
+ callInternalGetterFunction(eventData, "uncaught");
+ bool isUncaught = uncaught->BooleanValue(context).FromJust();
+ handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+ exception, v8::Local<v8::Array>(), isPromiseRejection,
+ isUncaught);
+ } else if (event == v8::Break) {
+ v8::Local<v8::Value> argv[] = {eventDetails.GetEventData()};
+ v8::Local<v8::Value> hitBreakpoints;
+ if (!callDebuggerMethod("getBreakpointNumbers", 1, argv, true)
+ .ToLocal(&hitBreakpoints))
+ return;
+ DCHECK(hitBreakpoints->IsArray());
+ handleProgramBreak(eventContext, eventDetails.GetExecutionState(),
+ v8::Local<v8::Value>(), hitBreakpoints.As<v8::Array>());
}
}
-void V8Debugger::handleV8AsyncTaskEvent(v8::Local<v8::Context> context,
- v8::Local<v8::Object> executionState,
- v8::Local<v8::Object> eventData) {
- if (!m_maxAsyncCallStackDepth) return;
-
- String16 type = toProtocolStringWithTypeCheck(
- callInternalGetterFunction(eventData, "type"));
- String16 name = toProtocolStringWithTypeCheck(
- callInternalGetterFunction(eventData, "name"));
- int id = static_cast<int>(callInternalGetterFunction(eventData, "id")
- ->ToInteger(context)
- .ToLocalChecked()
- ->Value());
+void V8Debugger::v8AsyncTaskListener(v8::debug::PromiseDebugActionType type,
+ int id, void* data) {
+ V8Debugger* debugger = static_cast<V8Debugger*>(data);
+ if (!debugger->m_maxAsyncCallStackDepth) return;
// Async task events from Promises are given misaligned pointers to prevent
// them from overlapping with other Blink task identifiers. There is a single
// namespace of such ids, managed by src/js/promise.js.
void* ptr = reinterpret_cast<void*>(id * 2 + 1);
- if (type == v8AsyncTaskEventEnqueue)
- asyncTaskScheduled(name, ptr, false);
- else if (type == v8AsyncTaskEventEnqueueRecurring)
- asyncTaskScheduled(name, ptr, true);
- else if (type == v8AsyncTaskEventWillHandle)
- asyncTaskStarted(ptr);
- else if (type == v8AsyncTaskEventDidHandle)
- asyncTaskFinished(ptr);
- else if (type == v8AsyncTaskEventCancel)
- asyncTaskCanceled(ptr);
- else
- UNREACHABLE();
+ switch (type) {
+ case v8::debug::kDebugEnqueueAsyncFunction:
+ debugger->asyncTaskScheduled("async function", ptr, true);
+ break;
+ case v8::debug::kDebugEnqueuePromiseResolve:
+ debugger->asyncTaskScheduled("Promise.resolve", ptr, true);
+ break;
+ case v8::debug::kDebugEnqueuePromiseReject:
+ debugger->asyncTaskScheduled("Promise.reject", ptr, true);
+ break;
+ case v8::debug::kDebugEnqueuePromiseResolveThenableJob:
+ debugger->asyncTaskScheduled("PromiseResolveThenableJob", ptr, true);
+ break;
+ case v8::debug::kDebugPromiseCollected:
+ debugger->asyncTaskCanceled(ptr);
+ break;
+ case v8::debug::kDebugWillHandle:
+ debugger->asyncTaskStarted(ptr);
+ break;
+ case v8::debug::kDebugDidHandle:
+ debugger->asyncTaskFinished(ptr);
+ break;
+ }
}
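The id-to-pointer tagging above exploits alignment: real task pointers are at least 2-byte aligned, so id * 2 + 1 always produces an odd value that cannot collide with a genuine task address, and the id remains recoverable. A self-contained check of both properties:

    #include <cassert>
    #include <cstdint>

    int main() {
      int id = 42;  // illustrative promise id
      void* tagged =
          reinterpret_cast<void*>(static_cast<intptr_t>(id) * 2 + 1);
      intptr_t bits = reinterpret_cast<intptr_t>(tagged);
      assert(bits % 2 == 1);                           // never a real pointer
      assert(static_cast<int>((bits - 1) / 2) == id);  // id is recoverable
      return 0;
    }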
V8StackTraceImpl* V8Debugger::currentAsyncCallChain() {
@@ -685,15 +664,27 @@ v8::Local<v8::Context> V8Debugger::debuggerContext() const {
return m_debuggerContext.Get(m_isolate);
}
-v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
- v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+v8::MaybeLocal<v8::Value> V8Debugger::getTargetScopes(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> value,
+ ScopeTargetKind kind) {
if (!enabled()) {
UNREACHABLE();
return v8::Local<v8::Value>::New(m_isolate, v8::Undefined(m_isolate));
}
- v8::Local<v8::Value> argv[] = {function};
+ v8::Local<v8::Value> argv[] = {value};
v8::Local<v8::Value> scopesValue;
- if (!callDebuggerMethod("getFunctionScopes", 1, argv).ToLocal(&scopesValue))
+
+ const char* debuggerMethod = nullptr;
+ switch (kind) {
+ case FUNCTION:
+ debuggerMethod = "getFunctionScopes";
+ break;
+ case GENERATOR:
+ debuggerMethod = "getGeneratorScopes";
+ break;
+ }
+
+ if (!callDebuggerMethod(debuggerMethod, 1, argv, true).ToLocal(&scopesValue))
return v8::MaybeLocal<v8::Value>();
v8::Local<v8::Value> copied;
if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
@@ -710,11 +701,20 @@ v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
return copied;
}
+v8::MaybeLocal<v8::Value> V8Debugger::functionScopes(
+ v8::Local<v8::Context> context, v8::Local<v8::Function> function) {
+ return getTargetScopes(context, function, FUNCTION);
+}
+
+v8::MaybeLocal<v8::Value> V8Debugger::generatorScopes(
+ v8::Local<v8::Context> context, v8::Local<v8::Value> generator) {
+ return getTargetScopes(context, generator, GENERATOR);
+}
+
v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
v8::Local<v8::Array> properties;
- if (!v8::DebugInterface::GetInternalProperties(m_isolate, value)
- .ToLocal(&properties))
+ if (!v8::debug::GetInternalProperties(m_isolate, value).ToLocal(&properties))
return v8::MaybeLocal<v8::Array>();
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
@@ -752,6 +752,12 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
toV8StringInternalized(m_isolate, "[[GeneratorLocation]]"));
createDataProperty(context, properties, properties->Length(), location);
}
+ v8::Local<v8::Value> scopes;
+ if (generatorScopes(context, value).ToLocal(&scopes)) {
+ createDataProperty(context, properties, properties->Length(),
+ toV8StringInternalized(m_isolate, "[[Scopes]]"));
+ createDataProperty(context, properties, properties->Length(), scopes);
+ }
}
if (value->IsFunction()) {
v8::Local<v8::Function> function = value.As<v8::Function>();
@@ -774,9 +780,11 @@ v8::Local<v8::Value> V8Debugger::collectionEntries(
return v8::Undefined(m_isolate);
}
v8::Local<v8::Value> argv[] = {object};
- v8::Local<v8::Value> entriesValue =
- callDebuggerMethod("getCollectionEntries", 1, argv).ToLocalChecked();
- if (!entriesValue->IsArray()) return v8::Undefined(m_isolate);
+ v8::Local<v8::Value> entriesValue;
+ if (!callDebuggerMethod("getCollectionEntries", 1, argv, true)
+ .ToLocal(&entriesValue) ||
+ !entriesValue->IsArray())
+ return v8::Undefined(m_isolate);
v8::Local<v8::Array> entries = entriesValue.As<v8::Array>();
v8::Local<v8::Array> copiedArray =
@@ -809,11 +817,11 @@ v8::Local<v8::Value> V8Debugger::generatorObjectLocation(
return v8::Null(m_isolate);
}
v8::Local<v8::Value> argv[] = {object};
- v8::Local<v8::Value> location =
- callDebuggerMethod("getGeneratorObjectLocation", 1, argv)
- .ToLocalChecked();
+ v8::Local<v8::Value> location;
v8::Local<v8::Value> copied;
- if (!copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
+ if (!callDebuggerMethod("getGeneratorObjectLocation", 1, argv, true)
+ .ToLocal(&location) ||
+ !copyValueFromDebuggerContext(m_isolate, debuggerContext(), context,
location)
.ToLocal(&copied) ||
!copied->IsObject())
@@ -861,23 +869,13 @@ bool V8Debugger::isPaused() { return !m_pausedContext.IsEmpty(); }
std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
v8::Local<v8::StackTrace> stackTrace) {
int contextGroupId =
- m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+ m_isolate->InContext()
+ ? m_inspector->contextGroupId(m_isolate->GetCurrentContext())
+ : 0;
return V8StackTraceImpl::create(this, contextGroupId, stackTrace,
V8StackTraceImpl::maxCallStackSizeToCapture);
}
-int V8Debugger::markContext(const V8ContextInfo& info) {
- DCHECK(info.context->GetIsolate() == m_isolate);
- int contextId = ++m_lastContextId;
- String16 debugData = String16::fromInteger(info.contextGroupId) + "," +
- String16::fromInteger(contextId) + "," +
- toString16(info.auxData);
- v8::Context::Scope contextScope(info.context);
- info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
- toV8String(m_isolate, debugData));
- return contextId;
-}
-
void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
if (depth <= 0)
m_maxAsyncCallStackDepthMap.erase(agent);
@@ -906,13 +904,22 @@ void V8Debugger::asyncTaskScheduled(const String16& taskName, void* task,
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
int contextGroupId =
- m_isolate->InContext() ? getGroupId(m_isolate->GetCurrentContext()) : 0;
+ m_isolate->InContext()
+ ? m_inspector->contextGroupId(m_isolate->GetCurrentContext())
+ : 0;
std::unique_ptr<V8StackTraceImpl> chain = V8StackTraceImpl::capture(
this, contextGroupId, V8StackTraceImpl::maxCallStackSizeToCapture,
taskName);
if (chain) {
m_asyncTaskStacks[task] = std::move(chain);
if (recurring) m_recurringTasks.insert(task);
+ int id = ++m_lastTaskId;
+ m_taskToId[task] = id;
+ m_idToTask[id] = task;
+ if (static_cast<int>(m_idToTask.size()) > m_maxAsyncCallStacks) {
+ void* taskToRemove = m_idToTask.begin()->second;
+ asyncTaskCanceled(taskToRemove);
+ }
}
}
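Eviction works because task ids are handed out monotonically and std::map iterates in key order: begin() is always the oldest live task, so exceeding the cap cancels tasks FIFO. A reduced sketch of the scheme (the real code routes removal through asyncTaskCanceled, which also drops the stored stack trace):

    #include <cassert>
    #include <map>
    #include <unordered_map>

    int main() {
      const int kMaxStacks = 2;  // illustrative cap
      int lastTaskId = 0;
      std::map<int, void*> idToTask;  // ordered: begin() == oldest
      std::unordered_map<void*, int> taskToId;

      auto schedule = [&](void* task) {
        int id = ++lastTaskId;
        taskToId[task] = id;
        idToTask[id] = task;
        if (static_cast<int>(idToTask.size()) > kMaxStacks) {
          void* oldest = idToTask.begin()->second;
          idToTask.erase(taskToId[oldest]);
          taskToId.erase(oldest);
        }
      };

      int a, b, c;
      schedule(&a);
      schedule(&b);
      schedule(&c);  // cap exceeded: &a (oldest) is evicted
      assert(taskToId.count(&a) == 0 && taskToId.count(&c) == 1);
      return 0;
    }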
@@ -920,6 +927,10 @@ void V8Debugger::asyncTaskCanceled(void* task) {
if (!m_maxAsyncCallStackDepth) return;
m_asyncTaskStacks.erase(task);
m_recurringTasks.erase(task);
+ auto it = m_taskToId.find(task);
+ if (it == m_taskToId.end()) return;
+ m_idToTask.erase(it->second);
+ m_taskToId.erase(it);
}
void V8Debugger::asyncTaskStarted(void* task) {
@@ -948,8 +959,13 @@ void V8Debugger::asyncTaskFinished(void* task) {
m_currentTasks.pop_back();
m_currentStacks.pop_back();
- if (m_recurringTasks.find(task) == m_recurringTasks.end())
+ if (m_recurringTasks.find(task) == m_recurringTasks.end()) {
m_asyncTaskStacks.erase(task);
+ auto it = m_taskToId.find(task);
+ if (it == m_taskToId.end()) return;
+ m_idToTask.erase(it->second);
+ m_taskToId.erase(it);
+ }
}
void V8Debugger::allAsyncTasksCanceled() {
@@ -957,6 +973,9 @@ void V8Debugger::allAsyncTasksCanceled() {
m_recurringTasks.clear();
m_currentStacks.clear();
m_currentTasks.clear();
+ m_idToTask.clear();
+ m_taskToId.clear();
+ m_lastTaskId = 0;
}
void V8Debugger::muteScriptParsedEvents() {
@@ -973,7 +992,8 @@ std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
if (!m_isolate->InContext()) return nullptr;
v8::HandleScope handles(m_isolate);
- int contextGroupId = getGroupId(m_isolate->GetCurrentContext());
+ int contextGroupId =
+ m_inspector->contextGroupId(m_isolate->GetCurrentContext());
if (!contextGroupId) return nullptr;
size_t stackSize =
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 4c7477899a..68fba6eaa8 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -13,6 +13,7 @@
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/wasm-translation.h"
#include "include/v8-inspector.h"
@@ -30,20 +31,16 @@ class V8Debugger {
V8Debugger(v8::Isolate*, V8InspectorImpl*);
~V8Debugger();
- static int contextId(v8::Local<v8::Context>);
- static int getGroupId(v8::Local<v8::Context>);
- int markContext(const V8ContextInfo&);
-
bool enabled() const;
- String16 setBreakpoint(const String16& sourceID, const ScriptBreakpoint&,
- int* actualLineNumber, int* actualColumnNumber);
+ String16 setBreakpoint(const ScriptBreakpoint&, int* actualLineNumber,
+ int* actualColumnNumber);
void removeBreakpoint(const String16& breakpointId);
void setBreakpointsActivated(bool);
bool breakpointsActivated() const { return m_breakpointsActivated; }
- v8::DebugInterface::ExceptionBreakState getPauseOnExceptionsState();
- void setPauseOnExceptionsState(v8::DebugInterface::ExceptionBreakState);
+ v8::debug::ExceptionBreakState getPauseOnExceptionsState();
+ void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
void setPauseOnNextStatement(bool);
bool canBreakProgram();
void breakProgram();
@@ -94,11 +91,16 @@ class V8Debugger {
V8InspectorImpl* inspector() { return m_inspector; }
+ WasmTranslation* wasmTranslation() { return &m_wasmTranslation; }
+
+ void setMaxAsyncTaskStacksForTest(int limit) { m_maxAsyncCallStacks = limit; }
+
private:
void compileDebuggerScript();
v8::MaybeLocal<v8::Value> callDebuggerMethod(const char* functionName,
int argc,
- v8::Local<v8::Value> argv[]);
+ v8::Local<v8::Value> argv[],
+ bool catchExceptions);
v8::Local<v8::Context> debuggerContext() const;
void clearBreakpoints();
@@ -109,13 +111,12 @@ class V8Debugger {
v8::Local<v8::Array> hitBreakpoints,
bool isPromiseRejection = false,
bool isUncaught = false);
- static void v8DebugEventCallback(const v8::DebugInterface::EventDetails&);
+ static void v8DebugEventCallback(const v8::debug::EventDetails&);
v8::Local<v8::Value> callInternalGetterFunction(v8::Local<v8::Object>,
const char* functionName);
- void handleV8DebugEvent(const v8::DebugInterface::EventDetails&);
- void handleV8AsyncTaskEvent(v8::Local<v8::Context>,
- v8::Local<v8::Object> executionState,
- v8::Local<v8::Object> eventData);
+ void handleV8DebugEvent(const v8::debug::EventDetails&);
+ static void v8AsyncTaskListener(v8::debug::PromiseDebugActionType type,
+ int id, void* data);
v8::Local<v8::Value> collectionEntries(v8::Local<v8::Context>,
v8::Local<v8::Object>);
@@ -123,12 +124,22 @@ class V8Debugger {
v8::Local<v8::Object>);
v8::Local<v8::Value> functionLocation(v8::Local<v8::Context>,
v8::Local<v8::Function>);
+
+ enum ScopeTargetKind {
+ FUNCTION,
+ GENERATOR,
+ };
+ v8::MaybeLocal<v8::Value> getTargetScopes(v8::Local<v8::Context>,
+ v8::Local<v8::Value>,
+ ScopeTargetKind);
+
v8::MaybeLocal<v8::Value> functionScopes(v8::Local<v8::Context>,
v8::Local<v8::Function>);
+ v8::MaybeLocal<v8::Value> generatorScopes(v8::Local<v8::Context>,
+ v8::Local<v8::Value>);
v8::Isolate* m_isolate;
V8InspectorImpl* m_inspector;
- int m_lastContextId;
int m_enableCount;
bool m_breakpointsActivated;
v8::Global<v8::Object> m_debuggerScript;
@@ -141,13 +152,19 @@ class V8Debugger {
using AsyncTaskToStackTrace =
protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
+ int m_maxAsyncCallStacks;
+ std::map<int, void*> m_idToTask;
+ std::unordered_map<void*, int> m_taskToId;
+ int m_lastTaskId;
protocol::HashSet<void*> m_recurringTasks;
int m_maxAsyncCallStackDepth;
std::vector<void*> m_currentTasks;
std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
- v8::DebugInterface::ExceptionBreakState m_pauseOnExceptionsState;
+ v8::debug::ExceptionBreakState m_pauseOnExceptionsState;
+
+ WasmTranslation m_wasmTranslation;
DISALLOW_COPY_AND_ASSIGN(V8Debugger);
};
diff --git a/deps/v8/src/inspector/v8-function-call.cc b/deps/v8/src/inspector/v8-function-call.cc
index 3880e3100e..b8c86d3da0 100644
--- a/deps/v8/src/inspector/v8-function-call.cc
+++ b/deps/v8/src/inspector/v8-function-call.cc
@@ -30,6 +30,7 @@
#include "src/inspector/v8-function-call.h"
+#include "src/inspector/inspected-context.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
@@ -89,7 +90,7 @@ v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
DCHECK(!info[i].IsEmpty());
}
- int contextGroupId = V8Debugger::getGroupId(m_context);
+ int contextGroupId = m_inspector->contextGroupId(m_context);
if (contextGroupId) {
m_inspector->client()->muteMetrics(contextGroupId);
m_inspector->muteExceptions(contextGroupId);
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index 0ff04e75b9..b3e3d11f51 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -5,6 +5,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
#include "src/inspector/injected-script.h"
+#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-debugger.h"
@@ -55,7 +56,7 @@ class GlobalObjectNameResolver final
const char* GetName(v8::Local<v8::Object> object) override {
InspectedContext* context = m_session->inspector()->getContext(
m_session->contextGroupId(),
- V8Debugger::contextId(object->CreationContext()));
+ InspectedContext::contextId(object->CreationContext()));
if (!context) return "";
String16 name = context->origin();
size_t length = name.length();
@@ -216,7 +217,7 @@ Response V8HeapProfilerAgentImpl::takeHeapSnapshot(Maybe<bool> reportProgress) {
if (!profiler) return Response::Error("Cannot access v8 heap profiler");
std::unique_ptr<HeapSnapshotProgress> progress;
if (reportProgress.fromMaybe(false))
- progress = wrapUnique(new HeapSnapshotProgress(&m_frontend));
+ progress.reset(new HeapSnapshotProgress(&m_frontend));
GlobalObjectNameResolver resolver(m_session);
const v8::HeapSnapshot* snapshot =
@@ -244,7 +245,7 @@ Response V8HeapProfilerAgentImpl::getObjectByHeapObjectId(
*result = m_session->wrapObject(heapObject->CreationContext(), heapObject,
objectGroup.fromMaybe(""), false);
- if (!result) return Response::Error("Object is not available");
+ if (!*result) return Response::Error("Object is not available");
return Response::OK();
}
@@ -260,7 +261,8 @@ Response V8HeapProfilerAgentImpl::addInspectedHeapObject(
if (!m_session->inspector()->client()->isInspectableHeapObject(heapObject))
return Response::Error("Object is not available");
- m_session->addInspectedObject(wrapUnique(new InspectableHeapObject(id)));
+ m_session->addInspectedObject(
+ std::unique_ptr<InspectableHeapObject>(new InspectableHeapObject(id)));
return Response::OK();
}
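Both hunks above follow the same mechanical migration: the inspector's local wrapUnique() helper (from protocol-platform.h, whose last include is dropped later in this patch) is replaced by spelling out the ownership transfer. The equivalent forms, shown with the type from the first hunk:

    // Before: wrapUnique(new T(args)) from protocol-platform.h.
    std::unique_ptr<HeapSnapshotProgress> progress;
    // After: reset() on a default-constructed pointer, as the hunk above does:
    progress.reset(new HeapSnapshotProgress(&m_frontend));
    // ...or, equivalently, direct construction:
    std::unique_ptr<HeapSnapshotProgress> progress2(
        new HeapSnapshotProgress(&m_frontend));

std::make_unique would be the modern spelling, but it is C++14 and presumably unavailable to this C++11-era tree.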
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index bd68548fbf..34e41208ac 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -45,7 +45,7 @@ namespace v8_inspector {
std::unique_ptr<V8Inspector> V8Inspector::create(v8::Isolate* isolate,
V8InspectorClient* client) {
- return wrapUnique(new V8InspectorImpl(isolate, client));
+ return std::unique_ptr<V8Inspector>(new V8InspectorImpl(isolate, client));
}
V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
@@ -54,10 +54,21 @@ V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
m_client(client),
m_debugger(new V8Debugger(isolate, this)),
m_capturingStackTracesCount(0),
- m_lastExceptionId(0) {}
+ m_lastExceptionId(0),
+ m_lastContextId(0) {}
V8InspectorImpl::~V8InspectorImpl() {}
+int V8InspectorImpl::contextGroupId(v8::Local<v8::Context> context) {
+ return contextGroupId(InspectedContext::contextId(context));
+}
+
+int V8InspectorImpl::contextGroupId(int contextId) {
+ protocol::HashMap<int, int>::iterator it =
+ m_contextIdToGroupIdMap.find(contextId);
+ return it != m_contextIdToGroupIdMap.end() ? it->second : 0;
+}
+
V8DebuggerAgentImpl* V8InspectorImpl::enabledDebuggerAgentForGroup(
int contextGroupId) {
V8InspectorSessionImpl* session = sessionForContextGroup(contextGroupId);
@@ -83,7 +94,7 @@ v8::MaybeLocal<v8::Value> V8InspectorImpl::runCompiledScript(
v8::Local<v8::Context> context, v8::Local<v8::Script> script) {
v8::MicrotasksScope microtasksScope(m_isolate,
v8::MicrotasksScope::kRunMicrotasks);
- int groupId = V8Debugger::getGroupId(context);
+ int groupId = contextGroupId(context);
if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
agent->willExecuteScript(script->GetUnboundScript()->GetId());
v8::MaybeLocal<v8::Value> result = script->Run(context);
@@ -97,9 +108,23 @@ v8::MaybeLocal<v8::Value> V8InspectorImpl::runCompiledScript(
v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
v8::Local<v8::Function> function, v8::Local<v8::Context> context,
v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
- v8::MicrotasksScope microtasksScope(m_isolate,
- v8::MicrotasksScope::kRunMicrotasks);
- int groupId = V8Debugger::getGroupId(context);
+ return callFunction(function, context, receiver, argc, info,
+ v8::MicrotasksScope::kRunMicrotasks);
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callInternalFunction(
+ v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+ v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[]) {
+ return callFunction(function, context, receiver, argc, info,
+ v8::MicrotasksScope::kDoNotRunMicrotasks);
+}
+
+v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
+ v8::Local<v8::Function> function, v8::Local<v8::Context> context,
+ v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[],
+ v8::MicrotasksScope::Type runMicrotasks) {
+ v8::MicrotasksScope microtasksScope(m_isolate, runMicrotasks);
+ int groupId = contextGroupId(context);
if (V8DebuggerAgentImpl* agent = enabledDebuggerAgentForGroup(groupId))
agent->willExecuteScript(function->ScriptId());
v8::MaybeLocal<v8::Value> result =
@@ -113,32 +138,28 @@ v8::MaybeLocal<v8::Value> V8InspectorImpl::callFunction(
v8::MaybeLocal<v8::Value> V8InspectorImpl::compileAndRunInternalScript(
v8::Local<v8::Context> context, v8::Local<v8::String> source) {
- v8::Local<v8::Script> script =
- compileScript(context, source, String16(), true);
- if (script.IsEmpty()) return v8::MaybeLocal<v8::Value>();
+ v8::Local<v8::UnboundScript> unboundScript;
+ if (!v8::debug::CompileInspectorScript(m_isolate, source)
+ .ToLocal(&unboundScript))
+ return v8::MaybeLocal<v8::Value>();
v8::MicrotasksScope microtasksScope(m_isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
- return script->Run(context);
+ v8::Context::Scope contextScope(context);
+ return unboundScript->BindToCurrentContext()->Run(context);
}
-v8::Local<v8::Script> V8InspectorImpl::compileScript(
- v8::Local<v8::Context> context, v8::Local<v8::String> code,
- const String16& fileName, bool markAsInternal) {
+v8::MaybeLocal<v8::Script> V8InspectorImpl::compileScript(
+ v8::Local<v8::Context> context, const String16& code,
+ const String16& fileName) {
v8::ScriptOrigin origin(
toV8String(m_isolate, fileName), v8::Integer::New(m_isolate, 0),
v8::Integer::New(m_isolate, 0),
- v8::False(m_isolate), // sharable
- v8::Local<v8::Integer>(),
- v8::Boolean::New(m_isolate, markAsInternal), // internal
- toV8String(m_isolate, String16()), // sourceMap
- v8::True(m_isolate)); // opaqueresource
- v8::ScriptCompiler::Source source(code, origin);
- v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(context, &source,
- v8::ScriptCompiler::kNoCompileOptions)
- .ToLocal(&script))
- return v8::Local<v8::Script>();
- return script;
+ v8::False(m_isolate), // sharable
+ v8::Local<v8::Integer>(), toV8String(m_isolate, String16()), // sourceMap
+ v8::True(m_isolate)); // opaque resource
+ v8::ScriptCompiler::Source source(toV8String(m_isolate, code), origin);
+ return v8::ScriptCompiler::Compile(context, &source,
+ v8::ScriptCompiler::kNoCompileOptions);
}
void V8InspectorImpl::enableStackCapturingIfNeeded() {
@@ -167,12 +188,12 @@ V8ConsoleMessageStorage* V8InspectorImpl::ensureConsoleMessageStorage(
ConsoleStorageMap::iterator storageIt =
m_consoleStorageMap.find(contextGroupId);
if (storageIt == m_consoleStorageMap.end())
- storageIt =
- m_consoleStorageMap
- .insert(std::make_pair(
- contextGroupId,
- wrapUnique(new V8ConsoleMessageStorage(this, contextGroupId))))
- .first;
+ storageIt = m_consoleStorageMap
+ .insert(std::make_pair(
+ contextGroupId,
+ std::unique_ptr<V8ConsoleMessageStorage>(
+ new V8ConsoleMessageStorage(this, contextGroupId))))
+ .first;
return storageIt->second.get();
}
@@ -216,42 +237,43 @@ InspectedContext* V8InspectorImpl::getContext(int groupId,
}
void V8InspectorImpl::contextCreated(const V8ContextInfo& info) {
- int contextId = m_debugger->markContext(info);
+ int contextId = ++m_lastContextId;
+ InspectedContext* context = new InspectedContext(this, info, contextId);
+ m_contextIdToGroupIdMap[contextId] = info.contextGroupId;
ContextsByGroupMap::iterator contextIt = m_contexts.find(info.contextGroupId);
if (contextIt == m_contexts.end())
contextIt = m_contexts
- .insert(std::make_pair(info.contextGroupId,
- wrapUnique(new ContextByIdMap())))
+ .insert(std::make_pair(
+ info.contextGroupId,
+ std::unique_ptr<ContextByIdMap>(new ContextByIdMap())))
.first;
-
const auto& contextById = contextIt->second;
DCHECK(contextById->find(contextId) == contextById->cend());
- InspectedContext* context = new InspectedContext(this, info, contextId);
- (*contextById)[contextId] = wrapUnique(context);
+ (*contextById)[contextId].reset(context);
SessionMap::iterator sessionIt = m_sessions.find(info.contextGroupId);
if (sessionIt != m_sessions.end())
sessionIt->second->runtimeAgent()->reportExecutionContextCreated(context);
}
void V8InspectorImpl::contextDestroyed(v8::Local<v8::Context> context) {
- int contextId = V8Debugger::contextId(context);
- int contextGroupId = V8Debugger::getGroupId(context);
+ int contextId = InspectedContext::contextId(context);
+ int groupId = contextGroupId(context);
+ m_contextIdToGroupIdMap.erase(contextId);
- ConsoleStorageMap::iterator storageIt =
- m_consoleStorageMap.find(contextGroupId);
+ ConsoleStorageMap::iterator storageIt = m_consoleStorageMap.find(groupId);
if (storageIt != m_consoleStorageMap.end())
storageIt->second->contextDestroyed(contextId);
- InspectedContext* inspectedContext = getContext(contextGroupId, contextId);
+ InspectedContext* inspectedContext = getContext(groupId, contextId);
if (!inspectedContext) return;
- SessionMap::iterator iter = m_sessions.find(contextGroupId);
+ SessionMap::iterator iter = m_sessions.find(groupId);
if (iter != m_sessions.end())
iter->second->runtimeAgent()->reportExecutionContextDestroyed(
inspectedContext);
- discardInspectedContext(contextGroupId, contextId);
+ discardInspectedContext(groupId, contextId);
}
void V8InspectorImpl::resetContextGroup(int contextGroupId) {
@@ -260,19 +282,22 @@ void V8InspectorImpl::resetContextGroup(int contextGroupId) {
SessionMap::iterator session = m_sessions.find(contextGroupId);
if (session != m_sessions.end()) session->second->reset();
m_contexts.erase(contextGroupId);
+ m_debugger->wasmTranslation()->Clear();
}
void V8InspectorImpl::willExecuteScript(v8::Local<v8::Context> context,
int scriptId) {
if (V8DebuggerAgentImpl* agent =
- enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+ enabledDebuggerAgentForGroup(contextGroupId(context))) {
agent->willExecuteScript(scriptId);
+ }
}
void V8InspectorImpl::didExecuteScript(v8::Local<v8::Context> context) {
if (V8DebuggerAgentImpl* agent =
- enabledDebuggerAgentForGroup(V8Debugger::getGroupId(context)))
+ enabledDebuggerAgentForGroup(contextGroupId(context))) {
agent->didExecuteScript();
+ }
}
void V8InspectorImpl::idleStarted() {
@@ -292,33 +317,31 @@ unsigned V8InspectorImpl::exceptionThrown(
v8::Local<v8::Value> exception, const StringView& detailedMessage,
const StringView& url, unsigned lineNumber, unsigned columnNumber,
std::unique_ptr<V8StackTrace> stackTrace, int scriptId) {
- int contextGroupId = V8Debugger::getGroupId(context);
- if (!contextGroupId || m_muteExceptionsMap[contextGroupId]) return 0;
- std::unique_ptr<V8StackTraceImpl> stackTraceImpl =
- wrapUnique(static_cast<V8StackTraceImpl*>(stackTrace.release()));
+ int groupId = contextGroupId(context);
+ if (!groupId || m_muteExceptionsMap[groupId]) return 0;
+ std::unique_ptr<V8StackTraceImpl> stackTraceImpl(
+ static_cast<V8StackTraceImpl*>(stackTrace.release()));
unsigned exceptionId = nextExceptionId();
std::unique_ptr<V8ConsoleMessage> consoleMessage =
V8ConsoleMessage::createForException(
m_client->currentTimeMS(), toString16(detailedMessage),
toString16(url), lineNumber, columnNumber, std::move(stackTraceImpl),
scriptId, m_isolate, toString16(message),
- V8Debugger::contextId(context), exception, exceptionId);
- ensureConsoleMessageStorage(contextGroupId)
- ->addMessage(std::move(consoleMessage));
+ InspectedContext::contextId(context), exception, exceptionId);
+ ensureConsoleMessageStorage(groupId)->addMessage(std::move(consoleMessage));
return exceptionId;
}
void V8InspectorImpl::exceptionRevoked(v8::Local<v8::Context> context,
unsigned exceptionId,
const StringView& message) {
- int contextGroupId = V8Debugger::getGroupId(context);
- if (!contextGroupId) return;
+ int groupId = contextGroupId(context);
+ if (!groupId) return;
std::unique_ptr<V8ConsoleMessage> consoleMessage =
V8ConsoleMessage::createForRevokedException(
m_client->currentTimeMS(), toString16(message), exceptionId);
- ensureConsoleMessageStorage(contextGroupId)
- ->addMessage(std::move(consoleMessage));
+ ensureConsoleMessageStorage(groupId)->addMessage(std::move(consoleMessage));
}
std::unique_ptr<V8StackTrace> V8InspectorImpl::captureStackTrace(
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index 0ca1a6a729..f98747543b 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -58,6 +58,8 @@ class V8InspectorImpl : public V8Inspector {
v8::Isolate* isolate() const { return m_isolate; }
V8InspectorClient* client() { return m_client; }
V8Debugger* debugger() { return m_debugger.get(); }
+ int contextGroupId(v8::Local<v8::Context>);
+ int contextGroupId(int contextId);
v8::MaybeLocal<v8::Value> runCompiledScript(v8::Local<v8::Context>,
v8::Local<v8::Script>);
@@ -67,10 +69,14 @@ class V8InspectorImpl : public V8Inspector {
int argc, v8::Local<v8::Value> info[]);
v8::MaybeLocal<v8::Value> compileAndRunInternalScript(v8::Local<v8::Context>,
v8::Local<v8::String>);
- v8::Local<v8::Script> compileScript(v8::Local<v8::Context>,
- v8::Local<v8::String>,
- const String16& fileName,
- bool markAsInternal);
+ v8::MaybeLocal<v8::Value> callInternalFunction(v8::Local<v8::Function>,
+ v8::Local<v8::Context>,
+ v8::Local<v8::Value> receiver,
+ int argc,
+ v8::Local<v8::Value> info[]);
+ v8::MaybeLocal<v8::Script> compileScript(v8::Local<v8::Context>,
+ const String16& code,
+ const String16& fileName);
v8::Local<v8::Context> regexContext();
// V8Inspector implementation.
@@ -121,12 +127,18 @@ class V8InspectorImpl : public V8Inspector {
V8ProfilerAgentImpl* enabledProfilerAgentForGroup(int contextGroupId);
private:
+ v8::MaybeLocal<v8::Value> callFunction(
+ v8::Local<v8::Function>, v8::Local<v8::Context>,
+ v8::Local<v8::Value> receiver, int argc, v8::Local<v8::Value> info[],
+ v8::MicrotasksScope::Type runMicrotasks);
+
v8::Isolate* m_isolate;
V8InspectorClient* m_client;
std::unique_ptr<V8Debugger> m_debugger;
v8::Global<v8::Context> m_regexContext;
int m_capturingStackTracesCount;
unsigned m_lastExceptionId;
+ int m_lastContextId;
using MuteExceptionsMap = protocol::HashMap<int, int>;
MuteExceptionsMap m_muteExceptionsMap;
@@ -142,6 +154,8 @@ class V8InspectorImpl : public V8Inspector {
protocol::HashMap<int, std::unique_ptr<V8ConsoleMessageStorage>>;
ConsoleStorageMap m_consoleStorageMap;
+ protocol::HashMap<int, int> m_contextIdToGroupIdMap;
+
DISALLOW_COPY_AND_ASSIGN(V8InspectorImpl);
};
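callFunction and callInternalFunction now differ only in the v8::MicrotasksScope::Type they forward to the private callFunction overload. A usage sketch; fn, receiver, and arg0 are placeholders:

    v8::Local<v8::Value> args[] = {arg0};
    // User-visible evaluation: pending microtasks run when the scope unwinds.
    inspector->callFunction(fn, context, receiver, 1, args);
    // Inspector-internal plumbing: the embedder's microtask queue is untouched.
    inspector->callInternalFunction(fn, context, receiver, 1, args);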
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.cc b/deps/v8/src/inspector/v8-inspector-session-impl.cc
index e415575304..3a5b59c28d 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.cc
@@ -40,7 +40,7 @@ bool V8InspectorSession::canDispatchMethod(const StringView& method) {
std::unique_ptr<V8InspectorSessionImpl> V8InspectorSessionImpl::create(
V8InspectorImpl* inspector, int contextGroupId,
V8Inspector::Channel* channel, const StringView& state) {
- return wrapUnique(
+ return std::unique_ptr<V8InspectorSessionImpl>(
new V8InspectorSessionImpl(inspector, contextGroupId, channel, state));
}
@@ -62,35 +62,35 @@ V8InspectorSessionImpl::V8InspectorSessionImpl(V8InspectorImpl* inspector,
m_schemaAgent(nullptr) {
if (savedState.length()) {
std::unique_ptr<protocol::Value> state =
- protocol::parseJSON(toString16(savedState));
+ protocol::StringUtil::parseJSON(toString16(savedState));
if (state) m_state = protocol::DictionaryValue::cast(std::move(state));
if (!m_state) m_state = protocol::DictionaryValue::create();
} else {
m_state = protocol::DictionaryValue::create();
}
- m_runtimeAgent = wrapUnique(new V8RuntimeAgentImpl(
+ m_runtimeAgent.reset(new V8RuntimeAgentImpl(
this, this, agentState(protocol::Runtime::Metainfo::domainName)));
protocol::Runtime::Dispatcher::wire(&m_dispatcher, m_runtimeAgent.get());
- m_debuggerAgent = wrapUnique(new V8DebuggerAgentImpl(
+ m_debuggerAgent.reset(new V8DebuggerAgentImpl(
this, this, agentState(protocol::Debugger::Metainfo::domainName)));
protocol::Debugger::Dispatcher::wire(&m_dispatcher, m_debuggerAgent.get());
- m_profilerAgent = wrapUnique(new V8ProfilerAgentImpl(
+ m_profilerAgent.reset(new V8ProfilerAgentImpl(
this, this, agentState(protocol::Profiler::Metainfo::domainName)));
protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get());
- m_heapProfilerAgent = wrapUnique(new V8HeapProfilerAgentImpl(
+ m_heapProfilerAgent.reset(new V8HeapProfilerAgentImpl(
this, this, agentState(protocol::HeapProfiler::Metainfo::domainName)));
protocol::HeapProfiler::Dispatcher::wire(&m_dispatcher,
m_heapProfilerAgent.get());
- m_consoleAgent = wrapUnique(new V8ConsoleAgentImpl(
+ m_consoleAgent.reset(new V8ConsoleAgentImpl(
this, this, agentState(protocol::Console::Metainfo::domainName)));
protocol::Console::Dispatcher::wire(&m_dispatcher, m_consoleAgent.get());
- m_schemaAgent = wrapUnique(new V8SchemaAgentImpl(
+ m_schemaAgent.reset(new V8SchemaAgentImpl(
this, this, agentState(protocol::Schema::Metainfo::domainName)));
protocol::Schema::Dispatcher::wire(&m_dispatcher, m_schemaAgent.get());
@@ -126,13 +126,42 @@ protocol::DictionaryValue* V8InspectorSessionImpl::agentState(
return state;
}
-void V8InspectorSessionImpl::sendProtocolResponse(int callId,
- const String16& message) {
- m_channel->sendProtocolResponse(callId, toStringView(message));
+namespace {
+
+class MessageBuffer : public StringBuffer {
+ public:
+ static std::unique_ptr<MessageBuffer> create(
+ std::unique_ptr<protocol::Serializable> message) {
+ return std::unique_ptr<MessageBuffer>(
+ new MessageBuffer(std::move(message)));
+ }
+
+ const StringView& string() override {
+ if (!m_serialized) {
+ m_serialized = StringBuffer::create(toStringView(m_message->serialize()));
+ m_message.reset(nullptr);
+ }
+ return m_serialized->string();
+ }
+
+ private:
+ explicit MessageBuffer(std::unique_ptr<protocol::Serializable> message)
+ : m_message(std::move(message)) {}
+
+ std::unique_ptr<protocol::Serializable> m_message;
+ std::unique_ptr<StringBuffer> m_serialized;
+};
+
+} // namespace
+
+void V8InspectorSessionImpl::sendProtocolResponse(
+ int callId, std::unique_ptr<protocol::Serializable> message) {
+ m_channel->sendResponse(callId, MessageBuffer::create(std::move(message)));
}
-void V8InspectorSessionImpl::sendProtocolNotification(const String16& message) {
- m_channel->sendProtocolNotification(toStringView(message));
+void V8InspectorSessionImpl::sendProtocolNotification(
+ std::unique_ptr<protocol::Serializable> message) {
+ m_channel->sendNotification(MessageBuffer::create(std::move(message)));
}
void V8InspectorSessionImpl::flushProtocolNotifications() {
@@ -266,7 +295,7 @@ V8InspectorSessionImpl::wrapObject(v8::Local<v8::Context> context,
const String16& groupName,
bool generatePreview) {
InjectedScript* injectedScript = nullptr;
- findInjectedScript(V8Debugger::contextId(context), injectedScript);
+ findInjectedScript(InspectedContext::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
std::unique_ptr<protocol::Runtime::RemoteObject> result;
injectedScript->wrapObject(value, groupName, false, generatePreview, &result);
@@ -278,7 +307,7 @@ V8InspectorSessionImpl::wrapTable(v8::Local<v8::Context> context,
v8::Local<v8::Value> table,
v8::Local<v8::Value> columns) {
InjectedScript* injectedScript = nullptr;
- findInjectedScript(V8Debugger::contextId(context), injectedScript);
+ findInjectedScript(InspectedContext::contextId(context), injectedScript);
if (!injectedScript) return nullptr;
return injectedScript->wrapTable(table, columns);
}
@@ -305,11 +334,11 @@ void V8InspectorSessionImpl::reportAllContexts(V8RuntimeAgentImpl* agent) {
void V8InspectorSessionImpl::dispatchProtocolMessage(
const StringView& message) {
- m_dispatcher.dispatch(protocol::parseJSON(message));
+ m_dispatcher.dispatch(protocol::StringUtil::parseJSON(message));
}
std::unique_ptr<StringBuffer> V8InspectorSessionImpl::stateJSON() {
- String16 json = m_state->toJSONString();
+ String16 json = m_state->serialize();
return StringBufferImpl::adopt(json);
}
@@ -366,7 +395,8 @@ void V8InspectorSessionImpl::schedulePauseOnNextStatement(
const StringView& breakReason, const StringView& breakDetails) {
m_debuggerAgent->schedulePauseOnNextStatement(
toString16(breakReason),
- protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+ protocol::DictionaryValue::cast(
+ protocol::StringUtil::parseJSON(breakDetails)));
}
void V8InspectorSessionImpl::cancelPauseOnNextStatement() {
@@ -377,7 +407,8 @@ void V8InspectorSessionImpl::breakProgram(const StringView& breakReason,
const StringView& breakDetails) {
m_debuggerAgent->breakProgram(
toString16(breakReason),
- protocol::DictionaryValue::cast(protocol::parseJSON(breakDetails)));
+ protocol::DictionaryValue::cast(
+ protocol::StringUtil::parseJSON(breakDetails)));
}
void V8InspectorSessionImpl::setSkipAllPauses(bool skip) {
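The MessageBuffer introduced above defers protocol::Serializable::serialize() until a channel first asks for the bytes, so a channel that queues or drops a message pays for serialization at most once, and only on demand. A sketch of a consuming channel; the class is hypothetical, while the overridden signatures are the V8Inspector::Channel ones invoked above:

    class LoggingChannel : public v8_inspector::V8Inspector::Channel {
      void sendResponse(
          int callId,
          std::unique_ptr<v8_inspector::StringBuffer> message) override {
        const v8_inspector::StringView& view = message->string();  // serializes
        // ... hand `view` to the frontend transport ...
      }
      void sendNotification(
          std::unique_ptr<v8_inspector::StringBuffer> message) override {}
      void flushProtocolNotifications() override {}
    };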
diff --git a/deps/v8/src/inspector/v8-inspector-session-impl.h b/deps/v8/src/inspector/v8-inspector-session-impl.h
index af65aa3c93..7a59e1cead 100644
--- a/deps/v8/src/inspector/v8-inspector-session-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-session-impl.h
@@ -96,8 +96,10 @@ class V8InspectorSessionImpl : public V8InspectorSession,
protocol::DictionaryValue* agentState(const String16& name);
// protocol::FrontendChannel implementation.
- void sendProtocolResponse(int callId, const String16& message) override;
- void sendProtocolNotification(const String16& message) override;
+ void sendProtocolResponse(
+ int callId, std::unique_ptr<protocol::Serializable> message) override;
+ void sendProtocolNotification(
+ std::unique_ptr<protocol::Serializable> message) override;
void flushProtocolNotifications() override;
int m_contextGroupId;
diff --git a/deps/v8/src/inspector/v8-internal-value-type.cc b/deps/v8/src/inspector/v8-internal-value-type.cc
index cde8bc9f7f..46f5dac1ac 100644
--- a/deps/v8/src/inspector/v8-internal-value-type.cc
+++ b/deps/v8/src/inspector/v8-internal-value-type.cc
@@ -4,7 +4,6 @@
#include "src/inspector/v8-internal-value-type.h"
-#include "src/inspector/protocol-platform.h"
#include "src/inspector/string-util.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index 8b888a066b..16c4777e84 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -309,8 +309,4 @@ bool V8ProfilerAgentImpl::idleFinished() {
return m_profiler;
}
-void V8ProfilerAgentImpl::collectSample() {
- if (m_profiler) m_profiler->CollectSample();
-}
-
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index a634ff3cd9..a8441174e0 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -43,8 +43,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
bool idleStarted();
bool idleFinished();
- void collectSample();
-
private:
String16 nextProfileId();
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index 4dbe60f8f3..b40f08ed06 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -241,7 +241,7 @@ Response ensureContext(V8InspectorImpl* inspector, int contextGroupId,
inspector->client()->ensureDefaultContextInGroup(contextGroupId);
if (defaultContext.IsEmpty())
return Response::Error("Cannot find default execution context");
- *contextId = V8Debugger::contextId(defaultContext);
+ *contextId = InspectedContext::contextId(defaultContext);
}
return Response::OK();
}
@@ -293,11 +293,11 @@ void V8RuntimeAgentImpl::evaluate(
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(true);
v8::MaybeLocal<v8::Value> maybeResultValue;
- v8::Local<v8::Script> script = m_inspector->compileScript(
- scope.context(), toV8String(m_inspector->isolate(), expression),
- String16(), false);
- if (!script.IsEmpty())
+ v8::Local<v8::Script> script;
+ if (m_inspector->compileScript(scope.context(), expression, String16())
+ .ToLocal(&script)) {
maybeResultValue = m_inspector->runCompiledScript(scope.context(), script);
+ }
if (evalIsDisabled) scope.context()->AllowCodeGenerationFromStrings(false);
@@ -379,10 +379,14 @@ void V8RuntimeAgentImpl::callFunctionOn(
if (silent.fromMaybe(false)) scope.ignoreExceptionsAndMuteConsole();
if (userGesture.fromMaybe(false)) scope.pretendUserGesture();
- v8::MaybeLocal<v8::Value> maybeFunctionValue =
- m_inspector->compileAndRunInternalScript(
- scope.context(),
- toV8String(m_inspector->isolate(), "(" + expression + ")"));
+ v8::MaybeLocal<v8::Value> maybeFunctionValue;
+ v8::Local<v8::Script> functionScript;
+ if (m_inspector
+ ->compileScript(scope.context(), "(" + expression + ")", String16())
+ .ToLocal(&functionScript)) {
+ maybeFunctionValue =
+ m_inspector->runCompiledScript(scope.context(), functionScript);
+ }
// Re-initialize after running client's code, as it could have destroyed
// context or session.
response = scope.initialize();
@@ -543,11 +547,11 @@ Response V8RuntimeAgentImpl::compileScript(
if (!response.isSuccess()) return response;
if (!persistScript) m_inspector->debugger()->muteScriptParsedEvents();
- v8::Local<v8::Script> script = m_inspector->compileScript(
- scope.context(), toV8String(m_inspector->isolate(), expression),
- sourceURL, false);
+ v8::Local<v8::Script> script;
+ bool isOk = m_inspector->compileScript(scope.context(), expression, sourceURL)
+ .ToLocal(&script);
if (!persistScript) m_inspector->debugger()->unmuteScriptParsedEvents();
- if (script.IsEmpty()) {
+ if (!isOk) {
if (scope.tryCatch().HasCaught()) {
response = scope.injectedScript()->createExceptionDetails(
scope.tryCatch(), String16(), false, exceptionDetails);
@@ -702,7 +706,7 @@ void V8RuntimeAgentImpl::reportExecutionContextCreated(
.build();
if (!context->auxData().isEmpty())
description->setAuxData(protocol::DictionaryValue::cast(
- protocol::parseJSON(context->auxData())));
+ protocol::StringUtil::parseJSON(context->auxData())));
m_frontend.executionContextCreated(std::move(description));
}
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index 1a38c6dd82..962a00a773 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -5,12 +5,11 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
#include "src/inspector/v8-debugger.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "src/inspector/v8-profiler-agent-impl.h"
#include "include/v8-debug.h"
-#include "include/v8-profiler.h"
#include "include/v8-version.h"
namespace v8_inspector {
@@ -23,7 +22,9 @@ static const v8::StackTrace::StackTraceOptions stackTraceOptions =
v8::StackTrace::kScriptId | v8::StackTrace::kScriptNameOrSourceURL |
v8::StackTrace::kFunctionName);
-V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame) {
+V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame,
+ WasmTranslation* wasmTranslation,
+ int contextGroupId) {
String16 scriptId = String16::fromInteger(frame->GetScriptId());
String16 sourceName;
v8::Local<v8::String> sourceNameValue(frame->GetScriptNameOrSourceURL());
@@ -35,22 +36,30 @@ V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame) {
if (!functionNameValue.IsEmpty())
functionName = toProtocolString(functionNameValue);
- int sourceLineNumber = frame->GetLineNumber();
- int sourceColumn = frame->GetColumn();
+ int sourceLineNumber = frame->GetLineNumber() - 1;
+ int sourceColumn = frame->GetColumn() - 1;
+ // TODO(clemensh): Figure out a way to do this translation only right before
+ // sending the stack trace over the wire.
+ if (wasmTranslation)
+ wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+ &scriptId, &sourceLineNumber, &sourceColumn);
return V8StackTraceImpl::Frame(functionName, scriptId, sourceName,
- sourceLineNumber, sourceColumn);
+ sourceLineNumber + 1, sourceColumn + 1);
}
void toFramesVector(v8::Local<v8::StackTrace> stackTrace,
std::vector<V8StackTraceImpl::Frame>& frames,
- size_t maxStackSize, v8::Isolate* isolate) {
+ size_t maxStackSize, v8::Isolate* isolate,
+ V8Debugger* debugger, int contextGroupId) {
DCHECK(isolate->InContext());
int frameCount = stackTrace->GetFrameCount();
if (frameCount > static_cast<int>(maxStackSize))
frameCount = static_cast<int>(maxStackSize);
+ WasmTranslation* wasmTranslation =
+ debugger ? debugger->wasmTranslation() : nullptr;
for (int i = 0; i < frameCount; i++) {
v8::Local<v8::StackFrame> stackFrame = stackTrace->GetFrame(i);
- frames.push_back(toFrame(stackFrame));
+ frames.push_back(toFrame(stackFrame, wasmTranslation, contextGroupId));
}
}
@@ -113,7 +122,8 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
v8::HandleScope scope(isolate);
std::vector<V8StackTraceImpl::Frame> frames;
if (!stackTrace.IsEmpty())
- toFramesVector(stackTrace, frames, maxStackSize, isolate);
+ toFramesVector(stackTrace, frames, maxStackSize, isolate, debugger,
+ contextGroupId);
int maxAsyncCallChainDepth = 1;
V8StackTraceImpl* asyncCallChain = nullptr;
@@ -161,12 +171,6 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
v8::HandleScope handleScope(isolate);
v8::Local<v8::StackTrace> stackTrace;
if (isolate->InContext()) {
- if (debugger) {
- V8InspectorImpl* inspector = debugger->inspector();
- V8ProfilerAgentImpl* profilerAgent =
- inspector->enabledProfilerAgentForGroup(contextGroupId);
- if (profilerAgent) profilerAgent->collectSample();
- }
stackTrace = v8::StackTrace::CurrentStackTrace(
isolate, static_cast<int>(maxStackSize), stackTraceOptions);
}
@@ -176,7 +180,7 @@ std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
std::vector<Frame> framesCopy(m_frames);
- return wrapUnique(
+ return std::unique_ptr<V8StackTraceImpl>(
new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
m_parent ? m_parent->cloneImpl() : nullptr));
}
@@ -185,7 +189,7 @@ std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
std::vector<Frame> frames;
for (size_t i = 0; i < m_frames.size(); i++)
frames.push_back(m_frames.at(i).clone());
- return wrapUnique(
+ return std::unique_ptr<V8StackTraceImpl>(
new V8StackTraceImpl(m_contextGroupId, m_description, frames, nullptr));
}
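The +/-1 arithmetic in toFrame exists because v8::StackFrame reports 1-based lines and columns while WasmTranslation (added below) operates on 0-based coordinates; the resulting protocol frame stays 1-based. Condensed from the hunk above:

    int line = frame->GetLineNumber() - 1;  // 1-based -> 0-based
    int column = frame->GetColumn() - 1;
    // No-op for scripts never registered with the translation.
    wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
        &scriptId, &line, &column);
    V8StackTraceImpl::Frame result(functionName, scriptId, sourceName,
                                   line + 1, column + 1);  // back to 1-based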
diff --git a/deps/v8/src/inspector/wasm-translation.cc b/deps/v8/src/inspector/wasm-translation.cc
new file mode 100644
index 0000000000..825341e122
--- /dev/null
+++ b/deps/v8/src/inspector/wasm-translation.cc
@@ -0,0 +1,309 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/inspector/wasm-translation.h"
+
+#include <algorithm>
+
+#include "src/debug/debug-interface.h"
+#include "src/inspector/protocol/Debugger.h"
+#include "src/inspector/script-breakpoint.h"
+#include "src/inspector/string-util.h"
+#include "src/inspector/v8-debugger-agent-impl.h"
+#include "src/inspector/v8-debugger-script.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/v8-inspector-impl.h"
+
+using namespace v8_inspector;
+using namespace v8;
+
+class WasmTranslation::TranslatorImpl {
+ public:
+ struct TransLocation {
+ WasmTranslation *translation;
+ String16 script_id;
+ int line;
+ int column;
+ TransLocation(WasmTranslation *translation, String16 script_id, int line,
+ int column)
+ : translation(translation),
+ script_id(script_id),
+ line(line),
+ column(column) {}
+ };
+
+ virtual void Translate(TransLocation *loc) = 0;
+ virtual void TranslateBack(TransLocation *loc) = 0;
+
+ class RawTranslator;
+ class DisassemblingTranslator;
+};
+
+class WasmTranslation::TranslatorImpl::RawTranslator
+ : public WasmTranslation::TranslatorImpl {
+ public:
+ void Translate(TransLocation *loc) {}
+ void TranslateBack(TransLocation *loc) {}
+};
+
+class WasmTranslation::TranslatorImpl::DisassemblingTranslator
+ : public WasmTranslation::TranslatorImpl {
+ using OffsetTable = debug::WasmDisassembly::OffsetTable;
+
+ public:
+ DisassemblingTranslator(Isolate *isolate, Local<debug::WasmScript> script,
+ WasmTranslation *translation,
+ V8DebuggerAgentImpl *agent)
+ : script_(isolate, script) {
+ // Register fake scripts for each function in this wasm module/script.
+ int num_functions = script->NumFunctions();
+ int num_imported_functions = script->NumImportedFunctions();
+ DCHECK_LE(0, num_imported_functions);
+ DCHECK_LE(0, num_functions);
+ DCHECK_GE(num_functions, num_imported_functions);
+ String16 script_id = String16::fromInteger(script->Id());
+ for (int func_idx = num_imported_functions; func_idx < num_functions;
+ ++func_idx) {
+ AddFakeScript(isolate, script_id, func_idx, translation, agent);
+ }
+ }
+
+ void Translate(TransLocation *loc) {
+ const OffsetTable &offset_table = GetOffsetTable(loc);
+ DCHECK(!offset_table.empty());
+ uint32_t byte_offset = static_cast<uint32_t>(loc->column);
+
+ // Binary search for the given offset.
+ unsigned left = 0; // inclusive
+ unsigned right = static_cast<unsigned>(offset_table.size()); // exclusive
+ while (right - left > 1) {
+ unsigned mid = (left + right) / 2;
+ if (offset_table[mid].byte_offset <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+
+ loc->script_id = GetFakeScriptId(loc);
+ if (offset_table[left].byte_offset == byte_offset) {
+ loc->line = offset_table[left].line;
+ loc->column = offset_table[left].column;
+ } else {
+ loc->line = 0;
+ loc->column = 0;
+ }
+ }
+
+ void TranslateBack(TransLocation *loc) {
+ int func_index = GetFunctionIndexFromFakeScriptId(loc->script_id);
+ const OffsetTable *reverse_table = GetReverseTable(func_index);
+ if (!reverse_table) return;
+ DCHECK(!reverse_table->empty());
+
+ // Binary search for the given line and column.
+ unsigned left = 0; // inclusive
+ unsigned right = static_cast<unsigned>(reverse_table->size()); // exclusive
+ while (right - left > 1) {
+ unsigned mid = (left + right) / 2;
+ auto &entry = (*reverse_table)[mid];
+ if (entry.line < loc->line ||
+ (entry.line == loc->line && entry.column <= loc->column)) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+
+ int found_byte_offset = 0;
+ // If we found an exact match, use it. Otherwise, check whether the next
+ // larger entry is still on the same line, and report that one instead.
+ if ((*reverse_table)[left].line == loc->line &&
+ (*reverse_table)[left].column == loc->column) {
+ found_byte_offset = (*reverse_table)[left].byte_offset;
+ } else if (left + 1 < reverse_table->size() &&
+ (*reverse_table)[left + 1].line == loc->line) {
+ found_byte_offset = (*reverse_table)[left + 1].byte_offset;
+ }
+
+ v8::Isolate *isolate = loc->translation->isolate_;
+ loc->script_id = String16::fromInteger(script_.Get(isolate)->Id());
+ loc->line = func_index;
+ loc->column = found_byte_offset;
+ }
+
+ private:
+ String16 GetFakeScriptUrl(v8::Isolate *isolate, int func_index) {
+ Local<debug::WasmScript> script = script_.Get(isolate);
+ String16 script_name = toProtocolString(script->Name().ToLocalChecked());
+ int numFunctions = script->NumFunctions();
+ int numImported = script->NumImportedFunctions();
+ String16Builder builder;
+ builder.appendAll("wasm://wasm/", script_name, '/');
+ if (numFunctions - numImported > 300) {
+ size_t digits = String16::fromInteger(numFunctions - 1).length();
+ String16 thisCategory = String16::fromInteger((func_index / 100) * 100);
+ DCHECK_LE(thisCategory.length(), digits);
+ for (size_t i = thisCategory.length(); i < digits; ++i)
+ builder.append('0');
+ builder.appendAll(thisCategory, '/');
+ }
+ builder.appendAll(script_name, '-');
+ builder.appendNumber(func_index);
+ return builder.toString();
+ }
+
+ String16 GetFakeScriptId(const String16 &script_id, int func_index) {
+ return String16::concat(script_id, '-', String16::fromInteger(func_index));
+ }
+ String16 GetFakeScriptId(const TransLocation *loc) {
+ return GetFakeScriptId(loc->script_id, loc->line);
+ }
+
+ void AddFakeScript(v8::Isolate *isolate, const String16 &underlyingScriptId,
+ int func_idx, WasmTranslation *translation,
+ V8DebuggerAgentImpl *agent) {
+ String16 fake_script_id = GetFakeScriptId(underlyingScriptId, func_idx);
+ String16 fake_script_url = GetFakeScriptUrl(isolate, func_idx);
+
+ v8::Local<debug::WasmScript> script = script_.Get(isolate);
+ // TODO(clemensh): Generate disassembly lazily when queried by the frontend.
+ debug::WasmDisassembly disassembly = script->DisassembleFunction(func_idx);
+
+ DCHECK_EQ(0, offset_tables_.count(func_idx));
+ offset_tables_.insert(
+ std::make_pair(func_idx, std::move(disassembly.offset_table)));
+ String16 source(disassembly.disassembly.data(),
+ disassembly.disassembly.length());
+ std::unique_ptr<V8DebuggerScript> fake_script =
+ V8DebuggerScript::CreateWasm(isolate, script, fake_script_id,
+ std::move(fake_script_url), source);
+
+ translation->AddFakeScript(fake_script->scriptId(), this);
+ agent->didParseSource(std::move(fake_script), true);
+ }
+
+ int GetFunctionIndexFromFakeScriptId(const String16 &fake_script_id) {
+ size_t last_dash_pos = fake_script_id.reverseFind('-');
+ DCHECK_GT(fake_script_id.length(), last_dash_pos);
+ bool ok = true;
+ int func_index = fake_script_id.substring(last_dash_pos + 1).toInteger(&ok);
+ DCHECK(ok);
+ return func_index;
+ }
+
+ const OffsetTable &GetOffsetTable(const TransLocation *loc) {
+ int func_index = loc->line;
+ auto it = offset_tables_.find(func_index);
+ // TODO(clemensh): Once we load disassembly lazily, the offset table
+ // might not be there yet. Load it lazily then.
+ DCHECK(it != offset_tables_.end());
+ return it->second;
+ }
+
+ const OffsetTable *GetReverseTable(int func_index) {
+ auto it = reverse_tables_.find(func_index);
+ if (it != reverse_tables_.end()) return &it->second;
+
+ // Find offset table, copy and sort it to get reverse table.
+ it = offset_tables_.find(func_index);
+ if (it == offset_tables_.end()) return nullptr;
+
+ OffsetTable reverse_table = it->second;
+ // Order by line, column, then byte offset.
+ auto cmp = [](OffsetTable::value_type el1, OffsetTable::value_type el2) {
+ if (el1.line != el2.line) return el1.line < el2.line;
+ if (el1.column != el2.column) return el1.column < el2.column;
+ return el1.byte_offset < el2.byte_offset;
+ };
+ std::sort(reverse_table.begin(), reverse_table.end(), cmp);
+
+ auto inserted = reverse_tables_.insert(
+ std::make_pair(func_index, std::move(reverse_table)));
+ DCHECK(inserted.second);
+ return &inserted.first->second;
+ }
+
+ Global<debug::WasmScript> script_;
+
+ // We expect to disassemble only a subset of the functions, so we store
+ // them in a map instead of an array.
+ std::unordered_map<int, const OffsetTable> offset_tables_;
+ std::unordered_map<int, const OffsetTable> reverse_tables_;
+};
+
+WasmTranslation::WasmTranslation(v8::Isolate *isolate)
+ : isolate_(isolate), mode_(Disassemble) {}
+
+WasmTranslation::~WasmTranslation() { Clear(); }
+
+void WasmTranslation::AddScript(Local<debug::WasmScript> script,
+ V8DebuggerAgentImpl *agent) {
+ int script_id = script->Id();
+ DCHECK_EQ(0, wasm_translators_.count(script_id));
+ std::unique_ptr<TranslatorImpl> impl;
+ switch (mode_) {
+ case Raw:
+ impl.reset(new TranslatorImpl::RawTranslator());
+ break;
+ case Disassemble:
+ impl.reset(new TranslatorImpl::DisassemblingTranslator(isolate_, script,
+ this, agent));
+ break;
+ }
+ DCHECK(impl);
+ wasm_translators_.insert(std::make_pair(script_id, std::move(impl)));
+}
+
+void WasmTranslation::Clear() {
+ wasm_translators_.clear();
+ fake_scripts_.clear();
+}
+
+// Translation "forward" (to artificial scripts).
+bool WasmTranslation::TranslateWasmScriptLocationToProtocolLocation(
+ String16 *script_id, int *line_number, int *column_number) {
+ DCHECK(script_id && line_number && column_number);
+ bool ok = true;
+ int script_id_int = script_id->toInteger(&ok);
+ if (!ok) return false;
+
+ auto it = wasm_translators_.find(script_id_int);
+ if (it == wasm_translators_.end()) return false;
+ TranslatorImpl *translator = it->second.get();
+
+ TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
+ *line_number, *column_number);
+ translator->Translate(&trans_loc);
+
+ *script_id = std::move(trans_loc.script_id);
+ *line_number = trans_loc.line;
+ *column_number = trans_loc.column;
+
+ return true;
+}
+
+// Translation "backward" (from artificial to real scripts).
+bool WasmTranslation::TranslateProtocolLocationToWasmScriptLocation(
+ String16 *script_id, int *line_number, int *column_number) {
+ auto it = fake_scripts_.find(*script_id);
+ if (it == fake_scripts_.end()) return false;
+ TranslatorImpl *translator = it->second;
+
+ TranslatorImpl::TransLocation trans_loc(this, std::move(*script_id),
+ *line_number, *column_number);
+ translator->TranslateBack(&trans_loc);
+
+ *script_id = std::move(trans_loc.script_id);
+ *line_number = trans_loc.line;
+ *column_number = trans_loc.column;
+
+ return true;
+}
+
+void WasmTranslation::AddFakeScript(const String16 &scriptId,
+ TranslatorImpl *translator) {
+ DCHECK_EQ(0, fake_scripts_.count(scriptId));
+ fake_scripts_.insert(std::make_pair(scriptId, translator));
+}
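Translate and TranslateBack share one idea: a predecessor-style binary search that finds the last table entry not greater than the query. A standalone sketch of the forward case, under the same sorted-and-non-empty invariant the DCHECK asserts:

    #include <cstdint>
    #include <vector>

    struct Entry { uint32_t byte_offset; int line; int column; };

    // Index of the last entry with byte_offset <= target. Degrades to 0 when
    // target precedes the whole table; the caller then reports line 0/column 0
    // unless the offset matches entry 0 exactly.
    size_t FindPredecessor(const std::vector<Entry>& table, uint32_t target) {
      size_t left = 0;              // inclusive
      size_t right = table.size();  // exclusive
      while (right - left > 1) {
        size_t mid = (left + right) / 2;
        if (table[mid].byte_offset <= target) left = mid;
        else right = mid;
      }
      return left;
    }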
diff --git a/deps/v8/src/inspector/wasm-translation.h b/deps/v8/src/inspector/wasm-translation.h
new file mode 100644
index 0000000000..2162edee67
--- /dev/null
+++ b/deps/v8/src/inspector/wasm-translation.h
@@ -0,0 +1,75 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INSPECTOR_WASMTRANSLATION_H_
+#define V8_INSPECTOR_WASMTRANSLATION_H_
+
+#include <unordered_map>
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
+#include "src/inspector/string-16.h"
+
+namespace v8_inspector {
+
+// Forward declarations.
+class V8DebuggerAgentImpl;
+
+class WasmTranslation {
+ public:
+ enum Mode { Raw, Disassemble };
+
+ explicit WasmTranslation(v8::Isolate* isolate);
+ ~WasmTranslation();
+
+ // Set translation mode.
+ void SetMode(Mode mode) { mode_ = mode; }
+
+ // Make a wasm script known to the translation. This will trigger a number of
+ // didParseSource calls to the given debugger agent.
+ // Only locations referencing a registered script will be translated by the
+ // Translate functions below.
+ void AddScript(v8::Local<v8::debug::WasmScript> script,
+ V8DebuggerAgentImpl* agent);
+
+ // Clear all registered scripts.
+ void Clear();
+
+ // Translate a location as generated by V8 to a location that should be sent
+ // over the protocol.
+ // Does nothing for locations referencing a script which was not registered
+ // before via AddScript.
+ // Line and column are 0-based.
+ // Returns true if the location was translated, false otherwise.
+ bool TranslateWasmScriptLocationToProtocolLocation(String16* script_id,
+ int* line_number,
+ int* column_number);
+
+ // Translate back from protocol locations (potentially referencing artificial
+ // scripts for individual wasm functions) to locations that make sense to V8.
+ // Does nothing if the location was not generated by the translate method
+ // above.
+ // Returns true if the location was translated, false otherwise.
+ bool TranslateProtocolLocationToWasmScriptLocation(String16* script_id,
+ int* line_number,
+ int* column_number);
+
+ private:
+ class TranslatorImpl;
+ friend class TranslatorImpl;
+
+ void AddFakeScript(const String16& scriptId, TranslatorImpl* translator);
+
+ v8::Isolate* isolate_;
+ std::unordered_map<int, std::unique_ptr<TranslatorImpl>> wasm_translators_;
+ std::unordered_map<String16, TranslatorImpl*> fake_scripts_;
+ Mode mode_;
+
+ DISALLOW_COPY_AND_ASSIGN(WasmTranslation);
+};
+
+} // namespace v8_inspector
+
+#endif // V8_INSPECTOR_WASMTRANSLATION_H_
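A round-trip sketch of this interface; the script id and coordinates are invented, and `translation` stands for the instance owned by V8Debugger:

    String16 script_id = String16::fromInteger(7);  // real wasm script id
    int line = 2;     // for wasm scripts: function index
    int column = 48;  // for wasm scripts: byte offset within the function
    if (translation->TranslateWasmScriptLocationToProtocolLocation(
            &script_id, &line, &column)) {
      // script_id now names a fake per-function script such as "7-2", with
      // line/column pointing into its disassembly text.
    }
    // Invert the mapping before handing a protocol location back to V8:
    translation->TranslateProtocolLocationToWasmScriptLocation(
        &script_id, &line, &column);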
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index d14b1a1011..26b6422f6b 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -74,6 +74,20 @@ void FastNewFunctionContextDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void FastNewObjectDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {TargetRegister(), NewTargetRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+const Register FastNewObjectDescriptor::TargetRegister() {
+ return kJSFunctionRegister;
+}
+
+const Register FastNewObjectDescriptor::NewTargetRegister() {
+ return kJavaScriptCallNewTargetRegister;
+}
+
void LoadDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kSlot
@@ -90,24 +104,41 @@ void LoadDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void LoadFieldDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kSmiHandler
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void LoadFieldDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), SmiHandlerRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
void LoadGlobalDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kSlot
- MachineType machine_types[] = {MachineType::TaggedSigned()};
+ // kName, kSlot
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void LoadGlobalDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadWithVectorDescriptor::SlotRegister()};
+ Register registers[] = {NameRegister(), SlotRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kSlot, kVector
- MachineType machine_types[] = {MachineType::TaggedSigned(),
+ // kName, kSlot, kVector
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::TaggedSigned(),
MachineType::AnyTagged()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
@@ -115,8 +146,7 @@ void LoadGlobalWithVectorDescriptor::InitializePlatformIndependent(
void LoadGlobalWithVectorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {LoadWithVectorDescriptor::SlotRegister(),
- LoadWithVectorDescriptor::VectorRegister()};
+ Register registers[] = {NameRegister(), SlotRegister(), VectorRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -183,6 +213,35 @@ void StoreNamedTransitionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(len, registers);
}
+void StringCharAtDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kPosition
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::IntPtr()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StringCharAtDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
+void StringCharCodeAtDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kReceiver, kPosition
+ // TODO(turbofan): Allow builtins to return untagged values.
+ MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::IntPtr()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void StringCharCodeAtDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, kParameterCount);
+}
+
void StringCompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {LeftRegister(), RightRegister()};
@@ -207,6 +266,19 @@ void MathPowIntegerDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+const Register LoadFieldDescriptor::ReceiverRegister() {
+ // Reuse the register from the LoadDescriptor, since given the
+ // LoadFieldDescriptor's usage, it doesn't matter exactly which registers are
+ // used to pass parameters in.
+ return LoadDescriptor::ReceiverRegister();
+}
+const Register LoadFieldDescriptor::SmiHandlerRegister() {
+ // Reuse the register from the LoadDescriptor, since given the
+ // LoadFieldDescriptor's usage, it doesn't matter exactly which registers are
+ // used to pass parameters in.
+ return LoadDescriptor::NameRegister();
+}
+
void LoadWithVectorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kReceiver, kName, kSlot, kVector
@@ -293,6 +365,18 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+void NewArgumentsElementsDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ MachineType const kMachineTypes[] = {MachineType::IntPtr()};
+ data->InitializePlatformIndependent(arraysize(kMachineTypes), 0,
+ kMachineTypes);
+}
+
+void NewArgumentsElementsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ DefaultInitializePlatformSpecific(data, 1);
+}
+
void VarArgFunctionDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kActualArgumentsCount
@@ -389,15 +473,17 @@ void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformIndependent(
void BuiltinDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- MachineType machine_types[] = {MachineType::AnyTagged(),
- MachineType::Int32()};
+ // kTarget, kNewTarget, kArgumentsCount
+ MachineType machine_types[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(), MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
}
void BuiltinDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {NewTargetRegister(), ArgumentsCountRegister()};
+ Register registers[] = {TargetRegister(), NewTargetRegister(),
+ ArgumentsCountRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -408,6 +494,10 @@ const Register BuiltinDescriptor::NewTargetRegister() {
return kJavaScriptCallNewTargetRegister;
}
+const Register BuiltinDescriptor::TargetRegister() {
+ return kJSFunctionRegister;
+}
+
void ArrayNoArgumentConstructorDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kFunction, kAllocationSite, kActualArgumentsCount, kFunctionParameter
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 3b49041a09..1d1b48af93 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -21,6 +21,7 @@ class PlatformInterfaceDescriptor;
V(ContextOnly) \
V(Load) \
V(LoadWithVector) \
+ V(LoadField) \
V(LoadICProtoArray) \
V(LoadGlobal) \
V(LoadGlobalWithVector) \
@@ -73,10 +74,13 @@ class PlatformInterfaceDescriptor;
V(BinaryOpWithVector) \
V(CountOp) \
V(StringAdd) \
+ V(StringCharAt) \
+ V(StringCharCodeAt) \
V(StringCompare) \
V(SubString) \
V(Keyed) \
V(Named) \
+ V(CreateIterResultObject) \
V(HasProperty) \
V(ForInFilter) \
V(GetProperty) \
@@ -87,12 +91,14 @@ class PlatformInterfaceDescriptor;
V(MathPowTagged) \
V(MathPowInteger) \
V(GrowArrayElements) \
+ V(NewArgumentsElements) \
V(InterpreterDispatch) \
V(InterpreterPushArgsAndCall) \
V(InterpreterPushArgsAndConstruct) \
V(InterpreterPushArgsAndConstructArray) \
V(InterpreterCEntry) \
- V(ResumeGenerator)
+ V(ResumeGenerator) \
+ V(PromiseHandleReject)
class V8_EXPORT_PRIVATE CallInterfaceDescriptorData {
public:
@@ -157,8 +163,7 @@ class CallDescriptors {
};
};
-
-class CallInterfaceDescriptor {
+class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
public:
CallInterfaceDescriptor() : data_(NULL) {}
virtual ~CallInterfaceDescriptor() {}
@@ -305,12 +310,28 @@ class LoadDescriptor : public CallInterfaceDescriptor {
static const Register SlotRegister();
};
+// LoadFieldDescriptor is used by the shared handler that loads a field from an
+// object based on the smi-encoded field description.
+class LoadFieldDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kSmiHandler)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadFieldDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+ static const Register SmiHandlerRegister();
+};
+
class LoadGlobalDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kSlot)
+ DEFINE_PARAMETERS(kName, kSlot)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalDescriptor,
CallInterfaceDescriptor)
+ static const Register NameRegister() {
+ return LoadDescriptor::NameRegister();
+ }
+
static const Register SlotRegister() {
return LoadDescriptor::SlotRegister();
}
@@ -401,7 +422,7 @@ class LoadICProtoArrayDescriptor : public LoadWithVectorDescriptor {
class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
public:
- DEFINE_PARAMETERS(kSlot, kVector)
+ DEFINE_PARAMETERS(kName, kSlot, kVector)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(LoadGlobalWithVectorDescriptor,
LoadGlobalDescriptor)
@@ -412,6 +433,7 @@ class LoadGlobalWithVectorDescriptor : public LoadGlobalDescriptor {
class FastNewClosureDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kSharedFunctionInfo, kVector, kSlot)
DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
};
@@ -427,7 +449,10 @@ class FastNewFunctionContextDescriptor : public CallInterfaceDescriptor {
class FastNewObjectDescriptor : public CallInterfaceDescriptor {
public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget)
DECLARE_DESCRIPTOR(FastNewObjectDescriptor, CallInterfaceDescriptor)
+ static const Register TargetRegister();
+ static const Register NewTargetRegister();
};
class FastNewRestParameterDescriptor : public CallInterfaceDescriptor {
@@ -455,6 +480,13 @@ class TypeConversionDescriptor final : public CallInterfaceDescriptor {
static const Register ArgumentRegister();
};
+class CreateIterResultObjectDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kValue, kDone)
+ DECLARE_DEFAULT_DESCRIPTOR(CreateIterResultObjectDescriptor,
+ CallInterfaceDescriptor, kParameterCount)
+};
+
class HasPropertyDescriptor final : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kKey, kObject)
@@ -610,11 +642,13 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
class BuiltinDescriptor : public CallInterfaceDescriptor {
public:
+ // TODO(ishell): Where is kFunction??
DEFINE_PARAMETERS(kNewTarget, kArgumentsCount)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(BuiltinDescriptor,
CallInterfaceDescriptor)
static const Register ArgumentsCountRegister();
static const Register NewTargetRegister();
+ static const Register TargetRegister();
};
class ArrayNoArgumentConstructorDescriptor : public CallInterfaceDescriptor {
@@ -681,6 +715,19 @@ class StringAddDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
};
+class StringCharAtDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kPosition)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharAtDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class StringCharCodeAtDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kReceiver, kPosition)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(StringCharCodeAtDescriptor,
+ CallInterfaceDescriptor)
+};
class StringCompareDescriptor : public CallInterfaceDescriptor {
public:
@@ -775,7 +822,15 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
-class InterpreterDispatchDescriptor : public CallInterfaceDescriptor {
+class NewArgumentsElementsDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kFormalParameterCount)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(NewArgumentsElementsDescriptor,
+ CallInterfaceDescriptor)
+};
+
+class V8_EXPORT_PRIVATE InterpreterDispatchDescriptor
+ : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kAccumulator, kBytecodeOffset, kBytecodeArray,
kDispatchTable)
@@ -821,6 +876,13 @@ class ResumeGeneratorDescriptor final : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(ResumeGeneratorDescriptor, CallInterfaceDescriptor)
};
+class PromiseHandleRejectDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kPromise, kOnReject, kException)
+ DECLARE_DEFAULT_DESCRIPTOR(PromiseHandleRejectDescriptor,
+ CallInterfaceDescriptor, kParameterCount)
+};
+
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 4e6a721fe0..0f2165c647 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,6 +1,7 @@
set noparent
bmeurer@chromium.org
+leszeks@chromium.org
mstarzinger@chromium.org
mythria@chromium.org
rmcilroy@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
new file mode 100644
index 0000000000..8e6a732861
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -0,0 +1,205 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-accessor.h"
+
+#include "src/interpreter/bytecode-decoder.h"
+#include "src/interpreter/interpreter-intrinsics.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayAccessor::BytecodeArrayAccessor(
+ Handle<BytecodeArray> bytecode_array, int initial_offset)
+ : bytecode_array_(bytecode_array),
+ bytecode_offset_(initial_offset),
+ operand_scale_(OperandScale::kSingle),
+ prefix_offset_(0) {
+ UpdateOperandScale();
+}
+
+void BytecodeArrayAccessor::SetOffset(int offset) {
+ bytecode_offset_ = offset;
+ UpdateOperandScale();
+}
+
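+// Operands may be widened by a kWide or kExtraWide prefix bytecode. When the
+// byte at the current offset is such a prefix, record the operand scale it
+// implies and note that the actual bytecode starts one byte later.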
+void BytecodeArrayAccessor::UpdateOperandScale() {
+ if (OffsetInBounds()) {
+ uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
+ operand_scale_ =
+ Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
+ prefix_offset_ = 1;
+ } else {
+ operand_scale_ = OperandScale::kSingle;
+ prefix_offset_ = 0;
+ }
+ }
+}
+
+bool BytecodeArrayAccessor::OffsetInBounds() const {
+ return bytecode_offset_ >= 0 && bytecode_offset_ < bytecode_array()->length();
+}
+
+Bytecode BytecodeArrayAccessor::current_bytecode() const {
+ DCHECK(OffsetInBounds());
+ uint8_t current_byte =
+ bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
+ Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
+ DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
+ return current_bytecode;
+}
+
+int BytecodeArrayAccessor::current_bytecode_size() const {
+ return current_prefix_offset() +
+ Bytecodes::Size(current_bytecode(), current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+int32_t BytecodeArrayAccessor::GetSignedOperand(
+ int operand_index, OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
+uint32_t BytecodeArrayAccessor::GetFlagOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kFlag8);
+ return GetUnsignedOperand(operand_index, OperandType::kFlag8);
+}
+
+uint32_t BytecodeArrayAccessor::GetUnsignedImmediateOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kUImm);
+ return GetUnsignedOperand(operand_index, OperandType::kUImm);
+}
+
+int32_t BytecodeArrayAccessor::GetImmediateOperand(int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kImm);
+ return GetSignedOperand(operand_index, OperandType::kImm);
+}
+
+uint32_t BytecodeArrayAccessor::GetRegisterCountOperand(
+ int operand_index) const {
+ DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
+ OperandType::kRegCount);
+ return GetUnsignedOperand(operand_index, OperandType::kRegCount);
+}
+
+uint32_t BytecodeArrayAccessor::GetIndexOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK_EQ(operand_type, OperandType::kIdx);
+ return GetUnsignedOperand(operand_index, operand_type);
+}
+
+Register BytecodeArrayAccessor::GetRegisterOperand(int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ const uint8_t* operand_start =
+ bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
+ current_prefix_offset() +
+ Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
+ current_operand_scale());
+ return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
+ current_operand_scale());
+}
+
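+// Returns how many consecutive registers the operand covers. A kRegList
+// operand carries its length in the kRegCount operand that follows it, while
+// fixed-width register operands encode their count in the operand type.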
+int BytecodeArrayAccessor::GetRegisterOperandRange(int operand_index) const {
+ DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ const OperandType* operand_types =
+ Bytecodes::GetOperandTypes(current_bytecode());
+ OperandType operand_type = operand_types[operand_index];
+ DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
+ if (operand_type == OperandType::kRegList) {
+ return GetRegisterCountOperand(operand_index + 1);
+ } else {
+ return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
+ }
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetRuntimeIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kRuntimeId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return static_cast<Runtime::FunctionId>(raw_id);
+}
+
+Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
+ int operand_index) const {
+ OperandType operand_type =
+ Bytecodes::GetOperandType(current_bytecode(), operand_index);
+ DCHECK(operand_type == OperandType::kIntrinsicId);
+ uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
+ return IntrinsicsHelper::ToRuntimeId(
+ static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
+}
+
+Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
+ int operand_index) const {
+ return FixedArray::get(bytecode_array()->constant_pool(),
+ GetIndexOperand(operand_index),
+ bytecode_array()->GetIsolate());
+}
+
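+// A jump target is encoded either as a signed immediate operand or as a Smi
+// in the constant pool. The delta is relative to the jump bytecode itself,
+// which sits after any scaling prefix, hence the current_prefix_offset()
+// correction.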
+int BytecodeArrayAccessor::GetJumpTargetOffset() const {
+ Bytecode bytecode = current_bytecode();
+ if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
+ int relative_offset = GetImmediateOperand(0);
+ return current_offset() + relative_offset + current_prefix_offset();
+ } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
+ Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
+ return current_offset() + smi->value() + current_prefix_offset();
+ } else {
+ UNREACHABLE();
+ return kMinInt;
+ }
+}
+
+bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
+ return current_offset() <= offset &&
+ offset < current_offset() + current_bytecode_size();
+}
+
+std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
+ return BytecodeDecoder::Decode(
+ os, bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_,
+ bytecode_array()->parameter_count());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
new file mode 100644
index 0000000000..e5a24f3e7f
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -0,0 +1,76 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
+ public:
+ BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
+ int initial_offset);
+
+ void SetOffset(int offset);
+
+ Bytecode current_bytecode() const;
+ int current_bytecode_size() const;
+ int current_offset() const { return bytecode_offset_; }
+ OperandScale current_operand_scale() const { return operand_scale_; }
+ int current_prefix_offset() const { return prefix_offset_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+
+ uint32_t GetFlagOperand(int operand_index) const;
+ uint32_t GetUnsignedImmediateOperand(int operand_index) const;
+ int32_t GetImmediateOperand(int operand_index) const;
+ uint32_t GetIndexOperand(int operand_index) const;
+ uint32_t GetRegisterCountOperand(int operand_index) const;
+ Register GetRegisterOperand(int operand_index) const;
+ int GetRegisterOperandRange(int operand_index) const;
+ Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
+ Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+
+ // Returns the absolute offset of the branch target at the current
+ // bytecode. It is an error to call this method if the bytecode is
+ // not for a jump or conditional jump.
+ int GetJumpTargetOffset() const;
+
+ bool OffsetWithinBytecode(int offset) const;
+
+ std::ostream& PrintTo(std::ostream& os) const;
+
+ private:
+ bool OffsetInBounds() const;
+
+ uint32_t GetUnsignedOperand(int operand_index,
+ OperandType operand_type) const;
+ int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
+
+ void UpdateOperandScale();
+
+ Handle<BytecodeArray> bytecode_array_;
+ int bytecode_offset_;
+ OperandScale operand_scale_;
+ int prefix_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayAccessor);
+};
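+
+// Usage sketch: position the accessor, then query the bytecode and its
+// operands, e.g.
+//   BytecodeArrayAccessor accessor(bytecode_array, 0);
+//   if (accessor.current_bytecode() == Bytecode::kLdaGlobal) {
+//     uint32_t name_index = accessor.GetIndexOperand(0);
+//   }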
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_INTERPRETER_BYTECODE_ARRAY_ACCESSOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 904a8e021d..58d7d6df41 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -143,7 +143,8 @@ class OperandHelper {};
template <> \
class OperandHelper<OperandType::k##Name> \
: public UnsignedOperandHelper<Type> {};
-UNSIGNED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
+UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(DEFINE_UNSIGNED_OPERAND_HELPER)
#undef DEFINE_UNSIGNED_OPERAND_HELPER
template <>
@@ -211,14 +212,15 @@ class OperandHelper<OperandType::kRegOutTriple> {
} // namespace
-template <OperandType... operand_types>
+template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
class BytecodeNodeBuilder {
public:
template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
BytecodeSourceInfo source_info,
- Bytecode bytecode, Operands... operands)) {
- builder->PrepareToOutputBytecode(bytecode);
+ Operands... operands)) {
+ builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
// BytecodeNodeBuilder<OperandType::kReg, OperandType::kImm>::Make<
@@ -226,30 +228,34 @@ class BytecodeNodeBuilder {
// the code will expand into:
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
- return BytecodeNode(
- bytecode, OperandHelper<operand_types>::Convert(builder, operands)...,
- source_info);
+ return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
+ source_info,
+ OperandHelper<operand_types>::Convert(builder, operands)...);
}
};
-#define DEFINE_BYTECODE_OUTPUT(name, accumulator_use, ...) \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(Operands... operands) { \
- BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
- this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
- operands...)); \
- pipeline()->Write(&node); \
- } \
- \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
- Operands... operands) { \
- DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
- BytecodeNode node(BytecodeNodeBuilder<__VA_ARGS__>::Make<Operands...>( \
- this, CurrentSourcePosition(Bytecode::k##name), Bytecode::k##name, \
- operands...)); \
- pipeline()->WriteJump(&node, label); \
- LeaveBasicBlock(); \
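+// Bytecode and accumulator use are now compile-time template parameters of
+// BytecodeNodeBuilder, so each generated Output##name method binds them
+// statically instead of passing Bytecode::k##name around at runtime.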
+#define DEFINE_BYTECODE_OUTPUT(name, ...) \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
+ "too many operands for bytecode"); \
+ BytecodeNode node( \
+ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
+ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+ operands...)); \
+ pipeline()->Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node( \
+ BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
+ Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
+ operands...)); \
+ pipeline()->WriteJump(&node, label); \
+ LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
@@ -318,6 +324,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::TypeOf() {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::GetSuperConstructor(Register out) {
+ OutputGetSuperConstructor(out);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
Token::Value op, Register reg, int feedback_slot) {
switch (op) {
@@ -433,13 +444,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::MoveRegister(Register from,
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int feedback_slot,
- TypeofMode typeof_mode) {
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(
+ const Handle<String> name, int feedback_slot, TypeofMode typeof_mode) {
+ size_t name_index = GetConstantPoolEntry(name);
if (typeof_mode == INSIDE_TYPEOF) {
- OutputLdaGlobalInsideTypeof(feedback_slot);
+ OutputLdaGlobalInsideTypeof(name_index, feedback_slot);
} else {
DCHECK_EQ(typeof_mode, NOT_INSIDE_TYPEOF);
- OutputLdaGlobal(feedback_slot);
+ OutputLdaGlobal(name_index, feedback_slot);
}
return *this;
}
@@ -541,6 +553,13 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreDataPropertyInLiteral(
+ Register object, Register name, DataPropertyInLiteralFlags flags,
+ int feedback_slot) {
+ OutputStaDataPropertyInLiteral(object, name, flags, feedback_slot);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
Register object, const Handle<Name> name, int feedback_slot,
LanguageMode language_mode) {
@@ -566,9 +585,9 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
return *this;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(size_t entry,
- int flags) {
- OutputCreateClosure(entry, flags);
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateClosure(
+ size_t shared_function_info_entry, int slot, int flags) {
+ OutputCreateClosure(shared_function_info_entry, slot, flags);
return *this;
}
@@ -592,6 +611,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateFunctionContext(int slots) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::CreateEvalContext(int slots) {
+ OutputCreateEvalContext(slots);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateWithContext(
Register object, Handle<ScopeInfo> scope_info) {
size_t scope_info_index = GetConstantPoolEntry(scope_info);
@@ -625,16 +649,14 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CreateRegExpLiteral(
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateArrayLiteral(
- Handle<FixedArray> constant_elements, int literal_index, int flags) {
- size_t constant_elements_entry = GetConstantPoolEntry(constant_elements);
+ size_t constant_elements_entry, int literal_index, int flags) {
OutputCreateArrayLiteral(constant_elements_entry, literal_index, flags);
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags,
+ size_t constant_properties_entry, int literal_index, int flags,
Register output) {
- size_t constant_properties_entry = GetConstantPoolEntry(constant_properties);
OutputCreateObjectLiteral(constant_properties_entry, literal_index, flags,
output);
return *this;
@@ -718,6 +740,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfNotHole(
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfJSReceiver(
+ BytecodeLabel* label) {
+ OutputJumpIfJSReceiver(label, 0);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
int loop_depth) {
OutputJumpLoop(label, 0, loop_depth);
@@ -742,6 +770,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::SetPendingMessage() {
+ OutputSetPendingMessage();
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Throw() {
OutputThrow();
return *this;
@@ -914,6 +947,11 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::CallJSRuntime(int context_index,
return *this;
}
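+
+// The argument registers are encoded as a register-list operand followed by
+// an explicit register count, mirroring the kRegList/kRegCount operand pair.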
+BytecodeArrayBuilder& BytecodeArrayBuilder::NewWithSpread(RegisterList args) {
+ OutputNewWithSpread(args, args.register_count());
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Delete(Register object,
LanguageMode language_mode) {
if (language_mode == SLOPPY) {
@@ -975,8 +1013,10 @@ bool BytecodeArrayBuilder::RegisterListIsValid(RegisterList reg_list) const {
}
}
-void BytecodeArrayBuilder::PrepareToOutputBytecode(Bytecode bytecode) {
- if (register_optimizer_) register_optimizer_->PrepareForBytecode(bytecode);
+template <Bytecode bytecode, AccumulatorUse accumulator_use>
+void BytecodeArrayBuilder::PrepareToOutputBytecode() {
+ if (register_optimizer_)
+ register_optimizer_->PrepareForBytecode<bytecode, accumulator_use>();
}
uint32_t BytecodeArrayBuilder::GetInputRegisterOperand(Register reg) {
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index cc5b5e782b..121b84d523 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -84,7 +84,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& LoadFalse();
// Global loads to the accumulator and stores from the accumulator.
- BytecodeArrayBuilder& LoadGlobal(int feedback_slot, TypeofMode typeof_mode);
+ BytecodeArrayBuilder& LoadGlobal(const Handle<String> name, int feedback_slot,
+ TypeofMode typeof_mode);
BytecodeArrayBuilder& StoreGlobal(const Handle<String> name,
int feedback_slot,
LanguageMode language_mode);
@@ -121,6 +122,12 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Keyed load property. The key should be in the accumulator.
BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot);
+  // Store a data property in a literal. The value to be stored should be
+  // in the accumulator.
+ BytecodeArrayBuilder& StoreDataPropertyInLiteral(
+ Register object, Register name, DataPropertyInLiteralFlags flags,
+ int feedback_slot);
+
// Store properties. The value to be stored should be in the accumulator.
BytecodeArrayBuilder& StoreNamedProperty(Register object,
const Handle<Name> name,
@@ -153,8 +160,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
LanguageMode language_mode);
// Create a new closure for a SharedFunctionInfo which will be inserted at
- // constant pool index |entry|.
- BytecodeArrayBuilder& CreateClosure(size_t entry, int flags);
+ // constant pool index |shared_function_info_entry|.
+ BytecodeArrayBuilder& CreateClosure(size_t shared_function_info_entry,
+ int slot, int flags);
// Create a new local context for a |scope_info| and a closure which should be
// in the accumulator.
@@ -169,6 +177,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Create a new context with size |slots|.
BytecodeArrayBuilder& CreateFunctionContext(int slots);
+ // Create a new eval context with size |slots|.
+ BytecodeArrayBuilder& CreateEvalContext(int slots);
+
// Creates a new context with the given |scope_info| for a with-statement
// with the |object| in a register and the closure in the accumulator.
BytecodeArrayBuilder& CreateWithContext(Register object,
@@ -180,11 +191,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Literals creation. Constant elements should be in the accumulator.
BytecodeArrayBuilder& CreateRegExpLiteral(Handle<String> pattern,
int literal_index, int flags);
- BytecodeArrayBuilder& CreateArrayLiteral(Handle<FixedArray> constant_elements,
+ BytecodeArrayBuilder& CreateArrayLiteral(size_t constant_elements_entry,
int literal_index, int flags);
- BytecodeArrayBuilder& CreateObjectLiteral(
- Handle<FixedArray> constant_properties, int literal_index, int flags,
- Register output);
+ BytecodeArrayBuilder& CreateObjectLiteral(size_t constant_properties_entry,
+ int literal_index, int flags,
+ Register output);
// Push the context in accumulator as the new context, and store in register
// |context|.
@@ -232,6 +243,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Call the JS runtime function with |context_index| and arguments |args|.
BytecodeArrayBuilder& CallJSRuntime(int context_index, RegisterList args);
+  // Call the constructor in |args[0]| with new_target in |args[1]| and the
+  // arguments starting at |args[2]|. The final argument must be a spread.
+ BytecodeArrayBuilder& NewWithSpread(RegisterList args);
+
// Operators (register holds the lhs value, accumulator holds the rhs value).
// Type feedback will be recorded in the |feedback_slot|
BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg,
@@ -245,6 +261,11 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& LogicalNot();
BytecodeArrayBuilder& TypeOf();
+ // Expects a heap object in the accumulator. Returns its super constructor in
+ // the register |out| if it passes the IsConstructor test. Otherwise, it
+ // throws a TypeError exception.
+ BytecodeArrayBuilder& GetSuperConstructor(Register out);
+
// Deletes property from an object. This expects that accumulator contains
// the key to be deleted and the register contains a reference to the object.
BytecodeArrayBuilder& Delete(Register object, LanguageMode language_mode);
@@ -266,12 +287,17 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNotHole(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfJSReceiver(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfNull(BytecodeLabel* label);
BytecodeArrayBuilder& JumpIfUndefined(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
BytecodeArrayBuilder& StackCheck(int position);
+ // Sets the pending message to the value in the accumulator, and returns the
+ // previous pending message in the accumulator.
+ BytecodeArrayBuilder& SetPendingMessage();
+
BytecodeArrayBuilder& Throw();
BytecodeArrayBuilder& ReThrow();
BytecodeArrayBuilder& Return();
@@ -302,6 +328,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Gets a constant pool entry for the |object|.
+ size_t GetConstantPoolEntry(Handle<Object> object);
// Allocates a slot in the constant pool which can later be inserted.
size_t AllocateConstantPoolEntry();
  // Inserts an entry into an allocated constant pool entry.
@@ -347,7 +375,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
private:
friend class BytecodeRegisterAllocator;
- template <OperandType... operand_types>
+ template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
friend class BytecodeNodeBuilder;
// Returns the current source position for the given |bytecode|.
@@ -367,15 +396,13 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Set position for return.
void SetReturnPosition();
- // Gets a constant pool entry for the |object|.
- size_t GetConstantPoolEntry(Handle<Object> object);
-
  // Not implemented as the illegal bytecode is used internally
  // to indicate a bytecode field is not valid or an error has occurred
// during bytecode generation.
BytecodeArrayBuilder& Illegal();
- void PrepareToOutputBytecode(Bytecode bytecode);
+ template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ void PrepareToOutputBytecode();
void LeaveBasicBlock() { return_seen_in_block_ = false; }
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
index e596b11a05..0248dfda46 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.cc
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -3,9 +3,6 @@
// found in the LICENSE file.
#include "src/interpreter/bytecode-array-iterator.h"
-
-#include "src/interpreter/bytecode-decoder.h"
-#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -14,180 +11,14 @@ namespace interpreter {
BytecodeArrayIterator::BytecodeArrayIterator(
Handle<BytecodeArray> bytecode_array)
- : bytecode_array_(bytecode_array),
- bytecode_offset_(0),
- operand_scale_(OperandScale::kSingle),
- prefix_offset_(0) {
- UpdateOperandScale();
-}
+ : BytecodeArrayAccessor(bytecode_array, 0) {}
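+
+// All operand decoding now lives in BytecodeArrayAccessor; the iterator only
+// adds linear Advance() and done() on top.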
void BytecodeArrayIterator::Advance() {
- bytecode_offset_ += current_bytecode_size();
- UpdateOperandScale();
-}
-
-void BytecodeArrayIterator::UpdateOperandScale() {
- if (!done()) {
- uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- if (Bytecodes::IsPrefixScalingBytecode(current_bytecode)) {
- operand_scale_ =
- Bytecodes::PrefixBytecodeToOperandScale(current_bytecode);
- prefix_offset_ = 1;
- } else {
- operand_scale_ = OperandScale::kSingle;
- prefix_offset_ = 0;
- }
- }
+ SetOffset(current_offset() + current_bytecode_size());
}
bool BytecodeArrayIterator::done() const {
- return bytecode_offset_ >= bytecode_array()->length();
-}
-
-Bytecode BytecodeArrayIterator::current_bytecode() const {
- DCHECK(!done());
- uint8_t current_byte =
- bytecode_array()->get(bytecode_offset_ + current_prefix_offset());
- Bytecode current_bytecode = Bytecodes::FromByte(current_byte);
- DCHECK(!Bytecodes::IsPrefixScalingBytecode(current_bytecode));
- return current_bytecode;
-}
-
-int BytecodeArrayIterator::current_bytecode_size() const {
- return current_prefix_offset() +
- Bytecodes::Size(current_bytecode(), current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeUnsignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-int32_t BytecodeArrayIterator::GetSignedOperand(
- int operand_index, OperandType operand_type) const {
- DCHECK_GE(operand_index, 0);
- DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- DCHECK_EQ(operand_type,
- Bytecodes::GetOperandType(current_bytecode(), operand_index));
- DCHECK(!Bytecodes::IsUnsignedOperandType(operand_type));
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeSignedOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-uint32_t BytecodeArrayIterator::GetFlagOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kFlag8);
- return GetUnsignedOperand(operand_index, OperandType::kFlag8);
-}
-
-uint32_t BytecodeArrayIterator::GetUnsignedImmediateOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kUImm);
- return GetUnsignedOperand(operand_index, OperandType::kUImm);
-}
-
-int32_t BytecodeArrayIterator::GetImmediateOperand(int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kImm);
- return GetSignedOperand(operand_index, OperandType::kImm);
-}
-
-uint32_t BytecodeArrayIterator::GetRegisterCountOperand(
- int operand_index) const {
- DCHECK_EQ(Bytecodes::GetOperandType(current_bytecode(), operand_index),
- OperandType::kRegCount);
- return GetUnsignedOperand(operand_index, OperandType::kRegCount);
-}
-
-uint32_t BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK_EQ(operand_type, OperandType::kIdx);
- return GetUnsignedOperand(operand_index, operand_type);
-}
-
-Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- const uint8_t* operand_start =
- bytecode_array()->GetFirstBytecodeAddress() + bytecode_offset_ +
- current_prefix_offset() +
- Bytecodes::GetOperandOffset(current_bytecode(), operand_index,
- current_operand_scale());
- return BytecodeDecoder::DecodeRegisterOperand(operand_start, operand_type,
- current_operand_scale());
-}
-
-int BytecodeArrayIterator::GetRegisterOperandRange(int operand_index) const {
- DCHECK_LE(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
- const OperandType* operand_types =
- Bytecodes::GetOperandTypes(current_bytecode());
- OperandType operand_type = operand_types[operand_index];
- DCHECK(Bytecodes::IsRegisterOperandType(operand_type));
- if (operand_type == OperandType::kRegList) {
- return GetRegisterCountOperand(operand_index + 1);
- } else {
- return Bytecodes::GetNumberOfRegistersRepresentedBy(operand_type);
- }
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetRuntimeIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kRuntimeId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return static_cast<Runtime::FunctionId>(raw_id);
-}
-
-Runtime::FunctionId BytecodeArrayIterator::GetIntrinsicIdOperand(
- int operand_index) const {
- OperandType operand_type =
- Bytecodes::GetOperandType(current_bytecode(), operand_index);
- DCHECK(operand_type == OperandType::kIntrinsicId);
- uint32_t raw_id = GetUnsignedOperand(operand_index, operand_type);
- return IntrinsicsHelper::ToRuntimeId(
- static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
-}
-
-Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
- int operand_index) const {
- return FixedArray::get(bytecode_array()->constant_pool(),
- GetIndexOperand(operand_index),
- bytecode_array()->GetIsolate());
-}
-
-
-int BytecodeArrayIterator::GetJumpTargetOffset() const {
- Bytecode bytecode = current_bytecode();
- if (interpreter::Bytecodes::IsJumpImmediate(bytecode)) {
- int relative_offset = GetImmediateOperand(0);
- return current_offset() + relative_offset + current_prefix_offset();
- } else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
- Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
- return current_offset() + smi->value() + current_prefix_offset();
- } else {
- UNREACHABLE();
- return kMinInt;
- }
+ return current_offset() >= bytecode_array()->length();
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
index 03279cbd43..7ec9d1288c 100644
--- a/deps/v8/src/interpreter/bytecode-array-iterator.h
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -1,64 +1,25 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
-#include "src/globals.h"
-#include "src/handles.h"
-#include "src/interpreter/bytecode-register.h"
-#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
-#include "src/runtime/runtime.h"
+#include "src/interpreter/bytecode-array-accessor.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class V8_EXPORT_PRIVATE BytecodeArrayIterator {
+class V8_EXPORT_PRIVATE BytecodeArrayIterator final
+ : public BytecodeArrayAccessor {
public:
explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
void Advance();
bool done() const;
- Bytecode current_bytecode() const;
- int current_bytecode_size() const;
- int current_offset() const { return bytecode_offset_; }
- OperandScale current_operand_scale() const { return operand_scale_; }
- int current_prefix_offset() const { return prefix_offset_; }
- const Handle<BytecodeArray>& bytecode_array() const {
- return bytecode_array_;
- }
-
- uint32_t GetFlagOperand(int operand_index) const;
- uint32_t GetUnsignedImmediateOperand(int operand_index) const;
- int32_t GetImmediateOperand(int operand_index) const;
- uint32_t GetIndexOperand(int operand_index) const;
- uint32_t GetRegisterCountOperand(int operand_index) const;
- Register GetRegisterOperand(int operand_index) const;
- int GetRegisterOperandRange(int operand_index) const;
- Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
- Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
- Handle<Object> GetConstantForIndexOperand(int operand_index) const;
-
- // Returns the absolute offset of the branch target at the current
- // bytecode. It is an error to call this method if the bytecode is
- // not for a jump or conditional jump.
- int GetJumpTargetOffset() const;
private:
- uint32_t GetUnsignedOperand(int operand_index,
- OperandType operand_type) const;
- int32_t GetSignedOperand(int operand_index, OperandType operand_type) const;
-
- void UpdateOperandScale();
-
- Handle<BytecodeArray> bytecode_array_;
- int bytecode_offset_;
- OperandScale operand_scale_;
- int prefix_offset_;
-
DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
};
@@ -66,4 +27,4 @@ class V8_EXPORT_PRIVATE BytecodeArrayIterator {
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_GRAPH_ITERATOR_H_
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.cc b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
new file mode 100644
index 0000000000..f499887ccb
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.cc
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayRandomIterator::BytecodeArrayRandomIterator(
+ Handle<BytecodeArray> bytecode_array, Zone* zone)
+ : BytecodeArrayAccessor(bytecode_array, 0), offsets_(zone) {
+ // Run forwards through the bytecode array to determine the offset of each
+ // bytecode.
+ while (current_offset() < bytecode_array->length()) {
+ offsets_.push_back(current_offset());
+ SetOffset(current_offset() + current_bytecode_size());
+ }
+ GoToStart();
+}
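+
+// Usage sketch: iterate the array in reverse, e.g.
+//   BytecodeArrayRandomIterator it(bytecode_array, zone);
+//   for (it.GoToEnd(); it.IsValid(); --it) {
+//     Bytecode bytecode = it.current_bytecode();
+//   }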
+
+bool BytecodeArrayRandomIterator::IsValid() const {
+ return current_index_ >= 0 &&
+ static_cast<size_t>(current_index_) < offsets_.size();
+}
+
+void BytecodeArrayRandomIterator::UpdateOffsetFromIndex() {
+ if (IsValid()) {
+ SetOffset(offsets_[current_index_]);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-array-random-iterator.h b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
new file mode 100644
index 0000000000..7d559ea176
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-random-iterator.h
@@ -0,0 +1,78 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
+
+#include "src/interpreter/bytecode-array-accessor.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class V8_EXPORT_PRIVATE BytecodeArrayRandomIterator final
+ : public BytecodeArrayAccessor {
+ public:
+ explicit BytecodeArrayRandomIterator(Handle<BytecodeArray> bytecode_array,
+ Zone* zone);
+
+ BytecodeArrayRandomIterator& operator++() {
+ ++current_index_;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+ BytecodeArrayRandomIterator& operator--() {
+ --current_index_;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ BytecodeArrayRandomIterator& operator+=(int offset) {
+ current_index_ += offset;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ BytecodeArrayRandomIterator& operator-=(int offset) {
+ current_index_ -= offset;
+ UpdateOffsetFromIndex();
+ return *this;
+ }
+
+ int current_index() const { return current_index_; }
+
+ size_t size() const { return offsets_.size(); }
+
+ void GoToIndex(int index) {
+ current_index_ = index;
+ UpdateOffsetFromIndex();
+ }
+ void GoToStart() {
+ current_index_ = 0;
+ UpdateOffsetFromIndex();
+ }
+ void GoToEnd() {
+ DCHECK_LT(offsets_.size() - 1, static_cast<size_t>(INT_MAX));
+ current_index_ = static_cast<int>(offsets_.size() - 1);
+ UpdateOffsetFromIndex();
+ }
+
+ bool IsValid() const;
+
+ private:
+ ZoneVector<int> offsets_;
+ int current_index_;
+
+ void UpdateOffsetFromIndex();
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayRandomIterator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_RANDOM_ITERATOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 28f997b534..dd91564b16 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -9,6 +9,7 @@
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -163,6 +164,8 @@ Bytecode GetJumpWithConstantOperand(Bytecode jump_bytecode) {
return Bytecode::kJumpIfNullConstant;
case Bytecode::kJumpIfUndefined:
return Bytecode::kJumpIfUndefinedConstant;
+ case Bytecode::kJumpIfJSReceiver:
+ return Bytecode::kJumpIfJSReceiverConstant;
default:
UNREACHABLE();
return Bytecode::kIllegal;
@@ -290,7 +293,7 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
delta -= 1;
}
DCHECK_EQ(Bytecode::kJumpLoop, node->bytecode());
- node->set_bytecode(node->bytecode(), delta, node->operand(1));
+ node->update_operand0(delta);
} else {
// The label has not yet been bound so this is a forward reference
// that will be patched when the label is bound. We create a
@@ -308,13 +311,13 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
UNREACHABLE();
break;
case OperandSize::kByte:
- node->set_bytecode(node->bytecode(), k8BitJumpPlaceholder);
+ node->update_operand0(k8BitJumpPlaceholder);
break;
case OperandSize::kShort:
- node->set_bytecode(node->bytecode(), k16BitJumpPlaceholder);
+ node->update_operand0(k16BitJumpPlaceholder);
break;
case OperandSize::kQuad:
- node->set_bytecode(node->bytecode(), k32BitJumpPlaceholder);
+ node->update_operand0(k32BitJumpPlaceholder);
break;
}
}
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 158af13ea7..31ac88c1f7 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/bytecode-flags.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
namespace v8 {
@@ -25,10 +26,11 @@ uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
uint8_t result = FlagsBits::encode(runtime_flags);
if (fast_clone_supported) {
STATIC_ASSERT(
- FastCloneShallowObjectStub::kMaximumClonedProperties <=
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties <=
1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
- DCHECK_LE(properties_count,
- FastCloneShallowObjectStub::kMaximumClonedProperties);
+ DCHECK_LE(
+ properties_count,
+ ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties);
result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
properties_count);
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 99e76725d5..02f6c3bb2c 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -6,6 +6,7 @@
#include "src/ast/compile-time-value.h"
#include "src/ast/scopes.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-stubs.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -496,24 +497,24 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
constant_pool_entry_(0),
has_constant_pool_entry_(false) {}
- void AddFunctionDeclaration(FeedbackVectorSlot slot, FunctionLiteral* func) {
+ void AddFunctionDeclaration(Handle<String> name, FeedbackVectorSlot slot,
+ FunctionLiteral* func) {
DCHECK(!slot.IsInvalid());
- declarations_.push_back(std::make_pair(slot, func));
+ declarations_.push_back(Declaration(name, slot, func));
}
- void AddUndefinedDeclaration(FeedbackVectorSlot slot) {
+ void AddUndefinedDeclaration(Handle<String> name, FeedbackVectorSlot slot) {
DCHECK(!slot.IsInvalid());
- declarations_.push_back(std::make_pair(slot, nullptr));
+ declarations_.push_back(Declaration(name, slot, nullptr));
}
- Handle<FixedArray> AllocateDeclarationPairs(CompilationInfo* info) {
+ Handle<FixedArray> AllocateDeclarations(CompilationInfo* info) {
DCHECK(has_constant_pool_entry_);
int array_index = 0;
- Handle<FixedArray> pairs = info->isolate()->factory()->NewFixedArray(
- static_cast<int>(declarations_.size() * 2), TENURED);
- for (std::pair<FeedbackVectorSlot, FunctionLiteral*> declaration :
- declarations_) {
- FunctionLiteral* func = declaration.second;
+ Handle<FixedArray> data = info->isolate()->factory()->NewFixedArray(
+ static_cast<int>(declarations_.size() * 3), TENURED);
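+    // Each declaration occupies three consecutive slots: the name, the
+    // feedback vector slot (as a Smi), and the initial value.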
+ for (const Declaration& declaration : declarations_) {
+ FunctionLiteral* func = declaration.func;
Handle<Object> initial_value;
if (func == nullptr) {
initial_value = info->isolate()->factory()->undefined_value();
@@ -526,10 +527,11 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
// will set stack overflow.
if (initial_value.is_null()) return Handle<FixedArray>();
- pairs->set(array_index++, Smi::FromInt(declaration.first.ToInt()));
- pairs->set(array_index++, *initial_value);
+ data->set(array_index++, *declaration.name);
+ data->set(array_index++, Smi::FromInt(declaration.slot.ToInt()));
+ data->set(array_index++, *initial_value);
}
- return pairs;
+ return data;
}
size_t constant_pool_entry() {
@@ -547,7 +549,17 @@ class BytecodeGenerator::GlobalDeclarationsBuilder final : public ZoneObject {
bool empty() { return declarations_.empty(); }
private:
- ZoneVector<std::pair<FeedbackVectorSlot, FunctionLiteral*>> declarations_;
+ struct Declaration {
+ Declaration() : slot(FeedbackVectorSlot::Invalid()), func(nullptr) {}
+ Declaration(Handle<String> name, FeedbackVectorSlot slot,
+ FunctionLiteral* func)
+ : name(name), slot(slot), func(func) {}
+
+ Handle<String> name;
+ FeedbackVectorSlot slot;
+ FunctionLiteral* func;
+ };
+ ZoneVector<Declaration> declarations_;
size_t constant_pool_entry_;
bool has_constant_pool_entry_;
};
@@ -565,6 +577,8 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
global_declarations_(0, info->zone()),
function_literals_(0, info->zone()),
native_function_literals_(0, info->zone()),
+ object_literals_(0, info->zone()),
+ array_literals_(0, info->zone()),
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
@@ -572,24 +586,23 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
generator_state_(),
loop_depth_(0),
home_object_symbol_(info->isolate()->factory()->home_object_symbol()),
- empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()) {
- AstValueFactory* ast_value_factory = info->parse_info()->ast_value_factory();
- const AstRawString* prototype_string = ast_value_factory->prototype_string();
- ast_value_factory->Internalize(info->isolate());
- prototype_string_ = prototype_string->string();
-}
+ iterator_symbol_(info->isolate()->factory()->iterator_symbol()),
+ prototype_string_(info->isolate()->factory()->prototype_string()),
+ empty_fixed_array_(info->isolate()->factory()->empty_fixed_array()),
+ undefined_string_(
+ info->isolate()->ast_string_constants()->undefined_string()) {}
Handle<BytecodeArray> BytecodeGenerator::FinalizeBytecode(Isolate* isolate) {
- AllocateDeferredConstants();
+ AllocateDeferredConstants(isolate);
if (HasStackOverflow()) return Handle<BytecodeArray>();
return builder()->ToBytecodeArray(isolate);
}
-void BytecodeGenerator::AllocateDeferredConstants() {
+void BytecodeGenerator::AllocateDeferredConstants(Isolate* isolate) {
// Build global declaration pair arrays.
for (GlobalDeclarationsBuilder* globals_builder : global_declarations_) {
Handle<FixedArray> declarations =
- globals_builder->AllocateDeclarationPairs(info());
+ globals_builder->AllocateDeclarations(info());
if (declarations.is_null()) return SetStackOverflow();
builder()->InsertConstantPoolEntryAt(globals_builder->constant_pool_entry(),
declarations);
@@ -614,6 +627,27 @@ void BytecodeGenerator::AllocateDeferredConstants() {
if (shared_info.is_null()) return SetStackOverflow();
builder()->InsertConstantPoolEntryAt(literal.second, shared_info);
}
+
+ // Build object literal constant properties
+ for (std::pair<ObjectLiteral*, size_t> literal : object_literals_) {
+ ObjectLiteral* object_literal = literal.first;
+ if (object_literal->properties_count() > 0) {
+      // If the constant properties array is empty, it was already added to
+      // the constant pool when the object literal was visited.
+ Handle<FixedArray> constant_properties =
+ object_literal->GetOrBuildConstantProperties(isolate);
+
+ builder()->InsertConstantPoolEntryAt(literal.second, constant_properties);
+ }
+ }
+
+ // Build array literal constant elements
+ for (std::pair<ArrayLiteral*, size_t> literal : array_literals_) {
+ ArrayLiteral* array_literal = literal.first;
+ Handle<ConstantElementsPair> constant_elements =
+ array_literal->GetOrBuildConstantElements(isolate);
+ builder()->InsertConstantPoolEntryAt(literal.second, constant_elements);
+ }
}
void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
@@ -711,22 +745,25 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder) {
// Recall that stmt->yield_count() is always zero inside ordinary
// (i.e. non-generator) functions.
+ if (stmt->yield_count() == 0) {
+ loop_builder->LoopHeader();
+ } else {
+ // Collect all labels for generator resume points within the loop (if any)
+ // so that they can be bound to the loop header below. Also create fresh
+ // labels for these resume points, to be used inside the loop.
+ ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
+ size_t first_yield = stmt->first_yield_id();
+ DCHECK_LE(first_yield + stmt->yield_count(),
+ generator_resume_points_.size());
+ for (size_t id = first_yield; id < first_yield + stmt->yield_count();
+ id++) {
+ auto& label = generator_resume_points_[id];
+ resume_points_in_loop.push_back(label);
+ generator_resume_points_[id] = BytecodeLabel();
+ }
+
+ loop_builder->LoopHeader(&resume_points_in_loop);
- // Collect all labels for generator resume points within the loop (if any) so
- // that they can be bound to the loop header below. Also create fresh labels
- // for these resume points, to be used inside the loop.
- ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
- size_t first_yield = stmt->first_yield_id();
- DCHECK_LE(first_yield + stmt->yield_count(), generator_resume_points_.size());
- for (size_t id = first_yield; id < first_yield + stmt->yield_count(); id++) {
- auto& label = generator_resume_points_[id];
- resume_points_in_loop.push_back(label);
- generator_resume_points_[id] = BytecodeLabel();
- }
-
- loop_builder->LoopHeader(&resume_points_in_loop);
-
- if (stmt->yield_count() > 0) {
// If we are not resuming, fall through to loop body.
// If we are resuming, perform state dispatch.
BytecodeLabel not_resuming;
@@ -751,10 +788,13 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
->LoadAccumulatorWithRegister(generator_object)
.JumpIfUndefined(&regular_call);
- // This is a resume call. Restore registers and perform state dispatch.
- // (The current context has already been restored by the trampoline.)
+ // This is a resume call. Restore the current context and the registers, then
+ // perform state dispatch.
+ Register dummy = register_allocator()->NewRegister();
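+  // PushContext installs the context loaded by GeneratorGetContext and
+  // saves the previous context in |dummy|, which is intentionally never read.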
builder()
- ->ResumeGenerator(generator_object)
+ ->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
+ .PushContext(dummy)
+ .ResumeGenerator(generator_object)
.StoreAccumulatorInRegister(generator_state_);
BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
generator_resume_points_);
@@ -795,7 +835,7 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
case VariableLocation::UNALLOCATED: {
DCHECK(!variable->binding_needs_init());
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddUndefinedDeclaration(slot);
+ globals_builder()->AddUndefinedDeclaration(variable->name(), slot);
break;
}
case VariableLocation::LOCAL:
@@ -849,7 +889,8 @@ void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
switch (variable->location()) {
case VariableLocation::UNALLOCATED: {
FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
- globals_builder()->AddFunctionDeclaration(slot, decl->fun());
+ globals_builder()->AddFunctionDeclaration(variable->name(), slot,
+ decl->fun());
break;
}
case VariableLocation::PARAMETER:
@@ -1300,7 +1341,7 @@ void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// If requested, clear message object as we enter the catch block.
if (stmt->clear_pending_message()) {
- builder()->CallRuntime(Runtime::kInterpreterClearPendingMessage);
+ builder()->LoadTheHole().SetPendingMessage();
}
// Load the catch context into the accumulator.
@@ -1359,16 +1400,15 @@ void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
Register message = context; // Reuse register.
// Clear message object as we enter the finally block.
- builder()
- ->CallRuntime(Runtime::kInterpreterClearPendingMessage)
- .StoreAccumulatorInRegister(message);
+ builder()->LoadTheHole().SetPendingMessage().StoreAccumulatorInRegister(
+ message);
// Evaluate the finally-block.
Visit(stmt->finally_block());
try_control_builder.EndFinally();
// Pending message object is restored on exit.
- builder()->CallRuntime(Runtime::kInterpreterSetPendingMessage, message);
+ builder()->LoadAccumulatorWithRegister(message).SetPendingMessage();
// Dynamic dispatch after the finally-block.
commands.ApplyDeferredCommands();
@@ -1383,25 +1423,39 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
uint8_t flags = CreateClosureFlags::Encode(expr->pretenure(),
scope()->is_function_scope());
size_t entry = builder()->AllocateConstantPoolEntry();
- builder()->CreateClosure(entry, flags);
+ int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+ builder()->CreateClosure(entry, slot_index, flags);
function_literals_.push_back(std::make_pair(expr, entry));
}
void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
- VisitClassLiteralForRuntimeDefinition(expr);
+ Register constructor = VisitForRegisterValue(expr->constructor());
+ {
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ VisitForAccumulatorValueOrTheHole(expr->extends());
+ builder()
+ ->StoreAccumulatorInRegister(args[0])
+ .MoveRegister(constructor, args[1])
+ .LoadLiteral(Smi::FromInt(expr->start_position()))
+ .StoreAccumulatorInRegister(args[2])
+ .LoadLiteral(Smi::FromInt(expr->end_position()))
+ .StoreAccumulatorInRegister(args[3])
+ .CallRuntime(Runtime::kDefineClass, args);
+ }
+ Register prototype = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(prototype);
- // Load the "prototype" from the constructor.
- RegisterList args = register_allocator()->NewRegisterList(2);
- Register literal = args[0];
- Register prototype = args[1];
- FeedbackVectorSlot slot = expr->PrototypeSlot();
- builder()
- ->StoreAccumulatorInRegister(literal)
- .LoadNamedProperty(literal, prototype_string(), feedback_index(slot))
- .StoreAccumulatorInRegister(prototype);
+ if (FunctionLiteral::NeedsHomeObject(expr->constructor())) {
+ // Prototype is already in the accumulator.
+ builder()->StoreNamedProperty(constructor, home_object_symbol(),
+ feedback_index(expr->HomeObjectSlot()),
+ language_mode());
+ }
- VisitClassLiteralProperties(expr, literal, prototype);
- builder()->CallRuntime(Runtime::kToFastProperties, literal);
+ VisitClassLiteralProperties(expr, constructor, prototype);
+ BuildClassLiteralNameProperty(expr, constructor);
+ builder()->CallRuntime(Runtime::kToFastProperties, constructor);
// Assign to class variable.
if (expr->class_variable_proxy() != nullptr) {
VariableProxy* proxy = expr->class_variable_proxy();
@@ -1413,28 +1467,12 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
}
-void BytecodeGenerator::VisitClassLiteralForRuntimeDefinition(
- ClassLiteral* expr) {
- RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(4);
- VisitForAccumulatorValueOrTheHole(expr->extends());
- builder()->StoreAccumulatorInRegister(args[0]);
- VisitForRegisterValue(expr->constructor(), args[1]);
- builder()
- ->LoadLiteral(Smi::FromInt(expr->start_position()))
- .StoreAccumulatorInRegister(args[2])
- .LoadLiteral(Smi::FromInt(expr->end_position()))
- .StoreAccumulatorInRegister(args[3])
- .CallRuntime(Runtime::kDefineClass, args);
-}
-
void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
- Register literal,
+ Register constructor,
Register prototype) {
RegisterAllocationScope register_scope(this);
- RegisterList args = register_allocator()->NewRegisterList(5);
- Register receiver = args[0], key = args[1], value = args[2], attr = args[3],
- set_function_name = args[4];
+ RegisterList args = register_allocator()->NewRegisterList(4);
+ Register receiver = args[0], key = args[1], value = args[2], attr = args[3];
bool attr_assigned = false;
Register old_receiver = Register::invalid_value();
@@ -1444,14 +1482,18 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
ClassLiteral::Property* property = expr->properties()->at(i);
    // Set up receiver.
- Register new_receiver = property->is_static() ? literal : prototype;
+ Register new_receiver = property->is_static() ? constructor : prototype;
if (new_receiver != old_receiver) {
builder()->MoveRegister(new_receiver, receiver);
old_receiver = new_receiver;
}
- VisitForAccumulatorValue(property->key());
- builder()->ConvertAccumulatorToName(key);
+ if (property->key()->IsStringLiteral()) {
+ VisitForRegisterValue(property->key(), key);
+ } else {
+ VisitForAccumulatorValue(property->key());
+ builder()->ConvertAccumulatorToName(key);
+ }
if (property->is_static() && property->is_computed_name()) {
      // The static prototype property is read-only. We handle the non-computed
@@ -1479,20 +1521,26 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
switch (property->kind()) {
case ClassLiteral::Property::METHOD: {
+ DataPropertyInLiteralFlags flags = DataPropertyInLiteralFlag::kDontEnum;
+ if (property->NeedsSetFunctionName()) {
+ flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+ }
+
+ FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ DCHECK(!slot.IsInvalid());
+
builder()
- ->LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(set_function_name)
- .CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+ ->LoadAccumulatorWithRegister(value)
+ .StoreDataPropertyInLiteral(receiver, key, flags,
+ feedback_index(slot));
break;
}
case ClassLiteral::Property::GETTER: {
- builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked,
- args.Truncate(4));
+ builder()->CallRuntime(Runtime::kDefineGetterPropertyUnchecked, args);
break;
}
case ClassLiteral::Property::SETTER: {
- builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked,
- args.Truncate(4));
+ builder()->CallRuntime(Runtime::kDefineSetterPropertyUnchecked, args);
break;
}
case ClassLiteral::Property::FIELD: {
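
With StoreDataPropertyInLiteral, the per-property booleans that used to occupy the attr and set_function_name registers ride in a single Flag8 operand instead. A minimal sketch of that bit packing (the bit values here are assumed for illustration):

#include <cassert>
#include <cstdint>

// Sketch of the flag packing used by StoreDataPropertyInLiteral above:
// boolean options travel in one Flag8 operand rather than dedicated
// argument registers.
enum DataPropertyInLiteralFlag : uint8_t {
  kNoFlags = 0,
  kDontEnum = 1 << 0,
  kSetFunctionName = 1 << 1,
};
using DataPropertyInLiteralFlags = uint8_t;

int main() {
  DataPropertyInLiteralFlags flags = kDontEnum;  // class methods are non-enumerable
  bool needs_set_function_name = true;           // example input
  if (needs_set_function_name) flags |= kSetFunctionName;

  assert(flags & kDontEnum);
  assert(flags & kSetFunctionName);
  return 0;
}
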
@@ -1503,10 +1551,23 @@ void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
}
}
+void BytecodeGenerator::BuildClassLiteralNameProperty(ClassLiteral* expr,
+ Register literal) {
+ if (!expr->has_name_static_property() &&
+ !expr->constructor()->raw_name()->IsEmpty()) {
+ Runtime::FunctionId runtime_id =
+ expr->has_static_computed_names()
+ ? Runtime::kInstallClassNameAccessorWithCheck
+ : Runtime::kInstallClassNameAccessor;
+ builder()->CallRuntime(runtime_id, literal);
+ }
+}
+
void BytecodeGenerator::VisitNativeFunctionLiteral(
NativeFunctionLiteral* expr) {
size_t entry = builder()->AllocateConstantPoolEntry();
- builder()->CreateClosure(entry, NOT_TENURED);
+ int slot_index = feedback_index(expr->LiteralFeedbackSlot());
+ builder()->CreateClosure(entry, slot_index, NOT_TENURED);
native_function_literals_.push_back(std::make_pair(expr, entry));
}
@@ -1567,19 +1628,24 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
- // Copy the literal boilerplate.
+ // Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
- FastCloneShallowObjectStub::IsSupported(expr),
- FastCloneShallowObjectStub::PropertiesCount(expr->properties_count()),
+ expr->IsFastCloningSupported(),
+ ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
+ expr->properties_count()),
expr->ComputeFlags());
+
+ Register literal = register_allocator()->NewRegister();
+ size_t entry;
// If constant properties is an empty fixed array, use our cached
// empty_fixed_array to ensure it's only added to the constant pool once.
- Handle<FixedArray> constant_properties = expr->properties_count() == 0
- ? empty_fixed_array()
- : expr->constant_properties();
- Register literal = register_allocator()->NewRegister();
- builder()->CreateObjectLiteral(constant_properties, expr->literal_index(),
- flags, literal);
+ if (expr->properties_count() == 0) {
+ entry = builder()->GetConstantPoolEntry(empty_fixed_array());
+ } else {
+ entry = builder()->AllocateConstantPoolEntry();
+ object_literals_.push_back(std::make_pair(expr, entry));
+ }
+ builder()->CreateObjectLiteral(entry, expr->literal_index(), flags, literal);
// Store computed values into the literal.
int property_index = 0;
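
Instead of embedding the boilerplate object directly, the generator now reserves a constant pool index and records the literal in object_literals_ so AllocateDeferredConstants(isolate) can fill the slot once a heap is available. A simplified, self-contained sketch of that reserve-then-patch scheme (the container types are stand-ins, not V8's):

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Sketch: reserve a constant pool slot during bytecode generation, patch in
// the real value later when heap allocation is allowed.
struct DeferredConstantPool {
  std::vector<std::string> entries;  // stand-in for Handle<Object> slots

  size_t AllocateEntry() {
    entries.push_back("<reserved>");
    return entries.size() - 1;
  }
  void SetEntryAt(size_t index, std::string value) {
    entries[index] = std::move(value);
  }
};

int main() {
  DeferredConstantPool pool;
  std::vector<std::pair<const char*, size_t>> object_literals;

  // Generation phase: no heap access, just reserve the slot.
  size_t entry = pool.AllocateEntry();
  object_literals.emplace_back("{a: 1, b: 2}", entry);

  // AllocateDeferredConstants-style phase: build the boilerplate and patch
  // it into the reserved slot.
  for (auto& literal : object_literals) {
    pool.SetEntryAt(literal.second,
                    std::string("boilerplate for ") + literal.first);
  }
  return 0;
}
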
@@ -1592,6 +1658,7 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
RegisterAllocationScope inner_register_scope(this);
Literal* key = property->key()->AsLiteral();
switch (property->kind()) {
+ case ObjectLiteral::Property::SPREAD:
case ObjectLiteral::Property::CONSTANT:
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1700,18 +1767,26 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
- RegisterList args = register_allocator()->NewRegisterList(5);
- builder()->MoveRegister(literal, args[0]);
+ Register key = register_allocator()->NewRegister();
VisitForAccumulatorValue(property->key());
- builder()->ConvertAccumulatorToName(args[1]);
- VisitForRegisterValue(property->value(), args[2]);
- VisitSetHomeObject(args[2], literal, property);
+ builder()->ConvertAccumulatorToName(key);
+
+ Register value = VisitForRegisterValue(property->value());
+ VisitSetHomeObject(value, literal, property);
+
+ DataPropertyInLiteralFlags data_property_flags =
+ DataPropertyInLiteralFlag::kNoFlags;
+ if (property->NeedsSetFunctionName()) {
+ data_property_flags |= DataPropertyInLiteralFlag::kSetFunctionName;
+ }
+
+ FeedbackVectorSlot slot = property->GetStoreDataPropertySlot();
+ DCHECK(!slot.IsInvalid());
+
builder()
- ->LoadLiteral(Smi::FromInt(NONE))
- .StoreAccumulatorInRegister(args[3])
- .LoadLiteral(Smi::FromInt(property->NeedsSetFunctionName()))
- .StoreAccumulatorInRegister(args[4]);
- builder()->CallRuntime(Runtime::kDefineDataPropertyInLiteral, args);
+ ->LoadAccumulatorWithRegister(value)
+ .StoreDataPropertyInLiteral(literal, key, data_property_flags,
+ feedback_index(slot));
break;
}
case ObjectLiteral::Property::GETTER:
@@ -1732,6 +1807,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
builder()->CallRuntime(function_id, args);
break;
}
+ case ObjectLiteral::Property::SPREAD: {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()->MoveRegister(literal, args[0]);
+ VisitForRegisterValue(property->value(), args[1]);
+ builder()->CallRuntime(Runtime::kCopyDataProperties, args);
+ break;
+ }
case ObjectLiteral::Property::PROTOTYPE:
UNREACHABLE(); // Handled specially above.
break;
@@ -1743,14 +1825,13 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// Deep-copy the literal boilerplate.
- int runtime_flags = expr->ComputeFlags();
- bool use_fast_shallow_clone =
- (runtime_flags & ArrayLiteral::kShallowElements) != 0 &&
- expr->values()->length() <= JSArray::kInitialMaxFastElementArray;
- uint8_t flags =
- CreateArrayLiteralFlags::Encode(use_fast_shallow_clone, runtime_flags);
- builder()->CreateArrayLiteral(expr->constant_elements(),
- expr->literal_index(), flags);
+ uint8_t flags = CreateArrayLiteralFlags::Encode(
+ expr->IsFastCloningSupported(), expr->ComputeFlags());
+
+ size_t entry = builder()->AllocateConstantPoolEntry();
+ builder()->CreateArrayLiteral(entry, expr->literal_index(), flags);
+ array_literals_.push_back(std::make_pair(expr, entry));
+
Register index, literal;
// Evaluate all the non-constant subexpressions and store them into the
@@ -1820,7 +1901,15 @@ void BytecodeGenerator::BuildVariableLoad(Variable* variable,
break;
}
case VariableLocation::UNALLOCATED: {
- builder()->LoadGlobal(feedback_index(slot), typeof_mode);
+ // The global identifier "undefined" is immutable. Everything
+ // else could be reassigned. For performance, we do a pointer comparison
+ // rather than checking if the raw_name is really "undefined".
+ if (variable->raw_name() == undefined_string()) {
+ builder()->LoadUndefined();
+ } else {
+ builder()->LoadGlobal(variable->name(), feedback_index(slot),
+ typeof_mode);
+ }
break;
}
case VariableLocation::CONTEXT: {
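
The fast path above is sound only because the parser interns identifiers: every occurrence of "undefined" maps to the same AstRawString, so a pointer compare is equivalent to a content compare. A small standalone sketch of that interning invariant:

#include <cassert>
#include <string>
#include <unordered_set>

// Sketch: interning gives each distinct identifier one stable address, so
// equality checks reduce to pointer comparison.
class StringTable {
 public:
  const std::string* Intern(const std::string& s) {
    return &*strings_.insert(s).first;
  }

 private:
  std::unordered_set<std::string> strings_;
};

int main() {
  StringTable table;
  const std::string* undefined_string = table.Intern("undefined");

  const std::string* name1 = table.Intern("undefined");
  const std::string* name2 = table.Intern("x");

  assert(name1 == undefined_string);  // pointer equality, no string compare
  assert(name2 != undefined_string);
  return 0;
}
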
@@ -1920,25 +2009,19 @@ void BytecodeGenerator::BuildThrowIfHole(Handle<String> name) {
builder()->Bind(&no_reference_error);
}
-void BytecodeGenerator::BuildThrowIfNotHole(Handle<String> name) {
- // TODO(interpreter): Can the parser reduce the number of checks
- // performed? Or should there be a ThrowIfNotHole bytecode.
- BytecodeLabel no_reference_error, reference_error;
- builder()
- ->JumpIfNotHole(&reference_error)
- .Jump(&no_reference_error)
- .Bind(&reference_error);
- BuildThrowReferenceError(name);
- builder()->Bind(&no_reference_error);
-}
-
void BytecodeGenerator::BuildHoleCheckForVariableAssignment(Variable* variable,
Token::Value op) {
if (variable->is_this() && variable->mode() == CONST && op == Token::INIT) {
// Perform an initialization check for 'this'. 'this' variable is the
// only variable able to trigger bind operations outside the TDZ
// via 'super' calls.
- BuildThrowIfNotHole(variable->name());
+ BytecodeLabel no_reference_error, reference_error;
+ builder()
+ ->JumpIfNotHole(&reference_error)
+ .Jump(&no_reference_error)
+ .Bind(&reference_error)
+ .CallRuntime(Runtime::kThrowSuperAlreadyCalledError)
+ .Bind(&no_reference_error);
} else {
// Perform an initialization check for let/const declared variables.
// E.g. let x = (x = 20); is not allowed.
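
Both paths are hole-sentinel checks; the 'this' case inverts the test because a derived constructor's 'this' must still be the hole when super() initializes it, and a non-hole value means super() already ran. A toy model of that inverted check, using std::optional as the hole sentinel:

#include <cassert>
#include <optional>
#include <stdexcept>

// Toy model: a binding in its temporal dead zone holds a sentinel "hole";
// nullopt plays that role here.
using Binding = std::optional<int>;

void InitializeThis(Binding& this_binding, int value) {
  if (this_binding.has_value()) {
    // Mirrors Runtime::kThrowSuperAlreadyCalledError in the bytecode above.
    throw std::runtime_error("super() already called");
  }
  this_binding = value;
}

int main() {
  Binding this_binding;             // starts as the hole
  InitializeThis(this_binding, 1);  // first super() call: fine
  try {
    InitializeThis(this_binding, 2);  // second super() call: throws
    assert(false);
  } catch (const std::runtime_error&) {
  }
  return 0;
}
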
@@ -2477,29 +2560,44 @@ void BytecodeGenerator::VisitCallSuper(Call* expr) {
SuperCallReference* super = expr->expression()->AsSuperCallReference();
// Prepare the constructor to the super call.
- Register this_function = VisitForRegisterValue(super->this_function_var());
- builder()->CallRuntime(Runtime::kInlineGetSuperConstructor, this_function);
-
- Register constructor = this_function; // Re-use dead this_function register.
- builder()->StoreAccumulatorInRegister(constructor);
-
- RegisterList args = register_allocator()->NewGrowableRegisterList();
- VisitArguments(expr->arguments(), &args);
-
- // The new target is loaded into the accumulator from the
- // {new.target} variable.
- VisitForAccumulatorValue(super->new_target_var());
+ VisitForAccumulatorValue(super->this_function_var());
+ Register constructor = register_allocator()->NewRegister();
+ builder()->GetSuperConstructor(constructor);
+
+ ZoneList<Expression*>* args = expr->arguments();
+
+ // When a super call contains a spread, a CallSuper AST node is only created
+ // if there is exactly one spread, and it is the last argument.
+ if (!args->is_empty() && args->last()->IsSpread()) {
+ RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+ Register constructor_arg =
+ register_allocator()->GrowRegisterList(&args_regs);
+ builder()->MoveRegister(constructor, constructor_arg);
+      // Reserve an argument register for new.target in the correct place for
+      // the runtime call.
+ // TODO(petermarshall): Remove this when changing bytecode to use the new
+ // stub.
+ Register new_target = register_allocator()->GrowRegisterList(&args_regs);
+ VisitArguments(args, &args_regs);
+ VisitForRegisterValue(super->new_target_var(), new_target);
+ builder()->NewWithSpread(args_regs);
+ } else {
+ RegisterList args_regs = register_allocator()->NewGrowableRegisterList();
+ VisitArguments(args, &args_regs);
+ // The new target is loaded into the accumulator from the
+ // {new.target} variable.
+ VisitForAccumulatorValue(super->new_target_var());
- // Call construct.
- builder()->SetExpressionPosition(expr);
- // TODO(turbofan): For now we do gather feedback on super constructor
- // calls, utilizing the existing machinery to inline the actual call
- // target and the JSCreate for the implicit receiver allocation. This
- // is not an ideal solution for super constructor calls, but it gets
- // the job done for now. In the long run we might want to revisit this
- // and come up with a better way.
- int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
- builder()->New(constructor, args, feedback_slot_index);
+ // Call construct.
+ builder()->SetExpressionPosition(expr);
+ // TODO(turbofan): For now we do gather feedback on super constructor
+ // calls, utilizing the existing machinery to inline the actual call
+ // target and the JSCreate for the implicit receiver allocation. This
+ // is not an ideal solution for super constructor calls, but it gets
+ // the job done for now. In the long run we might want to revisit this
+ // and come up with a better way.
+ int const feedback_slot_index = feedback_index(expr->CallFeedbackICSlot());
+ builder()->New(constructor, args_regs, feedback_slot_index);
+ }
}
void BytecodeGenerator::VisitCallNew(CallNew* expr) {
@@ -2800,15 +2898,43 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* expr) {
Register lhs = VisitForRegisterValue(expr->left());
VisitForAccumulatorValue(expr->right());
FeedbackVectorSlot slot = expr->BinaryOperationFeedbackSlot();
+ builder()->SetExpressionPosition(expr);
builder()->BinaryOperation(expr->op(), lhs, feedback_index(slot));
}
-void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void BytecodeGenerator::VisitSpread(Spread* expr) { Visit(expr->expression()); }
void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
UNREACHABLE();
}
+void BytecodeGenerator::VisitGetIterator(GetIterator* expr) {
+ FeedbackVectorSlot load_slot = expr->IteratorPropertyFeedbackSlot();
+ FeedbackVectorSlot call_slot = expr->IteratorCallFeedbackSlot();
+
+ RegisterList args = register_allocator()->NewRegisterList(1);
+ Register method = register_allocator()->NewRegister();
+ Register obj = args[0];
+
+ VisitForAccumulatorValue(expr->iterable());
+
+ // Let method be GetMethod(obj, @@iterator).
+ builder()
+ ->StoreAccumulatorInRegister(obj)
+ .LoadNamedProperty(obj, iterator_symbol(), feedback_index(load_slot))
+ .StoreAccumulatorInRegister(method);
+
+ // Let iterator be Call(method, obj).
+ builder()->Call(method, args, feedback_index(call_slot),
+ Call::NAMED_PROPERTY_CALL);
+
+ // If Type(iterator) is not Object, throw a TypeError exception.
+ BytecodeLabel no_type_error;
+ builder()->JumpIfJSReceiver(&no_type_error);
+ builder()->CallRuntime(Runtime::kThrowSymbolIteratorInvalid);
+ builder()->Bind(&no_type_error);
+}
+
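
The comments in the hunk spell out the spec steps; the sketch below restates them as plain C++ so the control flow is visible outside bytecode. Everything here (Value, Method) is invented for illustration:

#include <functional>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct Value;
using Method = std::function<std::shared_ptr<Value>(Value&)>;

struct Value {
  bool is_object = false;
  std::map<std::string, Method> properties;  // "@@iterator" lives here
};

std::shared_ptr<Value> GetIterator(Value& iterable) {
  // Let method be GetMethod(obj, @@iterator) -- the LoadNamedProperty above.
  Method method = iterable.properties.at("@@iterator");
  // Let iterator be Call(method, obj).
  std::shared_ptr<Value> iterator = method(iterable);
  // If Type(iterator) is not Object, throw a TypeError exception;
  // mirrors Runtime::kThrowSymbolIteratorInvalid.
  if (!iterator || !iterator->is_object) {
    throw std::runtime_error("Result of Symbol.iterator is not an object");
  }
  return iterator;
}

int main() {
  Value array_like;
  array_like.is_object = true;
  array_like.properties["@@iterator"] = [](Value&) {
    auto it = std::make_shared<Value>();
    it->is_object = true;
    return it;
  };
  GetIterator(array_like);  // succeeds; a missing/bad @@iterator would throw
  return 0;
}
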
void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
builder()->LoadAccumulatorWithRegister(Register::function_closure());
}
@@ -2930,12 +3056,27 @@ void BytecodeGenerator::BuildNewLocalActivationContext() {
.StoreAccumulatorInRegister(args[2])
.CallRuntime(Runtime::kPushModuleContext, args);
} else {
+ DCHECK(scope->is_function_scope() || scope->is_eval_scope());
int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
- builder()->CreateFunctionContext(slot_count);
+ if (slot_count <=
+ ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+ switch (scope->scope_type()) {
+ case EVAL_SCOPE:
+ builder()->CreateEvalContext(slot_count);
+ break;
+ case FUNCTION_SCOPE:
+ builder()->CreateFunctionContext(slot_count);
+ break;
+ default:
+ UNREACHABLE();
+ }
} else {
- builder()->CallRuntime(Runtime::kNewFunctionContext,
- Register::function_closure());
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(Register::function_closure(), args[0])
+ .LoadLiteral(Smi::FromInt(scope->scope_type()))
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kNewFunctionContext, args);
}
}
}
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index bcab9975d0..f15829dea8 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -53,7 +53,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
enum class TestFallthrough { kThen, kElse, kNone };
void GenerateBytecodeBody();
- void AllocateDeferredConstants();
+ void AllocateDeferredConstants(Isolate* isolate);
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -109,7 +109,6 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildReThrow();
void BuildAbort(BailoutReason bailout_reason);
void BuildThrowIfHole(Handle<String> name);
- void BuildThrowIfNotHole(Handle<String> name);
void BuildThrowReferenceError(Handle<String> name);
void BuildHoleCheckForVariableAssignment(Variable* variable, Token::Value op);
@@ -129,9 +128,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
void VisitCallSuper(Call* call);
- void VisitClassLiteralForRuntimeDefinition(ClassLiteral* expr);
- void VisitClassLiteralProperties(ClassLiteral* expr, Register literal,
+ void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
Register prototype);
+ void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
void VisitBlockDeclarationsAndStatements(Block* stmt);
@@ -196,8 +195,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
int feedback_index(FeedbackVectorSlot slot) const;
Handle<Name> home_object_symbol() const { return home_object_symbol_; }
+ Handle<Name> iterator_symbol() const { return iterator_symbol_; }
Handle<Name> prototype_string() const { return prototype_string_; }
Handle<FixedArray> empty_fixed_array() const { return empty_fixed_array_; }
+ const AstRawString* undefined_string() const { return undefined_string_; }
Zone* zone_;
BytecodeArrayBuilder* builder_;
@@ -209,6 +210,8 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ZoneVector<std::pair<FunctionLiteral*, size_t>> function_literals_;
ZoneVector<std::pair<NativeFunctionLiteral*, size_t>>
native_function_literals_;
+ ZoneVector<std::pair<ObjectLiteral*, size_t>> object_literals_;
+ ZoneVector<std::pair<ArrayLiteral*, size_t>> array_literals_;
ControlScope* execution_control_;
ContextScope* execution_context_;
@@ -219,8 +222,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
int loop_depth_;
Handle<Name> home_object_symbol_;
+ Handle<Name> iterator_symbol_;
Handle<Name> prototype_string_;
Handle<FixedArray> empty_fixed_array_;
+ const AstRawString* undefined_string_;
};
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/bytecode-label.cc b/deps/v8/src/interpreter/bytecode-label.cc
index a12e8ab4cc..ef32bdd104 100644
--- a/deps/v8/src/interpreter/bytecode-label.cc
+++ b/deps/v8/src/interpreter/bytecode-label.cc
@@ -5,6 +5,7 @@
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/interpreter/bytecode-label.h b/deps/v8/src/interpreter/bytecode-label.h
index b5f602d216..4ef6265eb2 100644
--- a/deps/v8/src/interpreter/bytecode-label.h
+++ b/deps/v8/src/interpreter/bytecode-label.h
@@ -17,7 +17,7 @@ class BytecodeArrayBuilder;
// label is bound, it represents a known position in the bytecode
// array. For labels that are forward references there can be at most
// one reference whilst it is unbound.
-class BytecodeLabel final {
+class V8_EXPORT_PRIVATE BytecodeLabel final {
public:
BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
@@ -54,7 +54,7 @@ class BytecodeLabel final {
};
// Class representing a branch target of multiple jumps.
-class BytecodeLabels {
+class V8_EXPORT_PRIVATE BytecodeLabels {
public:
explicit BytecodeLabels(Zone* zone) : labels_(zone) {}
diff --git a/deps/v8/src/interpreter/bytecode-operands.h b/deps/v8/src/interpreter/bytecode-operands.h
index 55485027d3..f649d93a08 100644
--- a/deps/v8/src/interpreter/bytecode-operands.h
+++ b/deps/v8/src/interpreter/bytecode-operands.h
@@ -23,27 +23,33 @@ namespace interpreter {
V(RegOutPair, OperandTypeInfo::kScalableSignedByte) \
V(RegOutTriple, OperandTypeInfo::kScalableSignedByte)
-#define UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
- V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
+#define SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Imm, OperandTypeInfo::kScalableSignedByte)
+
+#define UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
V(Idx, OperandTypeInfo::kScalableUnsignedByte) \
V(UImm, OperandTypeInfo::kScalableUnsignedByte) \
- V(RegCount, OperandTypeInfo::kScalableUnsignedByte) \
+ V(RegCount, OperandTypeInfo::kScalableUnsignedByte)
+
+#define UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
+ V(Flag8, OperandTypeInfo::kFixedUnsignedByte) \
+ V(IntrinsicId, OperandTypeInfo::kFixedUnsignedByte) \
V(RuntimeId, OperandTypeInfo::kFixedUnsignedShort)
-#define SIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- V(Imm, OperandTypeInfo::kScalableSignedByte)
+// Carefully ordered for operand type range checks below.
+#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
+ INVALID_OPERAND_TYPE_LIST(V) \
+ UNSIGNED_FIXED_SCALAR_OPERAND_TYPE_LIST(V) \
+ UNSIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V) \
+ SIGNED_SCALABLE_SCALAR_OPERAND_TYPE_LIST(V)
+// Carefully ordered for operand type range checks below.
#define REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_INPUT_OPERAND_TYPE_LIST(V) \
REGISTER_OUTPUT_OPERAND_TYPE_LIST(V)
-#define NON_REGISTER_OPERAND_TYPE_LIST(V) \
- INVALID_OPERAND_TYPE_LIST(V) \
- UNSIGNED_SCALAR_OPERAND_TYPE_LIST(V) \
- SIGNED_SCALAR_OPERAND_TYPE_LIST(V)
-
// The list of operand types used by bytecodes.
+// Carefully ordered for operand type range checks below.
#define OPERAND_TYPE_LIST(V) \
NON_REGISTER_OPERAND_TYPE_LIST(V) \
REGISTER_OPERAND_TYPE_LIST(V)
@@ -125,6 +131,33 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const OperandSize& operand_size);
std::ostream& operator<<(std::ostream& os, const OperandType& operand_type);
+class BytecodeOperands {
+ public:
+ // Returns true if |accumulator_use| reads the accumulator.
+ static constexpr bool ReadsAccumulator(AccumulatorUse accumulator_use) {
+ return accumulator_use == AccumulatorUse::kRead ||
+ accumulator_use == AccumulatorUse::kReadWrite;
+ }
+
+ // Returns true if |accumulator_use| writes the accumulator.
+ static constexpr bool WritesAccumulator(AccumulatorUse accumulator_use) {
+ return accumulator_use == AccumulatorUse::kWrite ||
+ accumulator_use == AccumulatorUse::kReadWrite;
+ }
+
+ // Returns true if |operand_type| is a scalable signed byte.
+ static constexpr bool IsScalableSignedByte(OperandType operand_type) {
+ return operand_type >= OperandType::kImm &&
+ operand_type <= OperandType::kRegOutTriple;
+ }
+
+ // Returns true if |operand_type| is a scalable unsigned byte.
+ static constexpr bool IsScalableUnsignedByte(OperandType operand_type) {
+ return operand_type >= OperandType::kIdx &&
+ operand_type <= OperandType::kRegCount;
+ }
+};
+
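
These range predicates only work because the operand-type X-macro lists are concatenated in the exact order noted above. A self-contained sketch of the idea, with invented enum members, including static_asserts that would catch an ordering regression at compile time:

#include <cstdint>

// Sketch: with a carefully ordered enum, a category test compiles to two
// integer comparisons instead of a chain of equality checks.
enum class OperandType : uint8_t {
  kNone,
  // fixed unsigned
  kFlag8,
  kRuntimeId,
  // scalable unsigned  [kIdx .. kRegCount]
  kIdx,
  kUImm,
  kRegCount,
  // scalable signed    [kImm .. kRegOutTriple]
  kImm,
  kReg,
  kRegOutTriple,
};

constexpr bool IsScalableUnsignedByte(OperandType t) {
  return t >= OperandType::kIdx && t <= OperandType::kRegCount;
}
constexpr bool IsScalableSignedByte(OperandType t) {
  return t >= OperandType::kImm && t <= OperandType::kRegOutTriple;
}

static_assert(IsScalableUnsignedByte(OperandType::kUImm), "in range");
static_assert(!IsScalableUnsignedByte(OperandType::kFlag8), "fixed width");
static_assert(IsScalableSignedByte(OperandType::kReg), "registers scale");

int main() { return 0; }
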
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
index 40552943f7..acfe484ad3 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-peephole-optimizer.cc
@@ -13,7 +13,8 @@ namespace interpreter {
BytecodePeepholeOptimizer::BytecodePeepholeOptimizer(
BytecodePipelineStage* next_stage)
- : next_stage_(next_stage), last_(Bytecode::kIllegal, BytecodeSourceInfo()) {
+ : next_stage_(next_stage),
+ last_(BytecodeNode::Illegal(BytecodeSourceInfo())) {
InvalidateLast();
}
@@ -65,7 +66,7 @@ void BytecodePeepholeOptimizer::Flush() {
}
void BytecodePeepholeOptimizer::InvalidateLast() {
- last_.set_bytecode(Bytecode::kIllegal);
+ last_ = BytecodeNode::Illegal(BytecodeSourceInfo());
}
bool BytecodePeepholeOptimizer::LastIsValid() const {
@@ -116,26 +117,42 @@ bool BytecodePeepholeOptimizer::CanElideLastBasedOnSourcePosition(
namespace {
-void TransformLdaSmiBinaryOpToBinaryOpWithSmi(Bytecode new_bytecode,
- BytecodeNode* const last,
- BytecodeNode* const current) {
+BytecodeNode TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+ Bytecode new_bytecode, BytecodeNode* const last,
+ BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaSmi);
- current->set_bytecode(new_bytecode, last->operand(0), current->operand(0),
- current->operand(1));
+ BytecodeNode node(new_bytecode, last->operand(0), current->operand(0),
+ current->operand(1), current->source_info());
if (last->source_info().is_valid()) {
- current->set_source_info(last->source_info());
+ node.set_source_info(last->source_info());
}
+ return node;
}
-void TransformLdaZeroBinaryOpToBinaryOpWithZero(Bytecode new_bytecode,
- BytecodeNode* const last,
- BytecodeNode* const current) {
+BytecodeNode TransformLdaZeroBinaryOpToBinaryOpWithZero(
+ Bytecode new_bytecode, BytecodeNode* const last,
+ BytecodeNode* const current) {
DCHECK_EQ(last->bytecode(), Bytecode::kLdaZero);
- current->set_bytecode(new_bytecode, 0, current->operand(0),
- current->operand(1));
+ BytecodeNode node(new_bytecode, 0, current->operand(0), current->operand(1),
+ current->source_info());
if (last->source_info().is_valid()) {
- current->set_source_info(last->source_info());
+ node.set_source_info(last->source_info());
}
+ return node;
+}
+
+BytecodeNode TransformEqualityWithNullOrUndefined(Bytecode new_bytecode,
+ BytecodeNode* const last,
+ BytecodeNode* const current) {
+ DCHECK((last->bytecode() == Bytecode::kLdaNull) ||
+ (last->bytecode() == Bytecode::kLdaUndefined));
+ DCHECK((current->bytecode() == Bytecode::kTestEqual) ||
+ (current->bytecode() == Bytecode::kTestEqualStrict));
+ BytecodeNode node(new_bytecode, current->operand(0), current->source_info());
+ if (last->source_info().is_valid()) {
+ node.set_source_info(last->source_info());
+ }
+ return node;
}
} // namespace
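
The new action fuses a null/undefined load with the following equality test into one specialized bytecode (TestNull/TestUndefined for strict equality, TestUndetectable for the loose form, per the bytecode list later in this patch). A toy peephole pass showing the shape of that fusion, with an invented opcode set and only the strict rules:

#include <cassert>
#include <cstdint>
#include <vector>

enum class Op : uint8_t {
  LdaNull, LdaUndefined, TestEqualStrict, TestNull, TestUndefined, Star
};

// Sketch: collapse "load constant; strict-equality test" pairs into one
// specialized test opcode.
std::vector<Op> Peephole(const std::vector<Op>& in) {
  std::vector<Op> out;
  for (Op op : in) {
    if (!out.empty() && op == Op::TestEqualStrict) {
      if (out.back() == Op::LdaNull) { out.back() = Op::TestNull; continue; }
      if (out.back() == Op::LdaUndefined) {
        out.back() = Op::TestUndefined;
        continue;
      }
    }
    out.push_back(op);
  }
  return out;
}

int main() {
  std::vector<Op> in = {Op::LdaUndefined, Op::TestEqualStrict, Op::Star};
  std::vector<Op> expected = {Op::TestUndefined, Op::Star};
  assert(Peephole(in) == expected);
  return 0;
}
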
@@ -175,8 +192,8 @@ void BytecodePeepholeOptimizer::ElideCurrentAction(
if (node->source_info().is_valid()) {
// Preserve the source information by replacing the node bytecode
    // with a no-op bytecode.
- node->set_bytecode(Bytecode::kNop);
- DefaultAction(node);
+ BytecodeNode new_node(BytecodeNode::Nop(node->source_info()));
+ DefaultAction(&new_node);
} else {
// Nothing to do, keep last and wait for next bytecode to pair with it.
}
@@ -228,9 +245,9 @@ void BytecodePeepholeOptimizer::TransformLdaSmiBinaryOpToBinaryOpWithSmiAction(
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
- TransformLdaSmiBinaryOpToBinaryOpWithSmi(action_data->bytecode, last(),
- node);
- SetLast(node);
+ BytecodeNode new_node(TransformLdaSmiBinaryOpToBinaryOpWithSmi(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
} else {
DefaultAction(node);
}
@@ -243,14 +260,24 @@ void BytecodePeepholeOptimizer::
DCHECK(!Bytecodes::IsJump(node->bytecode()));
if (!node->source_info().is_valid() || !last()->source_info().is_valid()) {
// Fused last and current into current.
- TransformLdaZeroBinaryOpToBinaryOpWithZero(action_data->bytecode, last(),
- node);
- SetLast(node);
+ BytecodeNode new_node(TransformLdaZeroBinaryOpToBinaryOpWithZero(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
} else {
DefaultAction(node);
}
}
+void BytecodePeepholeOptimizer::TransformEqualityWithNullOrUndefinedAction(
+ BytecodeNode* const node, const PeepholeActionAndData* action_data) {
+ DCHECK(LastIsValid());
+ DCHECK(!Bytecodes::IsJump(node->bytecode()));
+ // Fused last and current into current.
+ BytecodeNode new_node(TransformEqualityWithNullOrUndefined(
+ action_data->bytecode, last(), node));
+ SetLast(&new_node);
+}
+
void BytecodePeepholeOptimizer::DefaultJumpAction(
BytecodeNode* const node, const PeepholeActionAndData* action_data) {
DCHECK(LastIsValid());
@@ -273,7 +300,7 @@ void BytecodePeepholeOptimizer::ChangeJumpBytecodeAction(
next_stage()->Write(last());
InvalidateLast();
- node->set_bytecode(action_data->bytecode, node->operand(0));
+ node->replace_bytecode(action_data->bytecode);
}
void BytecodePeepholeOptimizer::ElideLastBeforeJumpAction(
diff --git a/deps/v8/src/interpreter/bytecode-peephole-table.h b/deps/v8/src/interpreter/bytecode-peephole-table.h
index 1790f5a109..fe46979fd9 100644
--- a/deps/v8/src/interpreter/bytecode-peephole-table.h
+++ b/deps/v8/src/interpreter/bytecode-peephole-table.h
@@ -11,16 +11,17 @@ namespace v8 {
namespace internal {
namespace interpreter {
-#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
- V(DefaultAction) \
- V(UpdateLastAction) \
- V(UpdateLastIfSourceInfoPresentAction) \
- V(ElideCurrentAction) \
- V(ElideCurrentIfOperand0MatchesAction) \
- V(ElideLastAction) \
- V(ChangeBytecodeAction) \
- V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
- V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction)
+#define PEEPHOLE_NON_JUMP_ACTION_LIST(V) \
+ V(DefaultAction) \
+ V(UpdateLastAction) \
+ V(UpdateLastIfSourceInfoPresentAction) \
+ V(ElideCurrentAction) \
+ V(ElideCurrentIfOperand0MatchesAction) \
+ V(ElideLastAction) \
+ V(ChangeBytecodeAction) \
+ V(TransformLdaSmiBinaryOpToBinaryOpWithSmiAction) \
+ V(TransformLdaZeroBinaryOpToBinaryOpWithZeroAction) \
+ V(TransformEqualityWithNullOrUndefinedAction)
#define PEEPHOLE_JUMP_ACTION_LIST(V) \
V(DefaultJumpAction) \
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-pipeline.h
index d508defea0..03d40f7344 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-pipeline.h
@@ -191,6 +191,15 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
SetOperand(3, operand3);
}
+#define DEFINE_BYTECODE_NODE_CREATOR(Name, ...) \
+ template <typename... Operands> \
+ INLINE(static BytecodeNode Name(BytecodeSourceInfo source_info, \
+ Operands... operands)) { \
+ return Create<Bytecode::k##Name, __VA_ARGS__>(source_info, operands...); \
+ }
+ BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
+#undef DEFINE_BYTECODE_NODE_CREATOR
+
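
DEFINE_BYTECODE_NODE_CREATOR is an X-macro: BYTECODE_LIST expands it once per bytecode, generating a named factory (BytecodeNode::Star(...), BytecodeNode::Nop(...), and so on) used throughout the register-optimizer changes below. A minimal standalone sketch of the pattern:

#include <string>

// Sketch: the real BYTECODE_LIST and node type are much richer; this just
// shows how one macro invocation per entry yields one factory per bytecode.
#define BYTECODE_LIST(V) \
  V(Nop)                 \
  V(Star)                \
  V(Ldar)

struct BytecodeNode {
  std::string name;
};

#define DEFINE_CREATOR(Name) \
  static BytecodeNode Name() { return BytecodeNode{#Name}; }

struct BytecodeNodeFactory {
  BYTECODE_LIST(DEFINE_CREATOR)
};
#undef DEFINE_CREATOR

int main() {
  BytecodeNode nop = BytecodeNodeFactory::Nop();  // generated by the list
  BytecodeNode star = BytecodeNodeFactory::Star();
  return (nop.name == "Nop" && star.name == "Star") ? 0 : 1;
}
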
// Replace the bytecode of this node with |bytecode| and keep the operands.
void replace_bytecode(Bytecode bytecode) {
DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
@@ -198,40 +207,7 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bytecode_ = bytecode;
}
- void set_bytecode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- bytecode_ = bytecode;
- operand_count_ = 0;
- operand_scale_ = OperandScale::kSingle;
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
- bytecode_ = bytecode;
- operand_count_ = 1;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
- bytecode_ = bytecode;
- operand_count_ = 2;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- SetOperand(1, operand1);
- }
-
- void set_bytecode(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
- bytecode_ = bytecode;
- operand_count_ = 3;
- operand_scale_ = OperandScale::kSingle;
- SetOperand(0, operand0);
- SetOperand(1, operand1);
- SetOperand(2, operand2);
- }
+ void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
// Print to stream |os|.
void Print(std::ostream& os) const;
@@ -277,6 +253,100 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
bool operator!=(const BytecodeNode& other) const { return !(*this == other); }
private:
+ template <Bytecode bytecode, AccumulatorUse accumulator_use,
+ OperandType... operand_types>
+ friend class BytecodeNodeBuilder;
+
+ INLINE(BytecodeNode(Bytecode bytecode, int operand_count,
+ OperandScale operand_scale,
+ BytecodeSourceInfo source_info, uint32_t operand0 = 0,
+ uint32_t operand1 = 0, uint32_t operand2 = 0,
+ uint32_t operand3 = 0))
+ : bytecode_(bytecode),
+ operand_count_(operand_count),
+ operand_scale_(operand_scale),
+ source_info_(source_info) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), operand_count);
+ operands_[0] = operand0;
+ operands_[1] = operand1;
+ operands_[2] = operand2;
+ operands_[3] = operand3;
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info)) {
+ return BytecodeNode(bytecode, 0, OperandScale::kSingle, source_info);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ return BytecodeNode(bytecode, 1, scale, source_info, operand0);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ return BytecodeNode(bytecode, 2, scale, source_info, operand0, operand1);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type,
+ OperandType operand2_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+ return BytecodeNode(bytecode, 3, scale, source_info, operand0, operand1,
+ operand2);
+ }
+
+ template <Bytecode bytecode, AccumulatorUse accum_use,
+ OperandType operand0_type, OperandType operand1_type,
+ OperandType operand2_type, OperandType operand3_type>
+ INLINE(static BytecodeNode Create(BytecodeSourceInfo source_info,
+ uint32_t operand0, uint32_t operand1,
+ uint32_t operand2, uint32_t operand3)) {
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 0), operand0_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 1), operand1_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 2), operand2_type);
+ DCHECK_EQ(Bytecodes::GetOperandType(bytecode, 3), operand3_type);
+ OperandScale scale = OperandScale::kSingle;
+ scale = std::max(scale, ScaleForOperand<operand0_type>(operand0));
+ scale = std::max(scale, ScaleForOperand<operand1_type>(operand1));
+ scale = std::max(scale, ScaleForOperand<operand2_type>(operand2));
+ scale = std::max(scale, ScaleForOperand<operand3_type>(operand3));
+ return BytecodeNode(bytecode, 4, scale, source_info, operand0, operand1,
+ operand2, operand3);
+ }
+
+ template <OperandType operand_type>
+ INLINE(static OperandScale ScaleForOperand(uint32_t operand)) {
+ if (BytecodeOperands::IsScalableUnsignedByte(operand_type)) {
+ return Bytecodes::ScaleForUnsignedOperand(operand);
+ } else if (BytecodeOperands::IsScalableSignedByte(operand_type)) {
+ return Bytecodes::ScaleForSignedOperand(operand);
+ } else {
+ return OperandScale::kSingle;
+ }
+ }
+
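
Each Create() overload folds the per-operand width requirements into a single OperandScale by taking the maximum, so every operand of a node is encoded at one width. A small sketch of that rule (the scale values mirror the kSingle/kDouble/kQuadruple naming but are otherwise assumed):

#include <algorithm>
#include <cstdint>

enum class OperandScale : uint8_t { kSingle = 1, kDouble = 2, kQuadruple = 4 };

// Width needed to encode one unsigned operand.
constexpr OperandScale ScaleForUnsignedOperand(uint32_t operand) {
  return operand <= 0xFF    ? OperandScale::kSingle
         : operand <= 0xFFFF ? OperandScale::kDouble
                             : OperandScale::kQuadruple;
}

int main() {
  OperandScale scale = OperandScale::kSingle;
  uint32_t operands[] = {3, 700, 12};  // one operand needs two bytes
  for (uint32_t op : operands) {
    scale = std::max(scale, ScaleForUnsignedOperand(op));
  }
  return scale == OperandScale::kDouble ? 0 : 1;
}
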
INLINE(void UpdateScaleForOperand(int operand_index, uint32_t operand)) {
if (Bytecodes::OperandIsScalableSignedByte(bytecode(), operand_index)) {
operand_scale_ =
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 563956e5c6..e1e38a6d16 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -265,16 +265,16 @@ void BytecodeRegisterOptimizer::OutputRegisterTransfer(
if (input == accumulator_) {
uint32_t operand = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kStar, operand, source_info);
+ BytecodeNode node = BytecodeNode::Star(source_info, operand);
next_stage_->Write(&node);
} else if (output == accumulator_) {
uint32_t operand = static_cast<uint32_t>(input.ToOperand());
- BytecodeNode node(Bytecode::kLdar, operand, source_info);
+ BytecodeNode node = BytecodeNode::Ldar(source_info, operand);
next_stage_->Write(&node);
} else {
uint32_t operand0 = static_cast<uint32_t>(input.ToOperand());
uint32_t operand1 = static_cast<uint32_t>(output.ToOperand());
- BytecodeNode node(Bytecode::kMov, operand0, operand1, source_info);
+ BytecodeNode node = BytecodeNode::Mov(source_info, operand0, operand1);
next_stage_->Write(&node);
}
if (output != accumulator_) {
@@ -365,7 +365,7 @@ void BytecodeRegisterOptimizer::RegisterTransfer(
void BytecodeRegisterOptimizer::EmitNopForSourceInfo(
BytecodeSourceInfo source_info) const {
DCHECK(source_info.is_valid());
- BytecodeNode nop(Bytecode::kNop, source_info);
+ BytecodeNode nop = BytecodeNode::Nop(source_info);
next_stage_->Write(&nop);
}
@@ -416,32 +416,6 @@ RegisterList BytecodeRegisterOptimizer::GetInputRegisterList(
}
}
-void BytecodeRegisterOptimizer::PrepareForBytecode(Bytecode bytecode) {
- if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
- bytecode == Bytecode::kSuspendGenerator) {
- // All state must be flushed before emitting
- // - a jump bytecode (as the register equivalents at the jump target aren't
- // known.
- // - a call to the debugger (as it can manipulate locals and parameters),
- // - a generator suspend (as this involves saving all registers).
- Flush();
- }
-
- // Materialize the accumulator if it is read by the bytecode. The
- // accumulator is special and no other register can be materialized
- // in it's place.
- if (Bytecodes::ReadsAccumulator(bytecode) &&
- !accumulator_info_->materialized()) {
- Materialize(accumulator_info_);
- }
-
- // Materialize an equivalent to the accumulator if it will be
- // clobbered when the bytecode is dispatched.
- if (Bytecodes::WritesAccumulator(bytecode)) {
- PrepareOutputRegister(accumulator_);
- }
-}
-
void BytecodeRegisterOptimizer::GrowRegisterMap(Register reg) {
DCHECK(RegisterIsTemporary(reg));
size_t index = GetRegisterInfoTableIndex(reg);
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index e2a02cf594..80c2f2587f 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -46,7 +46,32 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
void Flush();
// Prepares for |bytecode|.
- void PrepareForBytecode(Bytecode bytecode);
+ template <Bytecode bytecode, AccumulatorUse accumulator_use>
+ INLINE(void PrepareForBytecode()) {
+ if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator) {
+ // All state must be flushed before emitting
+      // - a jump bytecode (as the register equivalents at the jump target
+      //   aren't known),
+ // - a call to the debugger (as it can manipulate locals and parameters),
+ // - a generator suspend (as this involves saving all registers).
+ Flush();
+ }
+
+ // Materialize the accumulator if it is read by the bytecode. The
+ // accumulator is special and no other register can be materialized
+    // in its place.
+ if (BytecodeOperands::ReadsAccumulator(accumulator_use)) {
+ Materialize(accumulator_info_);
+ }
+
+ // Materialize an equivalent to the accumulator if it will be
+ // clobbered when the bytecode is dispatched.
+ if (BytecodeOperands::WritesAccumulator(accumulator_use)) {
+ PrepareOutputRegister(accumulator_);
+ }
+ }
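
Turning PrepareForBytecode into a template makes |bytecode| and |accumulator_use| compile-time constants, so each call site instantiates a version in which the untaken branches can be folded away. A reduced sketch of that effect:

#include <cstdio>

enum class AccumulatorUse { kNone, kRead, kWrite, kReadWrite };

constexpr bool ReadsAccumulator(AccumulatorUse use) {
  return use == AccumulatorUse::kRead || use == AccumulatorUse::kReadWrite;
}

// With |use| as a template parameter, the condition below is a constant
// per instantiation and the dead branch can be eliminated entirely.
template <AccumulatorUse use>
void PrepareForBytecode() {
  if (ReadsAccumulator(use)) {
    std::puts("materialize accumulator");
  }
}

int main() {
  PrepareForBytecode<AccumulatorUse::kRead>();  // prints
  PrepareForBytecode<AccumulatorUse::kNone>();  // branch folded away
  return 0;
}
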
// Prepares |reg| for being used as an output operand.
void PrepareOutputRegister(Register reg);
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index 23d77f0c33..f09af85be4 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -38,8 +38,9 @@ namespace interpreter {
V(LdaConstant, AccumulatorUse::kWrite, OperandType::kIdx) \
\
/* Globals */ \
- V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx) \
- V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx) \
+ V(LdaGlobal, AccumulatorUse::kWrite, OperandType::kIdx, OperandType::kIdx) \
+ V(LdaGlobalInsideTypeof, AccumulatorUse::kWrite, OperandType::kIdx, \
+ OperandType::kIdx) \
V(StaGlobalSloppy, AccumulatorUse::kRead, OperandType::kIdx, \
OperandType::kIdx) \
V(StaGlobalStrict, AccumulatorUse::kRead, OperandType::kIdx, \
@@ -97,6 +98,8 @@ namespace interpreter {
OperandType::kReg, OperandType::kIdx) \
V(StaKeyedPropertyStrict, AccumulatorUse::kRead, OperandType::kReg, \
OperandType::kReg, OperandType::kIdx) \
+ V(StaDataPropertyInLiteral, AccumulatorUse::kRead, OperandType::kReg, \
+ OperandType::kReg, OperandType::kFlag8, OperandType::kIdx) \
\
/* Binary Operators */ \
V(Add, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kIdx) \
@@ -140,6 +143,9 @@ namespace interpreter {
V(DeletePropertyStrict, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(DeletePropertySloppy, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
+ /* GetSuperConstructor operator */ \
+ V(GetSuperConstructor, AccumulatorUse::kRead, OperandType::kRegOut) \
+ \
/* Call operations */ \
V(Call, AccumulatorUse::kWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
@@ -158,9 +164,11 @@ namespace interpreter {
V(InvokeIntrinsic, AccumulatorUse::kWrite, OperandType::kIntrinsicId, \
OperandType::kRegList, OperandType::kRegCount) \
\
- /* New operator */ \
+ /* New operators */ \
V(New, AccumulatorUse::kReadWrite, OperandType::kReg, OperandType::kRegList, \
OperandType::kRegCount, OperandType::kIdx) \
+ V(NewWithSpread, AccumulatorUse::kWrite, OperandType::kRegList, \
+ OperandType::kRegCount) \
\
/* Test Operators */ \
V(TestEqual, AccumulatorUse::kReadWrite, OperandType::kReg, \
@@ -180,6 +188,11 @@ namespace interpreter {
V(TestInstanceOf, AccumulatorUse::kReadWrite, OperandType::kReg) \
V(TestIn, AccumulatorUse::kReadWrite, OperandType::kReg) \
\
+ /* TestEqual with Null or Undefined */ \
+ V(TestUndetectable, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(TestNull, AccumulatorUse::kWrite, OperandType::kReg) \
+ V(TestUndefined, AccumulatorUse::kWrite, OperandType::kReg) \
+ \
/* Cast operators */ \
V(ToName, AccumulatorUse::kRead, OperandType::kRegOut) \
V(ToNumber, AccumulatorUse::kRead, OperandType::kRegOut) \
@@ -195,13 +208,14 @@ namespace interpreter {
\
/* Closure allocation */ \
V(CreateClosure, AccumulatorUse::kWrite, OperandType::kIdx, \
- OperandType::kFlag8) \
+ OperandType::kIdx, OperandType::kFlag8) \
\
/* Context allocation */ \
V(CreateBlockContext, AccumulatorUse::kReadWrite, OperandType::kIdx) \
V(CreateCatchContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx, OperandType::kIdx) \
V(CreateFunctionContext, AccumulatorUse::kWrite, OperandType::kUImm) \
+ V(CreateEvalContext, AccumulatorUse::kWrite, OperandType::kUImm) \
V(CreateWithContext, AccumulatorUse::kReadWrite, OperandType::kReg, \
OperandType::kIdx) \
\
@@ -210,24 +224,35 @@ namespace interpreter {
V(CreateUnmappedArguments, AccumulatorUse::kWrite) \
V(CreateRestParameter, AccumulatorUse::kWrite) \
\
- /* Control Flow */ \
+ /* Control Flow -- carefully ordered for efficient checks */ \
+ /* - [Unconditional jumps] */ \
+ V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
+ /* - [Forward jumps] */ \
V(Jump, AccumulatorUse::kNone, OperandType::kImm) \
+ /* - [Start constant jumps] */ \
V(JumpConstant, AccumulatorUse::kNone, OperandType::kIdx) \
- V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ /* - [Conditional jumps] */ \
+ /* - [Conditional constant jumps] */ \
+ V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfJSReceiverConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ /* - [Start ToBoolean jumps] */ \
V(JumpIfToBooleanTrueConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfToBooleanFalseConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ /* - [End constant jumps] */ \
+ /* - [Conditional immediate jumps] */ \
+ V(JumpIfToBooleanTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfToBooleanFalse, AccumulatorUse::kRead, OperandType::kImm) \
+ /* - [End ToBoolean jumps] */ \
+ V(JumpIfTrue, AccumulatorUse::kRead, OperandType::kImm) \
+ V(JumpIfFalse, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfNull, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNullConstant, AccumulatorUse::kRead, OperandType::kIdx) \
V(JumpIfUndefined, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfUndefinedConstant, AccumulatorUse::kRead, OperandType::kIdx) \
+ V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kImm) \
V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kImm) \
- V(JumpIfNotHoleConstant, AccumulatorUse::kRead, OperandType::kIdx) \
- V(JumpLoop, AccumulatorUse::kNone, OperandType::kImm, OperandType::kImm) \
\
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
@@ -241,6 +266,9 @@ namespace interpreter {
/* Perform a stack guard check */ \
V(StackCheck, AccumulatorUse::kNone) \
\
+ /* Update the pending message */ \
+ V(SetPendingMessage, AccumulatorUse::kReadWrite) \
+ \
/* Non-local flow control */ \
V(Throw, AccumulatorUse::kRead) \
V(ReThrow, AccumulatorUse::kRead) \
@@ -294,6 +322,69 @@ namespace interpreter {
DEBUG_BREAK_PLAIN_BYTECODE_LIST(V) \
DEBUG_BREAK_PREFIX_BYTECODE_LIST(V)
+// Lists of jump bytecodes.
+
+#define JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpLoop) \
+ V(Jump)
+
+#define JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) V(JumpConstant)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpIfToBooleanTrue) \
+ V(JumpIfToBooleanFalse)
+
+#define JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ V(JumpIfToBooleanTrueConstant) \
+ V(JumpIfToBooleanFalseConstant)
+
+#define JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ V(JumpIfTrue) \
+ V(JumpIfFalse) \
+ V(JumpIfNull) \
+ V(JumpIfUndefined) \
+ V(JumpIfJSReceiver) \
+ V(JumpIfNotHole)
+
+#define JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ V(JumpIfNullConstant) \
+ V(JumpIfUndefinedConstant) \
+ V(JumpIfTrueConstant) \
+ V(JumpIfFalseConstant) \
+ V(JumpIfJSReceiverConstant) \
+ V(JumpIfNotHoleConstant)
+
+#define JUMP_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V)
+
+#define JUMP_TO_BOOLEAN_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_TOBOOLEAN_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_UNCONDITIONAL_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_UNCONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_CONDITIONAL_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_IMMEDIATE_BYTECODE_LIST(V) \
+ JUMP_CONDITIONAL_CONSTANT_BYTECODE_LIST(V)
+
+#define JUMP_FORWARD_BYTECODE_LIST(V) \
+ V(Jump) \
+ V(JumpConstant) \
+ JUMP_CONDITIONAL_BYTECODE_LIST(V)
+
+#define JUMP_BYTECODE_LIST(V) \
+ JUMP_FORWARD_BYTECODE_LIST(V) \
+ V(JumpLoop)
+
// Enumeration of interpreter bytecodes.
enum class Bytecode : uint8_t {
#define DECLARE_BYTECODE(Name, ...) k##Name,
@@ -306,14 +397,6 @@ enum class Bytecode : uint8_t {
#undef COUNT_BYTECODE
};
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#if V8_CC_MSVC
-#define CONSTEXPR const
-#else
-#define CONSTEXPR constexpr
-#endif
-
class V8_EXPORT_PRIVATE Bytecodes final {
public:
// The maximum number of operands a bytecode may have.
@@ -381,14 +464,12 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns true if |bytecode| reads the accumulator.
static bool ReadsAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kRead) ==
- AccumulatorUse::kRead;
+ return BytecodeOperands::ReadsAccumulator(GetAccumulatorUse(bytecode));
}
// Returns true if |bytecode| writes the accumulator.
static bool WritesAccumulator(Bytecode bytecode) {
- return (GetAccumulatorUse(bytecode) & AccumulatorUse::kWrite) ==
- AccumulatorUse::kWrite;
+ return BytecodeOperands::WritesAccumulator(GetAccumulatorUse(bytecode));
}
// Return true if |bytecode| writes the accumulator with a boolean value.
@@ -407,7 +488,10 @@ class V8_EXPORT_PRIVATE Bytecodes final {
case Bytecode::kTestGreaterThanOrEqual:
case Bytecode::kTestInstanceOf:
case Bytecode::kTestIn:
+ case Bytecode::kTestUndetectable:
case Bytecode::kForInContinue:
+ case Bytecode::kTestUndefined:
+ case Bytecode::kTestNull:
return true;
default:
return false;
@@ -416,7 +500,7 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is an accumulator load without effects,
// e.g. LdaConstant, LdaTrue, Ldar.
- static CONSTEXPR bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsAccumulatorLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kLdaZero ||
bytecode == Bytecode::kLdaSmi || bytecode == Bytecode::kLdaNull ||
bytecode == Bytecode::kLdaTrue || bytecode == Bytecode::kLdaFalse ||
@@ -429,123 +513,124 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Return true if |bytecode| is a register load without effects,
// e.g. Mov, Star.
- static CONSTEXPR bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsRegisterLoadWithoutEffects(Bytecode bytecode) {
return bytecode == Bytecode::kMov || bytecode == Bytecode::kPopContext ||
bytecode == Bytecode::kPushContext || bytecode == Bytecode::kStar;
}
// Returns true if the bytecode is a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static CONSTEXPR bool IsConditionalJumpImmediate(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrue ||
- bytecode == Bytecode::kJumpIfFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfNotHole ||
- bytecode == Bytecode::kJumpIfNull ||
- bytecode == Bytecode::kJumpIfUndefined;
+ static constexpr bool IsConditionalJumpImmediate(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfToBooleanTrue &&
+ bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump taking
// a constant pool entry (OperandType::kIdx).
- static CONSTEXPR bool IsConditionalJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfTrueConstant ||
- bytecode == Bytecode::kJumpIfFalseConstant ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant ||
- bytecode == Bytecode::kJumpIfNotHoleConstant ||
- bytecode == Bytecode::kJumpIfNullConstant ||
- bytecode == Bytecode::kJumpIfUndefinedConstant;
+ static constexpr bool IsConditionalJumpConstant(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfNullConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a conditional jump taking
// any kind of operand.
- static CONSTEXPR bool IsConditionalJump(Bytecode bytecode) {
- return IsConditionalJumpImmediate(bytecode) ||
- IsConditionalJumpConstant(bytecode);
+ static constexpr bool IsConditionalJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfNullConstant &&
+ bytecode <= Bytecode::kJumpIfNotHole;
+ }
+
+ // Returns true if the bytecode is an unconditional jump.
+ static constexpr bool IsUnconditionalJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpLoop &&
+ bytecode <= Bytecode::kJumpConstant;
}
// Returns true if the bytecode is a jump or a conditional jump taking
// an immediate byte operand (OperandType::kImm).
- static CONSTEXPR bool IsJumpImmediate(Bytecode bytecode) {
+ static constexpr bool IsJumpImmediate(Bytecode bytecode) {
return bytecode == Bytecode::kJump || bytecode == Bytecode::kJumpLoop ||
IsConditionalJumpImmediate(bytecode);
}
// Returns true if the bytecode is a jump or conditional jump taking a
// constant pool entry (OperandType::kIdx).
- static CONSTEXPR bool IsJumpConstant(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpConstant ||
- IsConditionalJumpConstant(bytecode);
+ static constexpr bool IsJumpConstant(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalseConstant;
}
// Returns true if the bytecode is a jump that internally coerces the
// accumulator to a boolean.
- static CONSTEXPR bool IsJumpIfToBoolean(Bytecode bytecode) {
- return bytecode == Bytecode::kJumpIfToBooleanTrue ||
- bytecode == Bytecode::kJumpIfToBooleanFalse ||
- bytecode == Bytecode::kJumpIfToBooleanTrueConstant ||
- bytecode == Bytecode::kJumpIfToBooleanFalseConstant;
+ static constexpr bool IsJumpIfToBoolean(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpIfToBooleanTrueConstant &&
+ bytecode <= Bytecode::kJumpIfToBooleanFalse;
}
// Returns true if the bytecode is a jump or conditional jump taking
// any kind of operand.
- static CONSTEXPR bool IsJump(Bytecode bytecode) {
- return IsJumpImmediate(bytecode) || IsJumpConstant(bytecode);
+ static constexpr bool IsJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJumpLoop &&
+ bytecode <= Bytecode::kJumpIfNotHole;
+ }
+
+ // Returns true if the bytecode is a forward jump or conditional jump taking
+ // any kind of operand.
+ static constexpr bool IsForwardJump(Bytecode bytecode) {
+ return bytecode >= Bytecode::kJump && bytecode <= Bytecode::kJumpIfNotHole;
}
// Returns true if the bytecode is a conditional jump, a jump, or a return.
- static CONSTEXPR bool IsJumpOrReturn(Bytecode bytecode) {
+ static constexpr bool IsJumpOrReturn(Bytecode bytecode) {
return bytecode == Bytecode::kReturn || IsJump(bytecode);
}
// Returns true if |bytecode| is a jump without effects, e.g. any jump
// excluding those that include type coercion like JumpIfToBooleanTrue.
- static CONSTEXPR bool IsJumpWithoutEffects(Bytecode bytecode) {
+ static constexpr bool IsJumpWithoutEffects(Bytecode bytecode) {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
}
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw.
- static CONSTEXPR bool IsWithoutExternalSideEffects(Bytecode bytecode) {
+ static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
bytecode == Bytecode::kNop || IsJumpWithoutEffects(bytecode));
}
// Returns true if the bytecode is Ldar or Star.
- static CONSTEXPR bool IsLdarOrStar(Bytecode bytecode) {
+ static constexpr bool IsLdarOrStar(Bytecode bytecode) {
return bytecode == Bytecode::kLdar || bytecode == Bytecode::kStar;
}
// Returns true if |bytecode| puts a name in the accumulator.
- static CONSTEXPR bool PutsNameInAccumulator(Bytecode bytecode) {
+ static constexpr bool PutsNameInAccumulator(Bytecode bytecode) {
return bytecode == Bytecode::kTypeOf;
}
// Returns true if the bytecode is a call or a constructor call.
- static CONSTEXPR bool IsCallOrNew(Bytecode bytecode) {
+ static constexpr bool IsCallOrNew(Bytecode bytecode) {
return bytecode == Bytecode::kCall || bytecode == Bytecode::kCallProperty ||
bytecode == Bytecode::kTailCall || bytecode == Bytecode::kNew;
}
// Returns true if the bytecode is a call to the runtime.
- static CONSTEXPR bool IsCallRuntime(Bytecode bytecode) {
+ static constexpr bool IsCallRuntime(Bytecode bytecode) {
return bytecode == Bytecode::kCallRuntime ||
bytecode == Bytecode::kCallRuntimeForPair ||
bytecode == Bytecode::kInvokeIntrinsic;
}
// Returns true if the bytecode is a scaling prefix bytecode.
- static CONSTEXPR bool IsPrefixScalingBytecode(Bytecode bytecode) {
+ static constexpr bool IsPrefixScalingBytecode(Bytecode bytecode) {
return bytecode == Bytecode::kExtraWide || bytecode == Bytecode::kWide ||
bytecode == Bytecode::kDebugBreakExtraWide ||
bytecode == Bytecode::kDebugBreakWide;
}
// Returns the number of values which |bytecode| returns.
- static CONSTEXPR size_t ReturnCount(Bytecode bytecode) {
+ static constexpr size_t ReturnCount(Bytecode bytecode) {
return bytecode == Bytecode::kReturn ? 1 : 0;
}
@@ -730,10 +815,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
static const OperandSize* const kOperandSizes[][3];
};
-// TODO(rmcilroy): Remove once we switch to MSVC 2015 which supports constexpr.
-// See crbug.com/603131.
-#undef CONSTEXPR
-
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const Bytecode& bytecode);
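
The predicates above trade chains of equality tests for a pair of comparisons, which is only sound because the jump bytecodes occupy one contiguous run of the Bytecode enum (constant-operand jumps first, then immediate-operand jumps, as the bounds used above imply). A minimal standalone sketch of the idiom; the enum layout here is illustrative, not V8's actual ordering:

    #include <cstdint>

    enum class Bytecode : uint8_t {
      kJumpLoop,                      // unconditional jumps first...
      kJump,
      kJumpConstant,
      kJumpIfNullConstant,            // ...then constant-operand conditionals...
      kJumpIfToBooleanFalseConstant,
      kJumpIfToBooleanTrue,           // ...then immediate-operand conditionals.
      kJumpIfNotHole,
      kReturn,
    };

    // Two unsigned comparisons replace a chain of equality tests, and the
    // predicate stays usable in constant expressions.
    static constexpr bool IsJump(Bytecode b) {
      return b >= Bytecode::kJumpLoop && b <= Bytecode::kJumpIfNotHole;
    }

    static_assert(IsJump(Bytecode::kJumpConstant), "in range");
    static_assert(!IsJump(Bytecode::kReturn), "out of range");

    int main() {}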
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index d2b7995623..6fd141e911 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -56,14 +56,20 @@ void ConstantArrayBuilder::ConstantArraySlice::InsertAt(size_t index,
constants_[index - start_index()] = object;
}
-bool ConstantArrayBuilder::ConstantArraySlice::AllElementsAreUnique() const {
+#if DEBUG
+void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique()
+ const {
std::set<Object*> elements;
for (auto constant : constants_) {
- if (elements.find(*constant) != elements.end()) return false;
+ if (elements.find(*constant) != elements.end()) {
+ std::ostringstream os;
+ os << "Duplicate constant found: " << Brief(*constant);
+ FATAL(os.str().c_str());
+ }
elements.insert(*constant);
}
- return true;
}
+#endif
STATIC_CONST_MEMBER_DEFINITION const size_t ConstantArrayBuilder::k8BitCapacity;
STATIC_CONST_MEMBER_DEFINITION const size_t
@@ -126,32 +132,30 @@ Handle<FixedArray> ConstantArrayBuilder::ToFixedArray(Isolate* isolate) {
handle(reserved_smi.first, isolate));
}
- Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArray(
+ Handle<FixedArray> fixed_array = isolate->factory()->NewFixedArrayWithHoles(
static_cast<int>(size()), PretenureFlag::TENURED);
int array_index = 0;
for (const ConstantArraySlice* slice : idx_slice_) {
- if (array_index == fixed_array->length()) {
- break;
- }
DCHECK(array_index == 0 ||
base::bits::IsPowerOfTwo32(static_cast<uint32_t>(array_index)));
+#if DEBUG
// Different slices might contain the same element due to reservations, but
// all elements within a slice should be unique. If this DCHECK fails, then
// the AST nodes are not being internalized within a CanonicalHandleScope.
- DCHECK(slice->AllElementsAreUnique());
+ slice->CheckAllElementsAreUnique();
+#endif
// Copy objects from slice into array.
for (size_t i = 0; i < slice->size(); ++i) {
fixed_array->set(array_index++, *slice->At(slice->start_index() + i));
}
- // Insert holes where reservations led to unused slots.
- size_t padding =
- std::min(static_cast<size_t>(fixed_array->length() - array_index),
- slice->capacity() - slice->size());
- for (size_t i = 0; i < padding; i++) {
- fixed_array->set(array_index++, *the_hole_value());
+ // Leave holes where reservations led to unused slots.
+ size_t padding = slice->capacity() - slice->size();
+ if (static_cast<size_t>(fixed_array->length() - array_index) <= padding) {
+ break;
}
+ array_index += padding;
}
- DCHECK_EQ(array_index, fixed_array->length());
+ DCHECK_GE(array_index, fixed_array->length());
return fixed_array;
}
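
The rewrite above relies on NewFixedArrayWithHoles pre-filling the array, so reserved-but-unused slots can simply be skipped instead of having holes stored into them one by one. A standalone sketch of the index bookkeeping, with plain structs standing in for the slice and array types (names are illustrative):

    #include <cstddef>
    #include <vector>

    // Each slice occupies `capacity` slots but only `size` of them hold
    // constants; the remainder stays as pre-initialized holes.
    struct Slice {
      size_t size;
      size_t capacity;
    };

    size_t FillArray(const std::vector<Slice>& slices, size_t array_length) {
      size_t array_index = 0;
      for (const Slice& slice : slices) {
        array_index += slice.size;  // stands in for copying slice elements
        size_t padding = slice.capacity - slice.size;
        if (array_length - array_index <= padding) break;  // tail stays holey
        array_index += padding;  // skip holes instead of writing them
      }
      return array_index;
    }

    int main() {
      // Two slices of capacity 4 holding 3 and 2 constants: the second
      // slice's trailing holes are never touched.
      return FillArray({{3, 4}, {2, 4}}, 8) == 6 ? 0 : 1;
    }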
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 8e95913e57..c99c8e7c59 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -82,7 +82,10 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
size_t Allocate(Handle<Object> object);
Handle<Object> At(size_t index) const;
void InsertAt(size_t index, Handle<Object> object);
- bool AllElementsAreUnique() const;
+
+#if DEBUG
+ void CheckAllElementsAreUnique() const;
+#endif
inline size_t available() const { return capacity() - reserved() - size(); }
inline size_t reserved() const { return reserved_; }
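
Turning the bool predicate into a DEBUG-only checker lets the failure message name the duplicate constant instead of reporting a bare DCHECK failure. A standalone sketch of the pattern, with ints in place of heap objects:

    #include <cstdio>
    #include <cstdlib>
    #include <set>
    #include <vector>

    #define DEBUG 1

    #if DEBUG
    // Aborts with the offending element, mirroring the FATAL call above.
    void CheckAllElementsAreUnique(const std::vector<int>& constants) {
      std::set<int> elements;
      for (int constant : constants) {
        if (!elements.insert(constant).second) {
          fprintf(stderr, "Duplicate constant found: %d\n", constant);
          abort();
        }
      }
    }
    #endif

    int main() {
    #if DEBUG
      CheckAllElementsAreUnique({1, 2, 3});  // passes; {1, 1} would abort
    #endif
    }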
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 0e71b96cce..41d1ad82d9 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/interpreter/control-flow-builders.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -55,8 +56,10 @@ void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
// and misplaced between the headers.
DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
- for (auto& label : *additional_labels) {
- builder()->Bind(&label);
+ if (additional_labels != nullptr) {
+ for (auto& label : *additional_labels) {
+ builder()->Bind(&label);
+ }
}
}
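
With the nullptr default added to the declaration in control-flow-builders.h (below) and the guard added here, call sites that have no extra labels can drop the argument entirely. A standalone sketch of the defaulted-pointer pattern:

    #include <vector>

    // A null default plus a guard lets the common caller omit the vector.
    void LoopHeader(std::vector<int>* additional_labels = nullptr) {
      if (additional_labels != nullptr) {
        for (int& label : *additional_labels) {
          (void)label;  // stands in for builder()->Bind(&label)
        }
      }
    }

    int main() {
      LoopHeader();                   // common case: no extra labels
      std::vector<int> labels = {1, 2};
      LoopHeader(&labels);            // explicit labels still work
    }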
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 3174db5da1..68c28c70d1 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -14,7 +14,7 @@ namespace v8 {
namespace internal {
namespace interpreter {
-class ControlFlowBuilder BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE ControlFlowBuilder BASE_EMBEDDED {
public:
explicit ControlFlowBuilder(BytecodeArrayBuilder* builder)
: builder_(builder) {}
@@ -29,7 +29,8 @@ class ControlFlowBuilder BASE_EMBEDDED {
DISALLOW_COPY_AND_ASSIGN(ControlFlowBuilder);
};
-class BreakableControlFlowBuilder : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE BreakableControlFlowBuilder
+ : public ControlFlowBuilder {
public:
explicit BreakableControlFlowBuilder(BytecodeArrayBuilder* builder)
: ControlFlowBuilder(builder), break_labels_(builder->zone()) {}
@@ -63,7 +64,8 @@ class BreakableControlFlowBuilder : public ControlFlowBuilder {
// Class to track control flow for block statements (which can break in JS).
-class BlockBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE BlockBuilder final
+ : public BreakableControlFlowBuilder {
public:
explicit BlockBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder) {}
@@ -77,7 +79,7 @@ class BlockBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating break and continue statements with
// their loop.
-class LoopBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
public:
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
@@ -85,7 +87,7 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
header_labels_(builder->zone()) {}
~LoopBuilder();
- void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels);
+ void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels = nullptr);
void JumpToHeader(int loop_depth);
void BindContinueTarget();
void EndLoop();
@@ -109,7 +111,8 @@ class LoopBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating break statements with their switch.
-class SwitchBuilder final : public BreakableControlFlowBuilder {
+class V8_EXPORT_PRIVATE SwitchBuilder final
+ : public BreakableControlFlowBuilder {
public:
explicit SwitchBuilder(BytecodeArrayBuilder* builder, int number_of_cases)
: BreakableControlFlowBuilder(builder),
@@ -139,7 +142,7 @@ class SwitchBuilder final : public BreakableControlFlowBuilder {
// A class to help with co-ordinating control flow in try-catch statements.
-class TryCatchBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryCatchBuilder final : public ControlFlowBuilder {
public:
explicit TryCatchBuilder(BytecodeArrayBuilder* builder,
HandlerTable::CatchPrediction catch_prediction)
@@ -160,7 +163,7 @@ class TryCatchBuilder final : public ControlFlowBuilder {
// A class to help with co-ordinating control flow in try-finally statements.
-class TryFinallyBuilder final : public ControlFlowBuilder {
+class V8_EXPORT_PRIVATE TryFinallyBuilder final : public ControlFlowBuilder {
public:
explicit TryFinallyBuilder(BytecodeArrayBuilder* builder,
HandlerTable::CatchPrediction catch_prediction)
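
The V8_EXPORT_PRIVATE annotations added throughout this header make the builders visible outside the v8 component, typically so unit tests can link against them. As a hedged sketch, such export macros commonly expand along these lines; V8's actual definition lives in its own build-configuration headers and varies by build mode:

    // Illustrative component-build export macro, not V8's real definition.
    #if defined(_WIN32)
    #define V8_EXPORT_PRIVATE __declspec(dllexport)
    #else
    #define V8_EXPORT_PRIVATE __attribute__((visibility("default")))
    #endif

    class V8_EXPORT_PRIVATE Example {
     public:
      int value() const { return 42; }
    };

    int main() { return Example().value() == 42 ? 0 : 1; }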
diff --git a/deps/v8/src/interpreter/handler-table-builder.h b/deps/v8/src/interpreter/handler-table-builder.h
index 25147ca26b..50061949dc 100644
--- a/deps/v8/src/interpreter/handler-table-builder.h
+++ b/deps/v8/src/interpreter/handler-table-builder.h
@@ -19,7 +19,7 @@ class Isolate;
namespace interpreter {
// A helper class for constructing exception handler tables for the interpreter.
-class HandlerTableBuilder final BASE_EMBEDDED {
+class V8_EXPORT_PRIVATE HandlerTableBuilder final BASE_EMBEDDED {
public:
explicit HandlerTableBuilder(Zone* zone);
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index c8ce5539e9..1ccd342f06 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -20,15 +20,13 @@ namespace v8 {
namespace internal {
namespace interpreter {
+using compiler::CodeAssemblerState;
using compiler::Node;
-InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
+InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
Bytecode bytecode,
OperandScale operand_scale)
- : CodeStubAssembler(isolate, zone, InterpreterDispatchDescriptor(isolate),
- Code::ComputeFlags(Code::BYTECODE_HANDLER),
- Bytecodes::ToString(bytecode),
- Bytecodes::ReturnCount(bytecode)),
+ : CodeStubAssembler(state),
bytecode_(bytecode),
operand_scale_(operand_scale),
bytecode_offset_(this, MachineType::PointerRepresentation()),
@@ -44,6 +42,8 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
if (FLAG_trace_ignition) {
TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
}
+ RegisterCallGenerationCallbacks([this] { CallPrologue(); },
+ [this] { CallEpilogue(); });
}
InterpreterAssembler::~InterpreterAssembler() {
@@ -51,6 +51,7 @@ InterpreterAssembler::~InterpreterAssembler() {
// accumulator in the way described in the bytecode definitions in
// bytecodes.h.
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
+ UnregisterCallGenerationCallbacks();
}
Node* InterpreterAssembler::GetInterpretedFramePointer() {
@@ -222,14 +223,8 @@ Node* InterpreterAssembler::BytecodeOperandSignedByte(int operand_index) {
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
Node* operand_offset = OperandOffset(operand_index);
- Node* load = Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset));
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
- }
- return load;
+ return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(),
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
compiler::Node* InterpreterAssembler::BytecodeOperandReadUnaligned(
@@ -305,19 +300,12 @@ Node* InterpreterAssembler::BytecodeOperandSignedShort(int operand_index) {
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()));
int operand_offset =
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
- Node* load;
if (TargetSupportsUnalignedAccess()) {
- load = Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
+ return Load(MachineType::Int16(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16());
}
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
- }
- return load;
}
Node* InterpreterAssembler::BytecodeOperandUnsignedQuad(int operand_index) {
@@ -340,19 +328,12 @@ Node* InterpreterAssembler::BytecodeOperandSignedQuad(int operand_index) {
bytecode_, operand_index, operand_scale()));
int operand_offset =
Bytecodes::GetOperandOffset(bytecode_, operand_index, operand_scale());
- Node* load;
if (TargetSupportsUnalignedAccess()) {
- load = Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
+ return Load(MachineType::Int32(), BytecodeArrayTaggedPointer(),
IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- load = BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
- }
-
- // Ensure that we sign extend to full pointer size
- if (kPointerSize == 8) {
- load = ChangeInt32ToInt64(load);
+ return BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32());
}
- return load;
}
Node* InterpreterAssembler::BytecodeSignedOperand(int operand_index,
@@ -422,12 +403,25 @@ Node* InterpreterAssembler::BytecodeOperandImm(int operand_index) {
return BytecodeSignedOperand(operand_index, operand_size);
}
+Node* InterpreterAssembler::BytecodeOperandImmIntPtr(int operand_index) {
+ return ChangeInt32ToIntPtr(BytecodeOperandImm(operand_index));
+}
+
+Node* InterpreterAssembler::BytecodeOperandImmSmi(int operand_index) {
+ return SmiFromWord32(BytecodeOperandImm(operand_index));
+}
+
Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
DCHECK(OperandType::kIdx ==
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
- return BytecodeUnsignedOperand(operand_index, operand_size);
+ return ChangeUint32ToWord(
+ BytecodeUnsignedOperand(operand_index, operand_size));
+}
+
+Node* InterpreterAssembler::BytecodeOperandIdxSmi(int operand_index) {
+ return SmiTag(BytecodeOperandIdx(operand_index));
}
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
@@ -435,7 +429,8 @@ Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
- return BytecodeSignedOperand(operand_index, operand_size);
+ return ChangeInt32ToIntPtr(
+ BytecodeSignedOperand(operand_index, operand_size));
}
Node* InterpreterAssembler::BytecodeOperandRuntimeId(int operand_index) {
@@ -459,30 +454,11 @@ Node* InterpreterAssembler::BytecodeOperandIntrinsicId(int operand_index) {
Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
BytecodeArray::kConstantPoolOffset);
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- WordShl(index, kPointerSizeLog2));
- return Load(MachineType::AnyTagged(), constant_pool, entry_offset);
+ return LoadFixedArrayElement(constant_pool, index);
}
Node* InterpreterAssembler::LoadAndUntagConstantPoolEntry(Node* index) {
- Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
- BytecodeArray::kConstantPoolOffset);
- int offset = FixedArray::kHeaderSize - kHeapObjectTag;
-#if V8_TARGET_LITTLE_ENDIAN
- if (Is64()) {
- offset += kPointerSize / 2;
- }
-#endif
- Node* entry_offset =
- IntPtrAdd(IntPtrConstant(offset), WordShl(index, kPointerSizeLog2));
- if (Is64()) {
- return ChangeInt32ToInt64(
- Load(MachineType::Int32(), constant_pool, entry_offset));
- } else {
- return SmiUntag(
- Load(MachineType::AnyTagged(), constant_pool, entry_offset));
- }
+ return SmiUntag(LoadConstantPoolEntry(index));
}
Node* InterpreterAssembler::LoadTypeFeedbackVector() {
@@ -519,7 +495,7 @@ Node* InterpreterAssembler::IncrementCallCount(Node* type_feedback_vector,
Node* call_count_slot = IntPtrAdd(slot_id, IntPtrConstant(1));
Node* call_count =
LoadFixedArrayElement(type_feedback_vector, call_count_slot);
- Node* new_count = SmiAdd(call_count, SmiTag(Int32Constant(1)));
+ Node* new_count = SmiAdd(call_count, SmiConstant(1));
// Count is Smi, so we don't need a write barrier.
return StoreFixedArrayElement(type_feedback_vector, call_count_slot,
new_count, SKIP_WRITE_BARRIER);
@@ -588,14 +564,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
GotoIf(is_megamorphic, &call);
Comment("check if it is an allocation site");
- Node* is_allocation_site = WordEqual(
- LoadMap(feedback_element), LoadRoot(Heap::kAllocationSiteMapRootIndex));
- GotoUnless(is_allocation_site, &check_initialized);
+ GotoUnless(IsAllocationSiteMap(LoadMap(feedback_element)),
+ &check_initialized);
// If it is not the Array() function, mark megamorphic.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, function);
GotoUnless(is_array_function, &mark_megamorphic);
@@ -629,13 +603,12 @@ Node* InterpreterAssembler::CallJSWithFeedback(Node* function, Node* context,
// Check if function is an object of JSFunction type.
Node* instance_type = LoadInstanceType(function);
Node* is_js_function =
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoUnless(is_js_function, &mark_megamorphic);
// Check if it is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, function);
GotoIf(is_array_function, &create_allocation_site);
@@ -704,6 +677,7 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
Callable callable = CodeFactory::InterpreterPushArgsAndCall(
isolate(), tail_call_mode, CallableType::kAny);
Node* code_target = HeapConstant(callable.code());
+
return CallStub(callable.descriptor(), code_target, context, arg_count,
first_arg, function);
}
@@ -719,7 +693,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// Slot id of 0 is used to indicate no type feedback is available.
STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
- Node* is_feedback_unavailable = Word32Equal(slot_id, Int32Constant(0));
+ Node* is_feedback_unavailable = WordEqual(slot_id, IntPtrConstant(0));
GotoIf(is_feedback_unavailable, &call_construct);
// Check that the constructor is not a smi.
@@ -729,7 +703,7 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
// Check that constructor is a JSFunction.
Node* instance_type = LoadInstanceType(constructor);
Node* is_js_function =
- WordEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+ Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
GotoUnless(is_js_function, &call_construct);
// Check if it is a monomorphic constructor.
@@ -784,9 +758,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
GotoUnless(is_allocation_site, &check_initialized);
// Make sure the function is the Array() function.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, constructor);
GotoUnless(is_array_function, &mark_megamorphic);
@@ -809,9 +782,8 @@ Node* InterpreterAssembler::CallConstruct(Node* constructor, Node* context,
Comment("initialize the feedback element");
// Create an allocation site if the function is an array function,
// otherwise create a weak cell.
- Node* context_slot =
- LoadFixedArrayElement(LoadNativeContext(context),
- Int32Constant(Context::ARRAY_FUNCTION_INDEX));
+ Node* context_slot = LoadContextElement(LoadNativeContext(context),
+ Context::ARRAY_FUNCTION_INDEX);
Node* is_array_function = WordEqual(context_slot, constructor);
Branch(is_array_function, &create_allocation_site, &create_weak_cell);
@@ -872,13 +844,14 @@ Node* InterpreterAssembler::CallRuntimeN(Node* function_id, Node* context,
ExternalReference::runtime_function_table_address(isolate()));
Node* function_offset =
Int32Mul(function_id, Int32Constant(sizeof(Runtime::Function)));
- Node* function = IntPtrAdd(function_table, function_offset);
+ Node* function =
+ IntPtrAdd(function_table, ChangeUint32ToWord(function_offset));
Node* function_entry =
Load(MachineType::Pointer(), function,
IntPtrConstant(offsetof(Runtime::Function, entry)));
- return CallStub(callable.descriptor(), code_target, context, arg_count,
- first_arg, function_entry, result_size);
+ return CallStubR(callable.descriptor(), result_size, code_target, context,
+ arg_count, first_arg, function_entry);
}
void InterpreterAssembler::UpdateInterruptBudget(Node* weight) {
@@ -933,7 +906,7 @@ Node* InterpreterAssembler::Advance(Node* delta) {
Node* InterpreterAssembler::Jump(Node* delta) {
DCHECK(!Bytecodes::IsStarLookahead(bytecode_, operand_scale_));
- UpdateInterruptBudget(delta);
+ UpdateInterruptBudget(TruncateWordToWord32(delta));
Node* new_bytecode_offset = Advance(delta);
Node* target_bytecode = LoadBytecode(new_bytecode_offset);
return DispatchToBytecode(target_bytecode, new_bytecode_offset);
@@ -961,10 +934,7 @@ void InterpreterAssembler::JumpIfWordNotEqual(Node* lhs, Node* rhs,
Node* InterpreterAssembler::LoadBytecode(compiler::Node* bytecode_offset) {
Node* bytecode =
Load(MachineType::Uint8(), BytecodeArrayTaggedPointer(), bytecode_offset);
- if (kPointerSize == 8) {
- bytecode = ChangeUint32ToUint64(bytecode);
- }
- return bytecode;
+ return ChangeUint32ToWord(bytecode);
}
Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
@@ -1007,6 +977,7 @@ void InterpreterAssembler::InlineStar() {
}
Node* InterpreterAssembler::Dispatch() {
+ Comment("========= Dispatch");
Node* target_offset = Advance();
Node* target_bytecode = LoadBytecode(target_offset);
@@ -1031,17 +1002,19 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* InterpreterAssembler::DispatchToBytecodeHandler(Node* handler,
Node* bytecode_offset) {
+ // TODO(ishell): Add CSA::CodeEntryPoint(code).
Node* handler_entry =
- IntPtrAdd(handler, IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+ IntPtrAdd(BitcastTaggedToWord(handler),
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
return DispatchToBytecodeHandlerEntry(handler_entry, bytecode_offset);
}
Node* InterpreterAssembler::DispatchToBytecodeHandlerEntry(
Node* handler_entry, Node* bytecode_offset) {
InterpreterDispatchDescriptor descriptor(isolate());
- Node* args[] = {GetAccumulatorUnchecked(), bytecode_offset,
- BytecodeArrayTaggedPointer(), DispatchTableRawPointer()};
- return TailCallBytecodeDispatch(descriptor, handler_entry, args);
+ return TailCallBytecodeDispatch(
+ descriptor, handler_entry, GetAccumulatorUnchecked(), bytecode_offset,
+ BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1087,7 +1060,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Variable* loop_vars[] = {&var_value, var_type_feedback};
Label loop(this, 2, loop_vars), done_loop(this, &var_result);
var_value.Bind(value);
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kNone));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&loop);
Bind(&loop);
{
@@ -1103,8 +1076,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert the Smi {value}.
var_result.Bind(SmiToWord32(value));
var_type_feedback->Bind(
- Word32Or(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kSignedSmall)));
+ SmiOr(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kSignedSmall)));
Goto(&done_loop);
}
@@ -1114,16 +1087,16 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Label if_valueisheapnumber(this),
if_valueisnotheapnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
- Branch(WordEqual(value_map, HeapNumberMapConstant()),
- &if_valueisheapnumber, &if_valueisnotheapnumber);
+ Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
+ &if_valueisnotheapnumber);
Bind(&if_valueisheapnumber);
{
// Truncate the floating point value.
var_result.Bind(TruncateHeapNumberValueToWord32(value));
var_type_feedback->Bind(
- Word32Or(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kNumber)));
+ SmiOr(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNumber)));
Goto(&done_loop);
}
@@ -1132,9 +1105,8 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
// only reach this path on the first pass when the feedback is kNone.
- CSA_ASSERT(this,
- Word32Equal(var_type_feedback->value(),
- Int32Constant(BinaryOperationFeedback::kNone)));
+ CSA_ASSERT(this, SmiEqual(var_type_feedback->value(),
+ SmiConstant(BinaryOperationFeedback::kNone)));
Label if_valueisoddball(this),
if_valueisnotoddball(this, Label::kDeferred);
@@ -1147,7 +1119,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert Oddball to a Number and perform checks again.
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
var_type_feedback->Bind(
- Int32Constant(BinaryOperationFeedback::kNumberOrOddball));
+ SmiConstant(BinaryOperationFeedback::kNumberOrOddball));
Goto(&loop);
}
@@ -1156,7 +1128,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
// Convert the {value} to a Number first.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
var_value.Bind(CallStub(callable, context, value));
- var_type_feedback->Bind(Int32Constant(BinaryOperationFeedback::kAny));
+ var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kAny));
Goto(&loop);
}
}
@@ -1174,7 +1146,7 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// function.
Node* profiling_weight =
Int32Sub(Int32Constant(kHeapObjectTag + BytecodeArray::kHeaderSize),
- BytecodeOffset());
+ TruncateWordToWord32(BytecodeOffset()));
UpdateInterruptBudget(profiling_weight);
}
@@ -1187,9 +1159,9 @@ Node* InterpreterAssembler::StackCheckTriggeredInterrupt() {
}
Node* InterpreterAssembler::LoadOSRNestingLevel() {
- Node* offset =
- IntPtrConstant(BytecodeArray::kOSRNestingLevelOffset - kHeapObjectTag);
- return Load(MachineType::Int8(), BytecodeArrayTaggedPointer(), offset);
+ return LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kOSRNestingLevelOffset,
+ MachineType::Int8());
}
void InterpreterAssembler::Abort(BailoutReason bailout_reason) {
@@ -1261,19 +1233,21 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
Node* InterpreterAssembler::RegisterCount() {
Node* bytecode_array = LoadRegister(Register::bytecode_array());
Node* frame_size = LoadObjectField(
- bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32());
- return Word32Sar(frame_size, Int32Constant(kPointerSizeLog2));
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Uint32());
+ return WordShr(ChangeUint32ToWord(frame_size),
+ IntPtrConstant(kPointerSizeLog2));
}
Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
+ Node* register_count = RegisterCount();
if (FLAG_debug_code) {
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ AbortIfWordNotEqual(array_size, register_count,
+ kInvalidRegisterFileInGenerator);
}
- Variable var_index(this, MachineRepresentation::kWord32);
- var_index.Bind(Int32Constant(0));
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
// Iterate over register file and write values into array.
// The mapping of register to array index must match that used in
@@ -1283,16 +1257,14 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- Node* condition = Int32LessThan(index, RegisterCount());
- GotoUnless(condition, &done_loop);
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
- Node* reg_index =
- Int32Sub(Int32Constant(Register(0).ToOperand()), index);
- Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ Node* value = LoadRegister(reg_index);
StoreFixedArrayElement(array, index, value);
- var_index.Bind(Int32Add(index, Int32Constant(1)));
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
Bind(&done_loop);
@@ -1301,14 +1273,15 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
}
Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
+ Node* register_count = RegisterCount();
if (FLAG_debug_code) {
Node* array_size = LoadAndUntagFixedArrayBaseLength(array);
- AbortIfWordNotEqual(
- array_size, RegisterCount(), kInvalidRegisterFileInGenerator);
+ AbortIfWordNotEqual(array_size, register_count,
+ kInvalidRegisterFileInGenerator);
}
- Variable var_index(this, MachineRepresentation::kWord32);
- var_index.Bind(Int32Constant(0));
+ Variable var_index(this, MachineType::PointerRepresentation());
+ var_index.Bind(IntPtrConstant(0));
// Iterate over array and write values into register file. Also erase the
// array contents to not keep them alive artificially.
@@ -1317,18 +1290,16 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
Bind(&loop);
{
Node* index = var_index.value();
- Node* condition = Int32LessThan(index, RegisterCount());
- GotoUnless(condition, &done_loop);
+ GotoUnless(UintPtrLessThan(index, register_count), &done_loop);
Node* value = LoadFixedArrayElement(array, index);
- Node* reg_index =
- Int32Sub(Int32Constant(Register(0).ToOperand()), index);
- StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
+ Node* reg_index = IntPtrSub(IntPtrConstant(Register(0).ToOperand()), index);
+ StoreRegister(value, reg_index);
StoreFixedArrayElement(array, index, StaleRegisterConstant());
- var_index.Bind(Int32Add(index, Int32Constant(1)));
+ var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
Bind(&done_loop);
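
A theme running through these interpreter-assembler changes is replacing ad-hoc `kPointerSize == 8` extension checks with dedicated conversions (ChangeInt32ToIntPtr for signed operands, ChangeUint32ToWord for unsigned ones) and switching loop indices to pointer-width variables. A standalone sketch of why the two widenings differ; the function names mirror the CSA helpers, but the bodies are plain-C++ stand-ins:

    #include <cstdint>
    #include <cstdio>

    // Signed operands must sign-extend: -3 stays -3 at pointer width.
    intptr_t ChangeInt32ToIntPtr(int32_t value) {
      return static_cast<intptr_t>(value);
    }

    // Unsigned operands must zero-extend: no stray high bits appear.
    uintptr_t ChangeUint32ToWord(uint32_t value) {
      return static_cast<uintptr_t>(value);
    }

    int main() {
      int32_t reg_operand = -3;      // e.g. a register operand
      uint32_t idx_operand = 70000;  // e.g. a constant pool index
      printf("%jd %ju\n",
             static_cast<intmax_t>(ChangeInt32ToIntPtr(reg_operand)),
             static_cast<uintmax_t>(ChangeUint32ToWord(idx_operand)));
    }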
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index aefd2bc053..5183f3efed 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -20,32 +20,41 @@ namespace interpreter {
class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
public:
- InterpreterAssembler(Isolate* isolate, Zone* zone, Bytecode bytecode,
+ InterpreterAssembler(compiler::CodeAssemblerState* state, Bytecode bytecode,
OperandScale operand_scale);
- virtual ~InterpreterAssembler();
+ ~InterpreterAssembler();
- // Returns the count immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit unsigned count immediate for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandCount(int operand_index);
- // Returns the 8-bit flag for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit unsigned flag for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandFlag(int operand_index);
- // Returns the index immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit zero-extended index immediate for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIdx(int operand_index);
- // Returns the UImm8 immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the smi index immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ compiler::Node* BytecodeOperandIdxSmi(int operand_index);
+ // Returns the 32-bit unsigned immediate for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandUImm(int operand_index);
- // Returns the Imm8 immediate for bytecode operand |operand_index| in the
- // current bytecode.
+ // Returns the 32-bit signed immediate for bytecode operand |operand_index|
+ // in the current bytecode.
compiler::Node* BytecodeOperandImm(int operand_index);
- // Returns the register index for bytecode operand |operand_index| in the
+ // Returns the word-size signed immediate for bytecode operand |operand_index|
+ // in the current bytecode.
+ compiler::Node* BytecodeOperandImmIntPtr(int operand_index);
+ // Returns the smi immediate for bytecode operand |operand_index| in the
// current bytecode.
+ compiler::Node* BytecodeOperandImmSmi(int operand_index);
+ // Returns the word-size sign-extended register index for bytecode operand
+ // |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandReg(int operand_index);
- // Returns the runtime id immediate for bytecode operand
+ // Returns the 32-bit unsigned runtime id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandRuntimeId(int operand_index);
- // Returns the intrinsic id immediate for bytecode operand
+ // Returns the 32-bit unsigned intrinsic id immediate for bytecode operand
// |operand_index| in the current bytecode.
compiler::Node* BytecodeOperandIntrinsicId(int operand_index);
@@ -209,8 +218,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Saves and restores interpreter bytecode offset to the interpreter stack
// frame when performing a call.
- void CallPrologue() override;
- void CallEpilogue() override;
+ void CallPrologue();
+ void CallEpilogue();
// Increment the dispatch counter for the (current, next) bytecode pair.
void TraceBytecodeDispatch(compiler::Node* target_index);
@@ -218,8 +227,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Traces the current bytecode by calling |function_id|.
void TraceBytecode(Runtime::FunctionId function_id);
- // Updates the bytecode array's interrupt budget by |weight| and calls
- // Runtime::kInterrupt if counter reaches zero.
+ // Updates the bytecode array's interrupt budget by a 32-bit signed |weight|
+ // and calls Runtime::kInterrupt if counter reaches zero.
void UpdateInterruptBudget(compiler::Node* weight);
// Returns the offset of register |index| relative to RegisterFilePointer().
@@ -236,6 +245,7 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandReadUnaligned(int relative_offset,
MachineType result_type);
+ // Returns zero- or sign-extended to word32 value of the operand.
compiler::Node* BytecodeOperandUnsignedByte(int operand_index);
compiler::Node* BytecodeOperandSignedByte(int operand_index);
compiler::Node* BytecodeOperandUnsignedShort(int operand_index);
@@ -243,6 +253,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
compiler::Node* BytecodeOperandUnsignedQuad(int operand_index);
compiler::Node* BytecodeOperandSignedQuad(int operand_index);
+ // Returns zero- or sign-extended to word32 value of the operand of
+ // given size.
compiler::Node* BytecodeSignedOperand(int operand_index,
OperandSize operand_size);
compiler::Node* BytecodeUnsignedOperand(int operand_index,
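
The comment rewrites above make each accessor's result width explicit: idx and flag operands come back zero-extended, imm and reg operands sign-extended, with IntPtr and Smi variants layered on top. A standalone sketch of the layering (plain integers stand in for Nodes; names mirror the accessors above):

    #include <cstdint>

    int32_t BytecodeOperandImm() { return -7; }  // raw 32-bit signed operand

    // Word-size variant layers a sign extension over the 32-bit accessor.
    intptr_t BytecodeOperandImmIntPtr() {
      return static_cast<intptr_t>(BytecodeOperandImm());
    }

    int main() { return BytecodeOperandImmIntPtr() == -7 ? 0 : 1; }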
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.cc b/deps/v8/src/interpreter/interpreter-intrinsics.cc
index b46ca878cc..a2820fb128 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.cc
@@ -105,12 +105,8 @@ Node* IntrinsicsHelper::InvokeIntrinsic(Node* function_id, Node* context,
Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
InstanceTypeCompareMode mode) {
- InterpreterAssembler::Variable return_value(assembler_,
- MachineRepresentation::kTagged);
Node* instance_type = __ LoadInstanceType(object);
- InterpreterAssembler::Label if_true(assembler_), if_false(assembler_),
- end(assembler_);
if (mode == kInstanceTypeEqual) {
return __ Word32Equal(instance_type, __ Int32Constant(type));
} else {
@@ -122,6 +118,7 @@ Node* IntrinsicsHelper::CompareInstanceType(Node* object, int type,
Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
+ // TODO(ishell): Use Select here.
InterpreterAssembler::Label if_not_smi(assembler_), return_true(assembler_),
return_false(assembler_), end(assembler_);
Node* arg = __ LoadRegister(input);
@@ -148,6 +145,8 @@ Node* IntrinsicsHelper::IsInstanceType(Node* input, int type) {
Node* IntrinsicsHelper::IsJSReceiver(Node* input, Node* arg_count,
Node* context) {
+ // TODO(ishell): Use Select here.
+ // TODO(ishell): Use CSA::IsJSReceiverInstanceType here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label return_true(assembler_), return_false(assembler_),
@@ -185,16 +184,13 @@ Node* IntrinsicsHelper::IsJSProxy(Node* input, Node* arg_count, Node* context) {
return IsInstanceType(input, JS_PROXY_TYPE);
}
-Node* IntrinsicsHelper::IsRegExp(Node* input, Node* arg_count, Node* context) {
- return IsInstanceType(input, JS_REGEXP_TYPE);
-}
-
Node* IntrinsicsHelper::IsTypedArray(Node* input, Node* arg_count,
Node* context) {
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
+ // TODO(ishell): Use SelectBooleanConstant here.
InterpreterAssembler::Variable return_value(assembler_,
MachineRepresentation::kTagged);
InterpreterAssembler::Label if_smi(assembler_), if_not_smi(assembler_),
@@ -222,14 +218,22 @@ Node* IntrinsicsHelper::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* IntrinsicsHelper::IntrinsicAsStubCall(Node* args_reg, Node* context,
Callable const& callable) {
int param_count = callable.descriptor().GetParameterCount();
- Node** args = zone()->NewArray<Node*>(param_count + 1); // 1 for context
+ int input_count = param_count + 2; // +2 for target and context
+ Node** args = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ args[index++] = __ HeapConstant(callable.code());
for (int i = 0; i < param_count; i++) {
- args[i] = __ LoadRegister(args_reg);
+ args[index++] = __ LoadRegister(args_reg);
args_reg = __ NextRegister(args_reg);
}
- args[param_count] = context;
+ args[index++] = context;
+ return __ CallStubN(callable.descriptor(), 1, input_count, args);
+}
- return __ CallStubN(callable, args);
+Node* IntrinsicsHelper::CreateIterResultObject(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsStubCall(input, context,
+ CodeFactory::CreateIterResultObject(isolate()));
}
Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
@@ -238,11 +242,6 @@ Node* IntrinsicsHelper::HasProperty(Node* input, Node* arg_count,
CodeFactory::HasProperty(isolate()));
}
-Node* IntrinsicsHelper::NewObject(Node* input, Node* arg_count, Node* context) {
- return IntrinsicAsStubCall(input, context,
- CodeFactory::FastNewObject(isolate()));
-}
-
Node* IntrinsicsHelper::NumberToString(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsStubCall(input, context,
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 70ff291df3..825e2b9a98 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -23,25 +23,24 @@ namespace interpreter {
// List of supported intrinsics, with upper-case name, lower-case name, and
// expected number of arguments (-1 denotes a variable argument count).
-#define INTRINSICS_LIST(V) \
- V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsRegExp, is_regexp, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(NewObject, new_object, 2) \
- V(NumberToString, number_to_string, 1) \
- V(RegExpExec, reg_exp_exec, 4) \
- V(SubString, sub_string, 3) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
- V(ToObject, to_object, 1) \
+#define INTRINSICS_LIST(V) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(NumberToString, number_to_string, 1) \
+ V(RegExpExec, reg_exp_exec, 4) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
+ V(ToObject, to_object, 1) \
V(ValueOf, value_of, 1)
class IntrinsicsHelper {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 81aecafecf..60c5e595af 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "src/ast/prettyprinter.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
@@ -27,7 +28,6 @@ namespace interpreter {
using compiler::Node;
typedef CodeStubAssembler::Label Label;
typedef CodeStubAssembler::Variable Variable;
-typedef InterpreterAssembler::Arg Arg;
#define __ assembler->
@@ -41,9 +41,41 @@ class InterpreterCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
+ class TimerScope final {
+ public:
+ TimerScope(RuntimeCallStats* stats, RuntimeCallStats::CounterId counter_id)
+ : stats_(stats) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ RuntimeCallStats::Enter(stats_, &timer_, counter_id);
+ }
+ }
+
+ explicit TimerScope(RuntimeCallCounter* counter) : stats_(nullptr) {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ timer_.Start(counter, nullptr);
+ }
+ }
+
+ ~TimerScope() {
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ if (stats_) {
+ RuntimeCallStats::Leave(stats_, &timer_);
+ } else {
+ timer_.Stop();
+ }
+ }
+ }
+
+ private:
+ RuntimeCallStats* stats_;
+ RuntimeCallTimer timer_;
+ };
+
BytecodeGenerator* generator() { return &generator_; }
BytecodeGenerator generator_;
+ RuntimeCallStats* runtime_call_stats_;
+ RuntimeCallCounter background_execute_counter_;
DISALLOW_COPY_AND_ASSIGN(InterpreterCompilationJob);
};
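
TimerScope is an RAII guard: construction starts the timer against either the thread-safe RuntimeCallStats table or the job's private background counter, and destruction stops it on every exit path. A standalone sketch of the same shape; the counter type is illustrative, not V8's RuntimeCallCounter:

    #include <chrono>
    #include <cstdio>

    struct Counter { long long total_us = 0; };

    class TimerScope {
     public:
      explicit TimerScope(Counter* counter)
          : counter_(counter), start_(std::chrono::steady_clock::now()) {}
      ~TimerScope() {  // runs on every exit path, including early returns
        auto elapsed = std::chrono::steady_clock::now() - start_;
        counter_->total_us +=
            std::chrono::duration_cast<std::chrono::microseconds>(elapsed)
                .count();
      }

     private:
      Counter* counter_;
      std::chrono::steady_clock::time_point start_;
    };

    int main() {
      Counter c;
      { TimerScope t(&c); /* timed work */ }
      printf("%lld us\n", c.total_us);
    }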
@@ -73,24 +105,9 @@ void Interpreter::Initialize() {
};
for (OperandScale operand_scale : kOperandScales) {
-#define GENERATE_CODE(Name, ...) \
- { \
- if (Bytecodes::BytecodeHasHandler(Bytecode::k##Name, operand_scale)) { \
- InterpreterAssembler assembler(isolate_, &zone, Bytecode::k##Name, \
- operand_scale); \
- Do##Name(&assembler); \
- Handle<Code> code = assembler.GenerateCode(); \
- size_t index = GetDispatchTableIndex(Bytecode::k##Name, operand_scale); \
- dispatch_table_[index] = code->entry(); \
- TraceCodegen(code); \
- PROFILE( \
- isolate_, \
- CodeCreateEvent( \
- CodeEventListener::BYTECODE_HANDLER_TAG, \
- AbstractCode::cast(*code), \
- Bytecodes::ToString(Bytecode::k##Name, operand_scale).c_str())); \
- } \
- }
+#define GENERATE_CODE(Name, ...) \
+ InstallBytecodeHandler(&zone, Bytecode::k##Name, operand_scale, \
+ &Interpreter::Do##Name);
BYTECODE_LIST(GENERATE_CODE)
#undef GENERATE_CODE
}
@@ -108,6 +125,27 @@ void Interpreter::Initialize() {
DCHECK(IsDispatchTableInitialized());
}
+void Interpreter::InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+ OperandScale operand_scale,
+ BytecodeGeneratorFunc generator) {
+ if (!Bytecodes::BytecodeHasHandler(bytecode, operand_scale)) return;
+
+ InterpreterDispatchDescriptor descriptor(isolate_);
+ compiler::CodeAssemblerState state(
+ isolate_, zone, descriptor, Code::ComputeFlags(Code::BYTECODE_HANDLER),
+ Bytecodes::ToString(bytecode), Bytecodes::ReturnCount(bytecode));
+ InterpreterAssembler assembler(&state, bytecode, operand_scale);
+ (this->*generator)(&assembler);
+ Handle<Code> code = compiler::CodeAssembler::GenerateCode(&state);
+ size_t index = GetDispatchTableIndex(bytecode, operand_scale);
+ dispatch_table_[index] = code->entry();
+ TraceCodegen(code);
+ PROFILE(isolate_, CodeCreateEvent(
+ CodeEventListener::BYTECODE_HANDLER_TAG,
+ AbstractCode::cast(*code),
+ Bytecodes::ToString(bytecode, operand_scale).c_str()));
+}
+
Code* Interpreter::GetBytecodeHandler(Bytecode bytecode,
OperandScale operand_scale) {
DCHECK(IsDispatchTableInitialized());
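
InstallBytecodeHandler shows the new code-generation protocol end to end: build a CodeAssemblerState, run the generator against an assembler that borrows it, then finalize with CodeAssembler::GenerateCode(&state). A standalone sketch of that state-threading shape, with illustrative stand-in types:

    #include <string>
    #include <vector>

    // Shared, externally owned state; assemblers only borrow it.
    struct CodeAssemblerState {
      std::string name;
      std::vector<int> instructions;
    };

    class Assembler {
     public:
      explicit Assembler(CodeAssemblerState* state) : state_(state) {}
      void Emit(int op) { state_->instructions.push_back(op); }

     private:
      CodeAssemblerState* state_;  // not owned
    };

    // Finalization reads whatever the generators accumulated in the state.
    std::vector<int> GenerateCode(CodeAssemblerState* state) {
      return state->instructions;
    }

    int main() {
      CodeAssemblerState state{"LdaZero", {}};
      Assembler assembler(&state);
      assembler.Emit(1);
      return GenerateCode(&state).size() == 1 ? 0 : 1;
    }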
@@ -154,10 +192,14 @@ int Interpreter::InterruptBudget() {
}
InterpreterCompilationJob::InterpreterCompilationJob(CompilationInfo* info)
- : CompilationJob(info->isolate(), info, "Ignition"), generator_(info) {}
+ : CompilationJob(info->isolate(), info, "Ignition"),
+ generator_(info),
+ runtime_call_stats_(info->isolate()->counters()->runtime_call_stats()),
+ background_execute_counter_("CompileBackgroundIgnition") {}
InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
- if (FLAG_print_bytecode || FLAG_print_ast) {
+ CodeGenerator::MakeCodePrologue(info(), "interpreter");
+ if (FLAG_print_bytecode) {
OFStream os(stdout);
std::unique_ptr<char[]> name = info()->GetDebugName();
os << "[generating bytecode for function: " << info()->GetDebugName().get()
@@ -165,25 +207,15 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
<< std::flush;
}
-#ifdef DEBUG
- if (info()->parse_info() && FLAG_print_ast) {
- OFStream os(stdout);
- os << "--- AST ---" << std::endl
- << AstPrinter(info()->isolate()).PrintProgram(info()->literal())
- << std::endl
- << std::flush;
- }
-#endif // DEBUG
-
return SUCCEEDED;
}
InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
- // TODO(5203): These timers aren't thread safe, move to using the CompilerJob
- // timers.
- RuntimeCallTimerScope runtimeTimer(info()->isolate(),
- &RuntimeCallStats::CompileIgnition);
- TimerEventScope<TimerEventCompileIgnition> timer(info()->isolate());
+ TimerScope runtimeTimer =
+ executed_on_background_thread()
+ ? TimerScope(&background_execute_counter_)
+ : TimerScope(runtime_call_stats_, &RuntimeCallStats::CompileIgnition);
+ // TODO(lpy): add support for background compilation RCS trace.
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileIgnition");
generator()->GenerateBytecode(stack_limit());
@@ -195,13 +227,20 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::ExecuteJobImpl() {
}
InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
+ // Add background runtime call stats.
+ if (V8_UNLIKELY(FLAG_runtime_stats && executed_on_background_thread())) {
+ runtime_call_stats_->CompileBackgroundIgnition.Add(
+ &background_execute_counter_);
+ }
+
+ RuntimeCallTimerScope runtimeTimer(
+ runtime_call_stats_, &RuntimeCallStats::CompileIgnitionFinalization);
+
Handle<BytecodeArray> bytecodes = generator()->FinalizeBytecode(isolate());
if (generator()->HasStackOverflow()) {
return FAILED;
}
- CodeGenerator::MakeCodePrologue(info(), "interpreter");
-
if (FLAG_print_bytecode) {
OFStream os(stdout);
bytecodes->Print(os);
@@ -326,8 +365,7 @@ void Interpreter::DoLdaZero(InterpreterAssembler* assembler) {
//
// Load an integer literal into the accumulator as a Smi.
void Interpreter::DoLdaSmi(InterpreterAssembler* assembler) {
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* smi_int = __ SmiTag(raw_int);
+ Node* smi_int = __ BytecodeOperandImmSmi(0);
__ SetAccumulator(smi_int);
__ Dispatch();
}
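
DoLdaSmi now tags the raw 32-bit immediate directly via BytecodeOperandImmSmi (SmiFromWord32) instead of widening it first and then calling SmiTag. A standalone sketch of 64-bit Smi tagging as commonly laid out; this layout is an assumption for illustration, not taken from this diff:

    #include <cstdint>
    #include <cstdio>

    // Assumed 64-bit Smi layout: the 32-bit payload sits in the upper word
    // half with a zero tag below.
    uint64_t SmiFromWord32(int32_t value) {
      return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
    }

    int main() {
      printf("0x%016llx\n",
             static_cast<unsigned long long>(SmiFromWord32(42)));
      // prints 0x0000002a00000000
    }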
@@ -419,21 +457,19 @@ void Interpreter::DoMov(InterpreterAssembler* assembler) {
__ Dispatch();
}
-Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context,
+Node* Interpreter::BuildLoadGlobal(Callable ic, Node* context, Node* name_index,
Node* feedback_slot,
InterpreterAssembler* assembler) {
- typedef LoadGlobalWithVectorDescriptor Descriptor;
-
// Load the global via the LoadGlobalIC.
Node* code_target = __ HeapConstant(ic.code());
+ Node* name = __ LoadConstantPoolEntry(name_index);
Node* smi_slot = __ SmiTag(feedback_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- return __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ return __ CallStub(ic.descriptor(), code_target, context, name, smi_slot,
+ type_feedback_vector);
}
-// LdaGlobal <slot>
+// LdaGlobal <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> outside of a typeof.
@@ -443,13 +479,14 @@ void Interpreter::DoLdaGlobal(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
-// LdaGlobalInsideTypeof <slot>
+// LdaGlobalInsideTypeof <name_index> <slot>
//
// Load the global with name in constant pool entry <name_index> into the
// accumulator using FeedbackVector slot <slot> inside of a typeof.
@@ -459,14 +496,14 @@ void Interpreter::DoLdaGlobalInsideTypeof(InterpreterAssembler* assembler) {
Node* context = __ GetContext();
- Node* raw_slot = __ BytecodeOperandIdx(0);
- Node* result = BuildLoadGlobal(ic, context, raw_slot, assembler);
+ Node* name_index = __ BytecodeOperandIdx(0);
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* result = BuildLoadGlobal(ic, context, name_index, raw_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
// Get the global object.
Node* context = __ GetContext();
Node* native_context = __ LoadNativeContext(context);
@@ -481,10 +518,8 @@ void Interpreter::DoStaGlobal(Callable ic, InterpreterAssembler* assembler) {
Node* raw_slot = __ BytecodeOperandIdx(1);
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, global), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, global, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -650,7 +685,8 @@ void Interpreter::DoLdaLookupGlobalSlot(Runtime::FunctionId function_id,
isolate_, function_id == Runtime::kLoadLookupSlotInsideTypeof
? INSIDE_TYPEOF
: NOT_INSIDE_TYPEOF);
- Node* result = BuildLoadGlobal(ic, context, feedback_slot, assembler);
+ Node* result =
+ BuildLoadGlobal(ic, context, name_index, feedback_slot, assembler);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -717,7 +753,6 @@ void Interpreter::DoStaLookupSlotStrict(InterpreterAssembler* assembler) {
// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name at
// constant pool entry <name_index>.
void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
- typedef LoadWithVectorDescriptor Descriptor;
Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* register_index = __ BytecodeOperandReg(0);
@@ -728,10 +763,8 @@ void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(
- ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
- Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -741,7 +774,6 @@ void Interpreter::DoLdaNamedProperty(InterpreterAssembler* assembler) {
// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
// in the accumulator.
void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
- typedef LoadWithVectorDescriptor Descriptor;
Callable ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate_);
Node* code_target = __ HeapConstant(ic.code());
Node* reg_index = __ BytecodeOperandReg(0);
@@ -751,16 +783,13 @@ void Interpreter::DoLdaKeyedProperty(InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- Node* result = __ CallStub(
- ic.descriptor(), code_target, context, Arg(Descriptor::kReceiver, object),
- Arg(Descriptor::kName, name), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ Node* result = __ CallStub(ic.descriptor(), code_target, context, object,
+ name, smi_slot, type_feedback_vector);
__ SetAccumulator(result);
__ Dispatch();
}
void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -771,10 +800,8 @@ void Interpreter::DoStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -799,7 +826,6 @@ void Interpreter::DoStaNamedPropertyStrict(InterpreterAssembler* assembler) {
}
void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
- typedef StoreWithVectorDescriptor Descriptor;
Node* code_target = __ HeapConstant(ic.code());
Node* object_reg_index = __ BytecodeOperandReg(0);
Node* object = __ LoadRegister(object_reg_index);
@@ -810,10 +836,8 @@ void Interpreter::DoKeyedStoreIC(Callable ic, InterpreterAssembler* assembler) {
Node* smi_slot = __ SmiTag(raw_slot);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* context = __ GetContext();
- __ CallStub(ic.descriptor(), code_target, context,
- Arg(Descriptor::kReceiver, object), Arg(Descriptor::kName, name),
- Arg(Descriptor::kValue, value), Arg(Descriptor::kSlot, smi_slot),
- Arg(Descriptor::kVector, type_feedback_vector));
+ __ CallStub(ic.descriptor(), code_target, context, object, name, value,
+ smi_slot, type_feedback_vector);
__ Dispatch();
}
@@ -835,13 +859,36 @@ void Interpreter::DoStaKeyedPropertyStrict(InterpreterAssembler* assembler) {
DoKeyedStoreIC(ic, assembler);
}
+// StaDataPropertyInLiteral <object> <name> <flags>
+//
+// Define a property <name> on <object> with the value in the accumulator.
+// Property attributes and whether to set the function name are stored in
+// DataPropertyInLiteralFlags <flags>.
+//
+// This definition is not observable and is used only for definitions
+// in object or class literals.
+void Interpreter::DoStaDataPropertyInLiteral(InterpreterAssembler* assembler) {
+ Node* object = __ LoadRegister(__ BytecodeOperandReg(0));
+ Node* name = __ LoadRegister(__ BytecodeOperandReg(1));
+ Node* value = __ GetAccumulator();
+ Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
+ Node* vector_index = __ SmiTag(__ BytecodeOperandIdx(3));
+
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* context = __ GetContext();
+
+ __ CallRuntime(Runtime::kDefineDataPropertyInLiteral, context, object, name,
+ value, flags, type_feedback_vector, vector_index);
+ __ Dispatch();
+}
+
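As a rough illustration of what the <flags> operand carries, here is a minimal standalone C++ sketch of packing property attributes together with a set-function-name bit into one small integer. The field layout and names are hypothetical, not V8's actual DataPropertyInLiteralFlags encoding.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical layout: low three bits hold the property attributes, bit 3
    // records whether the function name still has to be set at definition time.
    constexpr uint32_t kAttributesMask = 0x7;
    constexpr uint32_t kSetFunctionNameBit = 1u << 3;

    constexpr uint32_t EncodeFlags(uint32_t attributes, bool set_function_name) {
      return (attributes & kAttributesMask) |
             (set_function_name ? kSetFunctionNameBit : 0u);
    }

    int main() {
      uint32_t flags = EncodeFlags(/*attributes=*/2, /*set_function_name=*/true);
      std::printf("attributes=%u set_function_name=%d\n",
                  flags & kAttributesMask, (flags & kSetFunctionNameBit) != 0);
      return 0;
    }
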
// LdaModuleVariable <cell_index> <depth>
//
// Load the contents of a module variable into the accumulator. The variable is
// identified by <cell_index>. <depth> is the depth of the current context
// relative to the module context.
void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
- Node* cell_index = __ BytecodeOperandImm(0);
+ Node* cell_index = __ BytecodeOperandImmIntPtr(0);
Node* depth = __ BytecodeOperandUImm(1);
Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
@@ -884,7 +931,7 @@ void Interpreter::DoLdaModuleVariable(InterpreterAssembler* assembler) {
// <depth> is the depth of the current context relative to the module context.
void Interpreter::DoStaModuleVariable(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* cell_index = __ BytecodeOperandImm(0);
+ Node* cell_index = __ BytecodeOperandImmIntPtr(0);
Node* depth = __ BytecodeOperandUImm(1);
Node* module_context = __ GetContextAtDepth(__ GetContext(), depth);
@@ -989,62 +1036,147 @@ void Interpreter::DoCompareOpWithFeedback(Token::Value compare_op,
// sometimes emit comparisons that shouldn't collect feedback (e.g.
// try-finally blocks and generators), and we could get rid of this by
// introducing Smi equality tests.
- Label skip_feedback_update(assembler);
- __ GotoIf(__ WordEqual(slot_index, __ IntPtrConstant(0)),
- &skip_feedback_update);
-
- Variable var_type_feedback(assembler, MachineRepresentation::kWord32);
- Label lhs_is_smi(assembler), lhs_is_not_smi(assembler),
- gather_rhs_type(assembler), do_compare(assembler);
- __ Branch(__ TaggedIsSmi(lhs), &lhs_is_smi, &lhs_is_not_smi);
+ Label gather_type_feedback(assembler), do_compare(assembler);
+ __ Branch(__ WordEqual(slot_index, __ IntPtrConstant(0)), &do_compare,
+ &gather_type_feedback);
- __ Bind(&lhs_is_smi);
- var_type_feedback.Bind(
- __ Int32Constant(CompareOperationFeedback::kSignedSmall));
- __ Goto(&gather_rhs_type);
-
- __ Bind(&lhs_is_not_smi);
+ __ Bind(&gather_type_feedback);
{
- Label lhs_is_number(assembler), lhs_is_not_number(assembler);
- Node* lhs_map = __ LoadMap(lhs);
- __ Branch(__ WordEqual(lhs_map, __ HeapNumberMapConstant()), &lhs_is_number,
- &lhs_is_not_number);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Label lhs_is_not_smi(assembler), lhs_is_not_number(assembler),
+ lhs_is_not_string(assembler), gather_rhs_type(assembler),
+ update_feedback(assembler);
+
+ __ GotoUnless(__ TaggedIsSmi(lhs), &lhs_is_not_smi);
- __ Bind(&lhs_is_number);
- var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kNumber));
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kSignedSmall));
__ Goto(&gather_rhs_type);
- __ Bind(&lhs_is_not_number);
- var_type_feedback.Bind(__ Int32Constant(CompareOperationFeedback::kAny));
- __ Goto(&do_compare);
- }
+ __ Bind(&lhs_is_not_smi);
+ {
+ Node* lhs_map = __ LoadMap(lhs);
+ __ GotoUnless(__ IsHeapNumberMap(lhs_map), &lhs_is_not_number);
+
+ var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kNumber));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_number);
+ {
+ Node* lhs_instance_type = __ LoadInstanceType(lhs);
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ Label lhs_is_not_oddball(assembler);
+ __ GotoUnless(
+ __ Word32Equal(lhs_instance_type, __ Int32Constant(ODDBALL_TYPE)),
+ &lhs_is_not_oddball);
+
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_oddball);
+ }
+
+ Label lhs_is_not_string(assembler);
+ __ GotoUnless(__ IsStringInstanceType(lhs_instance_type),
+ &lhs_is_not_string);
+
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kString));
+ } else {
+ var_type_feedback.Bind(__ SelectSmiConstant(
+ __ Word32Equal(
+ __ Word32And(lhs_instance_type,
+ __ Int32Constant(kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag)),
+ CompareOperationFeedback::kInternalizedString,
+ CompareOperationFeedback::kString));
+ }
+ __ Goto(&gather_rhs_type);
+
+ __ Bind(&lhs_is_not_string);
+ var_type_feedback.Bind(__ SmiConstant(CompareOperationFeedback::kAny));
+ __ Goto(&gather_rhs_type);
+ }
+ }
- __ Bind(&gather_rhs_type);
- {
- Label rhs_is_smi(assembler);
- __ GotoIf(__ TaggedIsSmi(rhs), &rhs_is_smi);
-
- Node* rhs_map = __ LoadMap(rhs);
- Node* rhs_type =
- __ Select(__ WordEqual(rhs_map, __ HeapNumberMapConstant()),
- __ Int32Constant(CompareOperationFeedback::kNumber),
- __ Int32Constant(CompareOperationFeedback::kAny));
- var_type_feedback.Bind(__ Word32Or(var_type_feedback.value(), rhs_type));
- __ Goto(&do_compare);
-
- __ Bind(&rhs_is_smi);
- var_type_feedback.Bind(
- __ Word32Or(var_type_feedback.value(),
- __ Int32Constant(CompareOperationFeedback::kSignedSmall)));
- __ Goto(&do_compare);
+ __ Bind(&gather_rhs_type);
+ {
+ Label rhs_is_not_smi(assembler), rhs_is_not_number(assembler);
+
+ __ GotoUnless(__ TaggedIsSmi(rhs), &rhs_is_not_smi);
+
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kSignedSmall)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_smi);
+ {
+ Node* rhs_map = __ LoadMap(rhs);
+ __ GotoUnless(__ IsHeapNumberMap(rhs_map), &rhs_is_not_number);
+
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kNumber)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_number);
+ {
+ Node* rhs_instance_type = __ LoadInstanceType(rhs);
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ Label rhs_is_not_oddball(assembler);
+ __ GotoUnless(__ Word32Equal(rhs_instance_type,
+ __ Int32Constant(ODDBALL_TYPE)),
+ &rhs_is_not_oddball);
+
+ var_type_feedback.Bind(__ SmiOr(
+ var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kNumberOrOddball)));
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_oddball);
+ }
+
+ Label rhs_is_not_string(assembler);
+ __ GotoUnless(__ IsStringInstanceType(rhs_instance_type),
+ &rhs_is_not_string);
+
+ if (Token::IsOrderedRelationalCompareOp(compare_op)) {
+ var_type_feedback.Bind(
+ __ SmiOr(var_type_feedback.value(),
+ __ SmiConstant(CompareOperationFeedback::kString)));
+ } else {
+ var_type_feedback.Bind(__ SmiOr(
+ var_type_feedback.value(),
+ __ SelectSmiConstant(
+ __ Word32Equal(
+ __ Word32And(rhs_instance_type,
+ __ Int32Constant(kIsNotInternalizedMask)),
+ __ Int32Constant(kInternalizedTag)),
+ CompareOperationFeedback::kInternalizedString,
+ CompareOperationFeedback::kString)));
+ }
+ __ Goto(&update_feedback);
+
+ __ Bind(&rhs_is_not_string);
+ var_type_feedback.Bind(
+ __ SmiConstant(CompareOperationFeedback::kAny));
+ __ Goto(&update_feedback);
+ }
+ }
+ }
+
+ __ Bind(&update_feedback);
+ {
+ __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+ __ Goto(&do_compare);
+ }
}
__ Bind(&do_compare);
- __ UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
- slot_index);
- __ Goto(&skip_feedback_update);
-
- __ Bind(&skip_feedback_update);
Node* result;
switch (compare_op) {
case Token::EQ:
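
The feedback-gathering block above never throws information away: each operand ORs a Smi-encoded lattice constant into the slot, so the slot converges on the most general operand kind observed. Below is a standalone model of that union behaviour, using made-up constants rather than V8's real CompareOperationFeedback values.

    #include <cstdint>
    #include <cstdio>

    // Made-up lattice: each more general kind includes the bits of the more
    // specific ones, so OR-ing feedback from both operands loses nothing.
    enum Feedback : uint32_t {
      kNone = 0,
      kSignedSmall = 1u << 0,
      kNumber = (1u << 1) | kSignedSmall,
      kInternalizedString = 1u << 2,
      kString = (1u << 3) | kInternalizedString,
      kAny = 0xFFu,
    };

    int main() {
      uint32_t slot = kNone;
      slot |= kSignedSmall;  // left operand was a Smi
      slot |= kNumber;       // right operand was a HeapNumber
      std::printf("slot=0x%x covers Number: %d\n", slot,
                  (slot & kNumber) == kNumber);
      return 0;
    }
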
@@ -1126,8 +1258,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
Node* slot_index = __ BytecodeOperandIdx(1);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32),
- var_rhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned),
+ var_rhs_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, lhs, &var_lhs_type_feedback);
Node* rhs_value = __ TruncateTaggedToWord32WithFeedback(
@@ -1166,10 +1299,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
UNREACHABLE();
}
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
if (FLAG_debug_code) {
Label ok(assembler);
@@ -1182,9 +1314,9 @@ void Interpreter::DoBitwiseBinaryOp(Token::Value bitwise_op,
}
Node* input_feedback =
- __ Word32Or(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
- __ UpdateFeedback(__ Word32Or(result_type, input_feedback),
- type_feedback_vector, slot_index);
+ __ SmiOr(var_lhs_type_feedback.value(), var_rhs_type_feedback.value());
+ __ UpdateFeedback(__ SmiOr(result_type, input_feedback), type_feedback_vector,
+ slot_index);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -1251,8 +1383,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
@@ -1271,7 +1402,7 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
- __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
@@ -1283,8 +1414,9 @@ void Interpreter::DoAddSmi(InterpreterAssembler* assembler) {
AddWithFeedbackStub stub(__ isolate());
Callable callable =
Callable(stub.GetCode(), AddWithFeedbackStub::Descriptor(__ isolate()));
- Node* args[] = {left, right, slot_index, type_feedback_vector, context};
- var_result.Bind(__ CallStubN(callable, args, 1));
+ var_result.Bind(__ CallStub(callable, context, left, right,
+ __ TruncateWordToWord32(slot_index),
+ type_feedback_vector));
__ Goto(&end);
}
__ Bind(&end);
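
Both DoAddSmi above and DoSubSmi below follow the same shape: attempt the operation on the raw tagged words, and take the stub slow path only when the overflow projection fires. Here is a minimal sketch of that pattern, assuming a Smi is a word holding value << 1 and a GCC/Clang-style checked-add builtin; the real tagging scheme differs by platform.

    #include <cstdint>
    #include <cstdio>
    #include <optional>

    // Smi model: the payload sits at value << 1 with tag bit 0 clear, so a
    // plain word add of two tagged Smis produces the tagged sum unless the
    // machine add overflows.
    std::optional<intptr_t> TrySmiAdd(intptr_t lhs_tagged, intptr_t rhs_tagged) {
      intptr_t result;
      if (__builtin_add_overflow(lhs_tagged, rhs_tagged, &result)) {
        return std::nullopt;  // slow path: the AddWithFeedback stub equivalent
      }
      return result;
    }

    int main() {
      if (auto sum = TrySmiAdd(intptr_t{3} << 1, intptr_t{4} << 1)) {
        std::printf("untagged sum = %ld\n", static_cast<long>(*sum >> 1));
      }
      return 0;
    }
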
@@ -1305,8 +1437,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
@@ -1325,7 +1456,7 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
__ Branch(overflow, &slowpath, &if_notoverflow);
__ Bind(&if_notoverflow);
{
- __ UpdateFeedback(__ Int32Constant(BinaryOperationFeedback::kSignedSmall),
+ __ UpdateFeedback(__ SmiConstant(BinaryOperationFeedback::kSignedSmall),
type_feedback_vector, slot_index);
var_result.Bind(__ BitcastWordToTaggedSigned(__ Projection(0, pair)));
__ Goto(&end);
@@ -1337,8 +1468,9 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
SubtractWithFeedbackStub stub(__ isolate());
Callable callable = Callable(
stub.GetCode(), SubtractWithFeedbackStub::Descriptor(__ isolate()));
- Node* args[] = {left, right, slot_index, type_feedback_vector, context};
- var_result.Bind(__ CallStubN(callable, args, 1));
+ var_result.Bind(__ CallStub(callable, context, left, right,
+ __ TruncateWordToWord32(slot_index),
+ type_feedback_vector));
__ Goto(&end);
}
__ Bind(&end);
@@ -1355,22 +1487,21 @@ void Interpreter::DoSubSmi(InterpreterAssembler* assembler) {
void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32Or(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1383,22 +1514,21 @@ void Interpreter::DoBitwiseOrSmi(InterpreterAssembler* assembler) {
void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* value = __ Word32And(lhs_value, rhs_value);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1412,23 +1542,22 @@ void Interpreter::DoBitwiseAndSmi(InterpreterAssembler* assembler) {
void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Shl(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1442,23 +1571,22 @@ void Interpreter::DoShiftLeftSmi(InterpreterAssembler* assembler) {
void Interpreter::DoShiftRightSmi(InterpreterAssembler* assembler) {
Node* reg_index = __ BytecodeOperandReg(1);
Node* left = __ LoadRegister(reg_index);
- Node* raw_int = __ BytecodeOperandImm(0);
- Node* right = __ SmiTag(raw_int);
+ Node* right = __ BytecodeOperandImmSmi(0);
Node* context = __ GetContext();
Node* slot_index = __ BytecodeOperandIdx(2);
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
- Variable var_lhs_type_feedback(assembler, MachineRepresentation::kWord32);
+ Variable var_lhs_type_feedback(assembler,
+ MachineRepresentation::kTaggedSigned);
Node* lhs_value = __ TruncateTaggedToWord32WithFeedback(
context, left, &var_lhs_type_feedback);
Node* rhs_value = __ SmiToWord32(right);
Node* shift_count = __ Word32And(rhs_value, __ Int32Constant(0x1f));
Node* value = __ Word32Sar(lhs_value, shift_count);
Node* result = __ ChangeInt32ToTagged(value);
- Node* result_type =
- __ Select(__ TaggedIsSmi(result),
- __ Int32Constant(BinaryOperationFeedback::kSignedSmall),
- __ Int32Constant(BinaryOperationFeedback::kNumber));
- __ UpdateFeedback(__ Word32Or(result_type, var_lhs_type_feedback.value()),
+ Node* result_type = __ SelectSmiConstant(
+ __ TaggedIsSmi(result), BinaryOperationFeedback::kSignedSmall,
+ BinaryOperationFeedback::kNumber);
+ __ UpdateFeedback(__ SmiOr(result_type, var_lhs_type_feedback.value()),
type_feedback_vector, slot_index);
__ SetAccumulator(result);
__ Dispatch();
@@ -1519,14 +1647,276 @@ void Interpreter::DoToObject(InterpreterAssembler* assembler) {
//
// Increments value in the accumulator by one.
void Interpreter::DoInc(InterpreterAssembler* assembler) {
- DoUnaryOpWithFeedback<IncStub>(assembler);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // Shared entry for floating point increment.
+ Label do_finc(assembler), end(assembler);
+ Variable var_finc_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ value_var.Bind(value);
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kNone));
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi addition first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->IntPtrAddWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi addition overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_finc_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_finc);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_finc_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_finc);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->SmiEqual(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->SmiConstant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_finc);
+ {
+ Node* finc_value = var_finc_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* finc_result = assembler->Float64Add(finc_value, one);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(finc_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+
+ __ SetAccumulator(result_var.value());
+ __ Dispatch();
}
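
The handler above is essentially a retry loop: take the Smi fast path, else the HeapNumber path, else convert (an oddball via its cached number, anything else via ToNumber) and loop again, widening the recorded feedback on the way; the mirrored DoDec below differs only in the arithmetic. Here is a simplified standalone model of that control flow — the real handler also ORs lattice values and handles Smi overflow, both elided here.

    #include <cstdio>
    #include <variant>

    enum Feedback { kNone, kSignedSmall, kNumber, kNumberOrOddball, kAny };

    struct Oddball { double to_number; };  // e.g. true caches 1.0
    using Value = std::variant<int, double, Oddball>;

    double Increment(Value v, Feedback* feedback) {
      for (;;) {  // mirrors the handler's |start| label
        if (auto* smi = std::get_if<int>(&v)) {
          if (*feedback == kNone) *feedback = kSignedSmall;
          return *smi + 1;  // overflow handling omitted
        }
        if (auto* num = std::get_if<double>(&v)) {
          if (*feedback == kNone) *feedback = kNumber;
          return *num + 1.0;
        }
        *feedback = kNumberOrOddball;        // remember the conversion
        v = std::get<Oddball>(v).to_number;  // retry with the cached number
      }
    }

    int main() {
      Feedback feedback = kNone;
      std::printf("%g\n", Increment(Oddball{1.0}, &feedback));  // prints 2
      return 0;
    }
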
// Dec
//
// Decrements value in the accumulator by one.
void Interpreter::DoDec(InterpreterAssembler* assembler) {
- DoUnaryOpWithFeedback<DecStub>(assembler);
+ typedef CodeStubAssembler::Label Label;
+ typedef compiler::Node Node;
+ typedef CodeStubAssembler::Variable Variable;
+
+ Node* value = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+
+ // Shared entry for floating point decrement.
+ Label do_fdec(assembler), end(assembler);
+ Variable var_fdec_value(assembler, MachineRepresentation::kFloat64);
+
+ // We might need to try again due to ToNumber conversion.
+ Variable value_var(assembler, MachineRepresentation::kTagged);
+ Variable result_var(assembler, MachineRepresentation::kTagged);
+ Variable var_type_feedback(assembler, MachineRepresentation::kTaggedSigned);
+ Variable* loop_vars[] = {&value_var, &var_type_feedback};
+ Label start(assembler, 2, loop_vars);
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kNone));
+ value_var.Bind(value);
+ assembler->Goto(&start);
+ assembler->Bind(&start);
+ {
+ value = value_var.value();
+
+ Label if_issmi(assembler), if_isnotsmi(assembler);
+ assembler->Branch(assembler->TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
+
+ assembler->Bind(&if_issmi);
+ {
+ // Try fast Smi subtraction first.
+ Node* one = assembler->SmiConstant(Smi::FromInt(1));
+ Node* pair = assembler->IntPtrSubWithOverflow(
+ assembler->BitcastTaggedToWord(value),
+ assembler->BitcastTaggedToWord(one));
+ Node* overflow = assembler->Projection(1, pair);
+
+ // Check if the Smi subtraction overflowed.
+ Label if_overflow(assembler), if_notoverflow(assembler);
+ assembler->Branch(overflow, &if_overflow, &if_notoverflow);
+
+ assembler->Bind(&if_notoverflow);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kSignedSmall)));
+ result_var.Bind(
+ assembler->BitcastWordToTaggedSigned(assembler->Projection(0, pair)));
+ assembler->Goto(&end);
+
+ assembler->Bind(&if_overflow);
+ {
+ var_fdec_value.Bind(assembler->SmiToFloat64(value));
+ assembler->Goto(&do_fdec);
+ }
+ }
+
+ assembler->Bind(&if_isnotsmi);
+ {
+ // Check if the value is a HeapNumber.
+ Label if_valueisnumber(assembler),
+ if_valuenotnumber(assembler, Label::kDeferred);
+ Node* value_map = assembler->LoadMap(value);
+ assembler->Branch(assembler->IsHeapNumberMap(value_map),
+ &if_valueisnumber, &if_valuenotnumber);
+
+ assembler->Bind(&if_valueisnumber);
+ {
+ // Load the HeapNumber value.
+ var_fdec_value.Bind(assembler->LoadHeapNumberValue(value));
+ assembler->Goto(&do_fdec);
+ }
+
+ assembler->Bind(&if_valuenotnumber);
+ {
+ // We do not require an Or with earlier feedback here because once we
+ // convert the value to a number, we cannot reach this path. We can
+ // only reach this path on the first pass when the feedback is kNone.
+ CSA_ASSERT(assembler,
+ assembler->SmiEqual(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNone)));
+
+ Label if_valueisoddball(assembler), if_valuenotoddball(assembler);
+ Node* instance_type = assembler->LoadMapInstanceType(value_map);
+ Node* is_oddball = assembler->Word32Equal(
+ instance_type, assembler->Int32Constant(ODDBALL_TYPE));
+ assembler->Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
+
+ assembler->Bind(&if_valueisoddball);
+ {
+ // Convert Oddball to Number and check again.
+ value_var.Bind(
+ assembler->LoadObjectField(value, Oddball::kToNumberOffset));
+ var_type_feedback.Bind(assembler->SmiConstant(
+ BinaryOperationFeedback::kNumberOrOddball));
+ assembler->Goto(&start);
+ }
+
+ assembler->Bind(&if_valuenotoddball);
+ {
+ // Convert to a Number first and try again.
+ Callable callable =
+ CodeFactory::NonNumberToNumber(assembler->isolate());
+ var_type_feedback.Bind(
+ assembler->SmiConstant(BinaryOperationFeedback::kAny));
+ value_var.Bind(assembler->CallStub(callable, context, value));
+ assembler->Goto(&start);
+ }
+ }
+ }
+ }
+
+ assembler->Bind(&do_fdec);
+ {
+ Node* fdec_value = var_fdec_value.value();
+ Node* one = assembler->Float64Constant(1.0);
+ Node* fdec_result = assembler->Float64Sub(fdec_value, one);
+ var_type_feedback.Bind(assembler->SmiOr(
+ var_type_feedback.value(),
+ assembler->SmiConstant(BinaryOperationFeedback::kNumber)));
+ result_var.Bind(assembler->AllocateHeapNumberWithValue(fdec_result));
+ assembler->Goto(&end);
+ }
+
+ assembler->Bind(&end);
+ assembler->UpdateFeedback(var_type_feedback.value(), type_feedback_vector,
+ slot_index);
+
+ __ SetAccumulator(result_var.value());
+ __ Dispatch();
}
// LogicalNot
@@ -1625,6 +2015,19 @@ void Interpreter::DoDeletePropertySloppy(InterpreterAssembler* assembler) {
DoDelete(Runtime::kDeleteProperty_Sloppy, assembler);
}
+// GetSuperConstructor
+//
+// Get the super constructor from the object referenced by the accumulator.
+// The result is stored in register |reg|.
+void Interpreter::DoGetSuperConstructor(InterpreterAssembler* assembler) {
+ Node* active_function = __ GetAccumulator();
+ Node* context = __ GetContext();
+ Node* result = __ GetSuperConstructor(active_function, context);
+ Node* reg = __ BytecodeOperandReg(0);
+ __ StoreRegister(result, reg);
+ __ Dispatch();
+}
+
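For orientation, the super constructor is simply the [[Prototype]] of the active function (class B extends A links B to A), which is why the handler can resolve it with one assembler helper rather than a runtime call. A toy stand-in follows, with types that are illustrative rather than V8's object model.

    #include <cstdio>

    struct FunctionModel {
      const char* name;
      const FunctionModel* prototype;  // class B extends A links B's
                                       // [[Prototype]] to A
    };

    const FunctionModel* GetSuperConstructor(const FunctionModel& active) {
      return active.prototype;
    }

    int main() {
      FunctionModel a{"A", nullptr};
      FunctionModel b{"B", &a};  // models: class B extends A { ... }
      std::printf("super of %s is %s\n", b.name, GetSuperConstructor(b)->name);
      return 0;
    }
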
void Interpreter::DoJSCall(InterpreterAssembler* assembler,
TailCallMode tail_call_mode) {
Node* function_reg = __ BytecodeOperandReg(0);
@@ -1756,6 +2159,26 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
__ Dispatch();
}
+// NewWithSpread <first_arg> <arg_count>
+//
+// Call the constructor in |first_arg| with new.target in |first_arg + 1|,
+// passing the |arg_count - 2| following registers as arguments. The final
+// argument is always a spread.
+//
+void Interpreter::DoNewWithSpread(InterpreterAssembler* assembler) {
+ Node* first_arg_reg = __ BytecodeOperandReg(0);
+ Node* first_arg = __ RegisterLocation(first_arg_reg);
+ Node* args_count = __ BytecodeOperandCount(1);
+ Node* context = __ GetContext();
+
+ // Call into Runtime function NewWithSpread which does everything.
+ Node* runtime_function = __ Int32Constant(Runtime::kNewWithSpread);
+ Node* result =
+ __ CallRuntimeN(runtime_function, context, first_arg, args_count);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
// New <constructor> <first_arg> <arg_count>
//
// Call operator new with |constructor| and the first argument in
@@ -1763,7 +2186,6 @@ void Interpreter::DoCallJSRuntime(InterpreterAssembler* assembler) {
// registers. The new.target is in the accumulator.
//
void Interpreter::DoNew(InterpreterAssembler* assembler) {
- Callable ic = CodeFactory::InterpreterPushArgsAndConstruct(isolate_);
Node* new_target = __ GetAccumulator();
Node* constructor_reg = __ BytecodeOperandReg(0);
Node* constructor = __ LoadRegister(constructor_reg);
@@ -1846,11 +2268,90 @@ void Interpreter::DoTestInstanceOf(InterpreterAssembler* assembler) {
DoCompareOp(Token::INSTANCEOF, assembler);
}
+// TestUndetectable <src>
+//
+// Test if the value in the <src> register equals to null/undefined. This is
+// done by checking undetectable bit on the map of the object.
+void Interpreter::DoTestUndetectable(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+
+ Label not_equal(assembler), end(assembler);
+  // If the object is a Smi then return false.
+ __ GotoIf(__ TaggedIsSmi(object), &not_equal);
+
+  // If it is a HeapObject, load the map and check the undetectable bit.
+ Node* map = __ LoadMap(object);
+ Node* map_bitfield = __ LoadMapBitField(map);
+ Node* map_undetectable =
+ __ Word32And(map_bitfield, __ Int32Constant(1 << Map::kIsUndetectable));
+ __ GotoIf(__ Word32Equal(map_undetectable, __ Int32Constant(0)), &not_equal);
+
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+
+ __ Bind(&not_equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
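A standalone sketch of the map check the handler performs instead of a full abstract-equality comparison; the bit position chosen here is illustrative, the real flag lives in the map's bit field.

    #include <cstdint>
    #include <cstdio>

    struct MapModel { uint8_t bit_field; };
    constexpr uint8_t kIsUndetectableBit = 1u << 4;  // bit position is made up

    bool IsUndetectable(const MapModel& map) {
      return (map.bit_field & kIsUndetectableBit) != 0;
    }

    int main() {
      MapModel undetectable{kIsUndetectableBit};  // e.g. document.all's map
      MapModel plain{0};
      std::printf("%d %d\n", IsUndetectable(undetectable), IsUndetectable(plain));
      return 0;
    }
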
+// TestNull <src>
+//
+// Test if the value in the <src> register is strictly equal to null.
+void Interpreter::DoTestNull(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
+
+ Label equal(assembler), end(assembler);
+ __ GotoIf(__ WordEqual(object, null_value), &equal);
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
+// TestUndefined <src>
+//
+// Test if the value in the <src> register is strictly equal to undefined.
+void Interpreter::DoTestUndefined(InterpreterAssembler* assembler) {
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
+
+ Label equal(assembler), end(assembler);
+ __ GotoIf(__ WordEqual(object, undefined_value), &equal);
+ __ SetAccumulator(__ BooleanConstant(false));
+ __ Goto(&end);
+
+ __ Bind(&equal);
+ {
+ __ SetAccumulator(__ BooleanConstant(true));
+ __ Goto(&end);
+ }
+
+ __ Bind(&end);
+ __ Dispatch();
+}
+
// Jump <imm>
//
// Jump by number of bytes represented by the immediate operand |imm|.
void Interpreter::DoJump(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ Jump(relative_jump);
}
@@ -1869,7 +2370,7 @@ void Interpreter::DoJumpConstant(InterpreterAssembler* assembler) {
// accumulator contains true.
void Interpreter::DoJumpIfTrue(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* true_value = __ BooleanConstant(true);
__ JumpIfWordEqual(accumulator, true_value, relative_jump);
}
@@ -1892,7 +2393,7 @@ void Interpreter::DoJumpIfTrueConstant(InterpreterAssembler* assembler) {
// accumulator contains false.
void Interpreter::DoJumpIfFalse(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* false_value = __ BooleanConstant(false);
__ JumpIfWordEqual(accumulator, false_value, relative_jump);
}
@@ -1915,7 +2416,7 @@ void Interpreter::DoJumpIfFalseConstant(InterpreterAssembler* assembler) {
// referenced by the accumulator is true when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanTrue(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -1948,7 +2449,7 @@ void Interpreter::DoJumpIfToBooleanTrueConstant(
// referenced by the accumulator is false when the object is cast to boolean.
void Interpreter::DoJumpIfToBooleanFalse(InterpreterAssembler* assembler) {
Node* value = __ GetAccumulator();
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Label if_true(assembler), if_false(assembler);
__ BranchIfToBooleanIsTrue(value, &if_true, &if_false);
__ Bind(&if_true);
@@ -1982,7 +2483,7 @@ void Interpreter::DoJumpIfToBooleanFalseConstant(
void Interpreter::DoJumpIfNull(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordEqual(accumulator, null_value, relative_jump);
}
@@ -2006,7 +2507,7 @@ void Interpreter::DoJumpIfUndefined(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* undefined_value =
__ HeapConstant(isolate_->factory()->undefined_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
@@ -2023,6 +2524,49 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
__ JumpIfWordEqual(accumulator, undefined_value, relative_jump);
}
+// JumpIfJSReceiver <imm>
+//
+// Jump by number of bytes represented by an immediate operand if the object
+// referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiver(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
+
+ Label if_object(assembler), if_notobject(assembler, Label::kDeferred),
+ if_notsmi(assembler);
+ __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+ __ Bind(&if_notsmi);
+ __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+ __ Bind(&if_object);
+ __ Jump(relative_jump);
+
+ __ Bind(&if_notobject);
+ __ Dispatch();
+}
+
+// JumpIfJSReceiverConstant <idx>
+//
+// Jump by the number of bytes in the Smi at entry |idx| of the constant pool
+// if the object referenced by the accumulator is a JSReceiver.
+void Interpreter::DoJumpIfJSReceiverConstant(InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* relative_jump = __ LoadAndUntagConstantPoolEntry(index);
+
+ Label if_object(assembler), if_notobject(assembler), if_notsmi(assembler);
+ __ Branch(__ TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
+
+ __ Bind(&if_notsmi);
+ __ Branch(__ IsJSReceiver(accumulator), &if_object, &if_notobject);
+
+ __ Bind(&if_object);
+ __ Jump(relative_jump);
+
+ __ Bind(&if_notobject);
+ __ Dispatch();
+}
+
// JumpIfNotHole <imm>
//
// Jump by number of bytes represented by an immediate operand if the object
@@ -2030,7 +2574,7 @@ void Interpreter::DoJumpIfUndefinedConstant(InterpreterAssembler* assembler) {
void Interpreter::DoJumpIfNotHole(InterpreterAssembler* assembler) {
Node* accumulator = __ GetAccumulator();
Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
__ JumpIfWordNotEqual(accumulator, the_hole_value, relative_jump);
}
@@ -2052,7 +2596,7 @@ void Interpreter::DoJumpIfNotHoleConstant(InterpreterAssembler* assembler) {
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
- Node* relative_jump = __ BytecodeOperandImm(0);
+ Node* relative_jump = __ BytecodeOperandImmIntPtr(0);
Node* loop_depth = __ BytecodeOperandImm(1);
Node* osr_level = __ LoadOSRNestingLevel();
@@ -2082,14 +2626,13 @@ void Interpreter::DoJumpLoop(InterpreterAssembler* assembler) {
void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* pattern = __ LoadConstantPoolEntry(index);
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
- Node* flags_raw = __ BytecodeOperandFlag(2);
- Node* flags = __ SmiTag(flags_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
+ Node* flags = __ SmiFromWord32(__ BytecodeOperandFlag(2));
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
- Node* result = FastCloneRegExpStub::Generate(
- assembler, closure, literal_index, pattern, flags, context);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneRegExp(
+ closure, literal_index, pattern, flags, context);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2099,35 +2642,32 @@ void Interpreter::DoCreateRegExpLiteral(InterpreterAssembler* assembler) {
// Creates an array literal for literal index <literal_idx> with
// CreateArrayLiteral flags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
Node* closure = __ LoadRegister(Register::function_closure());
Node* context = __ GetContext();
Node* bytecode_flags = __ BytecodeOperandFlag(2);
Label fast_shallow_clone(assembler),
call_runtime(assembler, Label::kDeferred);
- Node* use_fast_shallow_clone = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateArrayLiteralFlags::FastShallowCloneBit::kMask));
- __ Branch(use_fast_shallow_clone, &fast_shallow_clone, &call_runtime);
+ __ Branch(__ IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(
+ bytecode_flags),
+ &fast_shallow_clone, &call_runtime);
__ Bind(&fast_shallow_clone);
{
DCHECK(FLAG_allocation_site_pretenuring);
- Node* result = FastCloneShallowArrayStub::Generate(
- assembler, closure, literal_index, context, &call_runtime,
- TRACK_ALLOCATION_SITE);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneShallowArray(
+ closure, literal_index, context, &call_runtime, TRACK_ALLOCATION_SITE);
__ SetAccumulator(result);
__ Dispatch();
}
__ Bind(&call_runtime);
{
- STATIC_ASSERT(CreateArrayLiteralFlags::FlagsBits::kShift == 0);
- Node* flags_raw = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateArrayLiteralFlags::FlagsBits::kMask));
+ Node* flags_raw =
+ __ DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
+ bytecode_flags);
Node* flags = __ SmiTag(flags_raw);
Node* index = __ BytecodeOperandIdx(0);
Node* constant_elements = __ LoadConstantPoolEntry(index);
@@ -2144,24 +2684,24 @@ void Interpreter::DoCreateArrayLiteral(InterpreterAssembler* assembler) {
// Creates an object literal for literal index <literal_idx> with
// CreateObjectLiteralFlags <flags> and constant elements in <element_idx>.
void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
- Node* literal_index_raw = __ BytecodeOperandIdx(1);
- Node* literal_index = __ SmiTag(literal_index_raw);
+ Node* literal_index = __ BytecodeOperandIdxSmi(1);
Node* bytecode_flags = __ BytecodeOperandFlag(2);
Node* closure = __ LoadRegister(Register::function_closure());
// Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(assembler),
if_not_fast_clone(assembler, Label::kDeferred);
- Node* fast_clone_properties_count =
- __ DecodeWord32<CreateObjectLiteralFlags::FastClonePropertiesCountBits>(
- bytecode_flags);
- __ Branch(fast_clone_properties_count, &if_fast_clone, &if_not_fast_clone);
+ Node* fast_clone_properties_count = __ DecodeWordFromWord32<
+ CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
+ __ Branch(__ WordNotEqual(fast_clone_properties_count, __ IntPtrConstant(0)),
+ &if_fast_clone, &if_not_fast_clone);
__ Bind(&if_fast_clone);
{
// If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
- Node* result = FastCloneShallowObjectStub::GenerateFastPath(
- assembler, &if_not_fast_clone, closure, literal_index,
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* result = constructor_assembler.EmitFastCloneShallowObject(
+ &if_not_fast_clone, closure, literal_index,
fast_clone_properties_count);
__ StoreRegister(result, __ BytecodeOperandReg(3));
__ Dispatch();
@@ -2174,10 +2714,9 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
Node* constant_elements = __ LoadConstantPoolEntry(index);
Node* context = __ GetContext();
- STATIC_ASSERT(CreateObjectLiteralFlags::FlagsBits::kShift == 0);
- Node* flags_raw = __ Word32And(
- bytecode_flags,
- __ Int32Constant(CreateObjectLiteralFlags::FlagsBits::kMask));
+ Node* flags_raw =
+ __ DecodeWordFromWord32<CreateObjectLiteralFlags::FlagsBits>(
+ bytecode_flags);
Node* flags = __ SmiTag(flags_raw);
Node* result =
@@ -2189,31 +2728,38 @@ void Interpreter::DoCreateObjectLiteral(InterpreterAssembler* assembler) {
}
}
-// CreateClosure <index> <tenured>
+// CreateClosure <index> <slot> <tenured>
//
// Creates a new closure for SharedFunctionInfo at position |index| in the
// constant pool and with the PretenureFlag <tenured>.
void Interpreter::DoCreateClosure(InterpreterAssembler* assembler) {
Node* index = __ BytecodeOperandIdx(0);
Node* shared = __ LoadConstantPoolEntry(index);
- Node* flags = __ BytecodeOperandFlag(1);
+ Node* flags = __ BytecodeOperandFlag(2);
Node* context = __ GetContext();
Label call_runtime(assembler, Label::kDeferred);
- Node* fast_new_closure = __ Word32And(
- flags, __ Int32Constant(CreateClosureFlags::FastNewClosureBit::kMask));
- __ GotoUnless(fast_new_closure, &call_runtime);
- __ SetAccumulator(FastNewClosureStub::Generate(assembler, shared, context));
+ __ GotoUnless(__ IsSetWord32<CreateClosureFlags::FastNewClosureBit>(flags),
+ &call_runtime);
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ Node* vector_index = __ BytecodeOperandIdx(1);
+ vector_index = __ SmiTag(vector_index);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ __ SetAccumulator(constructor_assembler.EmitFastNewClosure(
+ shared, type_feedback_vector, vector_index, context));
__ Dispatch();
__ Bind(&call_runtime);
{
- STATIC_ASSERT(CreateClosureFlags::PretenuredBit::kShift == 0);
- Node* tenured_raw = __ Word32And(
- flags, __ Int32Constant(CreateClosureFlags::PretenuredBit::kMask));
+ Node* tenured_raw =
+ __ DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
Node* tenured = __ SmiTag(tenured_raw);
- Node* result = __ CallRuntime(Runtime::kInterpreterNewClosure, context,
- shared, tenured);
+ type_feedback_vector = __ LoadTypeFeedbackVector();
+ vector_index = __ BytecodeOperandIdx(1);
+ vector_index = __ SmiTag(vector_index);
+ Node* result =
+ __ CallRuntime(Runtime::kInterpreterNewClosure, context, shared,
+ type_feedback_vector, vector_index, tenured);
__ SetAccumulator(result);
__ Dispatch();
}
@@ -2259,8 +2805,22 @@ void Interpreter::DoCreateFunctionContext(InterpreterAssembler* assembler) {
Node* closure = __ LoadRegister(Register::function_closure());
Node* slots = __ BytecodeOperandUImm(0);
Node* context = __ GetContext();
- __ SetAccumulator(
- FastNewFunctionContextStub::Generate(assembler, closure, slots, context));
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+ closure, slots, context, FUNCTION_SCOPE));
+ __ Dispatch();
+}
+
+// CreateEvalContext <slots>
+//
+// Creates a new context with number of |slots| for an eval closure.
+void Interpreter::DoCreateEvalContext(InterpreterAssembler* assembler) {
+ Node* closure = __ LoadRegister(Register::function_closure());
+ Node* slots = __ BytecodeOperandUImm(0);
+ Node* context = __ GetContext();
+ ConstructorBuiltinsAssembler constructor_assembler(assembler->state());
+ __ SetAccumulator(constructor_assembler.EmitFastNewFunctionContext(
+ closure, slots, context, EVAL_SCOPE));
__ Dispatch();
}
@@ -2371,6 +2931,22 @@ void Interpreter::DoStackCheck(InterpreterAssembler* assembler) {
}
}
+// SetPendingMessage
+//
+// Sets the pending message to the value in the accumulator, and returns the
+// previous pending message in the accumulator.
+void Interpreter::DoSetPendingMessage(InterpreterAssembler* assembler) {
+ Node* pending_message = __ ExternalConstant(
+ ExternalReference::address_of_pending_message_obj(isolate_));
+ Node* previous_message =
+ __ Load(MachineType::TaggedPointer(), pending_message);
+ Node* new_message = __ GetAccumulator();
+ __ StoreNoWriteBarrier(MachineRepresentation::kTaggedPointer, pending_message,
+ new_message);
+ __ SetAccumulator(previous_message);
+ __ Dispatch();
+}
+
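What the handler implements is an exchange on a single per-isolate slot: store the accumulator, hand back the old value. A minimal sketch follows, in which std::exchange plays the role of the load plus unbarriered store; try/finally code uses the returned value to save and later restore the in-flight message.

    #include <cstdio>
    #include <utility>

    template <typename T>
    T SetPendingMessage(T* slot, T new_message) {
      return std::exchange(*slot, new_message);  // returns the old message
    }

    int main() {
      const char* pending = "TypeError: ...";
      const char* cleared = nullptr;
      const char* saved = SetPendingMessage(&pending, cleared);  // enter finally
      SetPendingMessage(&pending, saved);                        // leave finally
      std::printf("restored: %s\n", pending);
      return 0;
    }
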
// Throw
//
// Throws the exception in the accumulator.
@@ -2530,7 +3106,7 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
- Node* receiver_map = __ LoadObjectField(receiver, HeapObject::kMapOffset);
+ Node* receiver_map = __ LoadMap(receiver);
__ Branch(__ WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
__ Bind(&if_fast);
{
@@ -2643,7 +3219,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
__ Bind(&ok);
Node* array =
- __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset);
+ __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
Node* context = __ GetContext();
Node* state = __ GetAccumulator();
@@ -2660,7 +3236,7 @@ void Interpreter::DoSuspendGenerator(InterpreterAssembler* assembler) {
__ Bind(&if_stepping);
{
Node* context = __ GetContext();
- __ CallRuntime(Runtime::kDebugRecordAsyncFunction, context, generator);
+ __ CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
__ Goto(&ok);
}
}
@@ -2675,7 +3251,7 @@ void Interpreter::DoResumeGenerator(InterpreterAssembler* assembler) {
Node* generator = __ LoadRegister(generator_reg);
__ ImportRegisterFile(
- __ LoadObjectField(generator, JSGeneratorObject::kOperandStackOffset));
+ __ LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset));
Node* old_state =
__ LoadObjectField(generator, JSGeneratorObject::kContinuationOffset);
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index b10ae2e451..04f7e85b39 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -76,6 +76,14 @@ class Interpreter {
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+ typedef void (Interpreter::*BytecodeGeneratorFunc)(InterpreterAssembler*);
+
+ // Generates handler for given |bytecode| and |operand_scale| using
+ // |generator| and installs it into the dispatch table.
+ void InstallBytecodeHandler(Zone* zone, Bytecode bytecode,
+ OperandScale operand_scale,
+ BytecodeGeneratorFunc generator);
+
// Generates code to perform the binary operation via |Generator|.
template <class Generator>
void DoBinaryOpWithFeedback(InterpreterAssembler* assembler);
@@ -141,6 +149,7 @@ class Interpreter {
// Generates code to load a global.
compiler::Node* BuildLoadGlobal(Callable ic, compiler::Node* context,
+ compiler::Node* name_index,
compiler::Node* feedback_slot,
InterpreterAssembler* assembler);
diff --git a/deps/v8/src/interpreter/mkpeephole.cc b/deps/v8/src/interpreter/mkpeephole.cc
index 62d3a77e02..e6c3b76f28 100644
--- a/deps/v8/src/interpreter/mkpeephole.cc
+++ b/deps/v8/src/interpreter/mkpeephole.cc
@@ -192,6 +192,28 @@ PeepholeActionAndData PeepholeActionTableWriter::LookupActionAndData(
}
}
+  // Fuse LdaNull/LdaUndefined followed by an equality comparison into
+  // TestUndetectable. Testing undetectability is a simple check on the map,
+  // which is more efficient than the full comparison operation.
+ if (last == Bytecode::kLdaNull || last == Bytecode::kLdaUndefined) {
+ if (current == Bytecode::kTestEqual) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestUndetectable};
+ }
+ }
+
+  // Fuse LdaNull/LdaUndefined followed by a strict equality comparison into
+  // TestNull/TestUndefined.
+ if (current == Bytecode::kTestEqualStrict) {
+ if (last == Bytecode::kLdaNull) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestNull};
+ } else if (last == Bytecode::kLdaUndefined) {
+ return {PeepholeAction::kTransformEqualityWithNullOrUndefinedAction,
+ Bytecode::kTestUndefined};
+ }
+ }
+
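The two rules above amount to a small lookup on the (last, current) bytecode pair. A compact model of just these rules, with an enum that stands in for V8's Bytecode enum:

    #include <cstdio>
    #include <optional>

    enum class Bc {
      kLdaNull, kLdaUndefined, kTestEqual, kTestEqualStrict,
      kTestUndetectable, kTestNull, kTestUndefined
    };

    std::optional<Bc> Fuse(Bc last, Bc current) {
      bool null_or_undefined = last == Bc::kLdaNull || last == Bc::kLdaUndefined;
      if (null_or_undefined && current == Bc::kTestEqual) {
        return Bc::kTestUndetectable;
      }
      if (current == Bc::kTestEqualStrict) {
        if (last == Bc::kLdaNull) return Bc::kTestNull;
        if (last == Bc::kLdaUndefined) return Bc::kTestUndefined;
      }
      return std::nullopt;  // no fusion: emit both bytecodes unchanged
    }

    int main() {
      std::printf("%d\n", Fuse(Bc::kLdaNull, Bc::kTestEqualStrict).has_value());
      return 0;
    }
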
// If there is no last bytecode to optimize against, store the incoming
// bytecode or for jumps emit incoming bytecode immediately.
if (last == Bytecode::kIllegal) {
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
index a148968b27..fc88676823 100644
--- a/deps/v8/src/isolate-inl.h
+++ b/deps/v8/src/isolate-inl.h
@@ -148,6 +148,11 @@ bool Isolate::IsFastArrayIterationIntact() {
return fast_iteration->value() == Smi::FromInt(kProtectorValid);
}
+bool Isolate::IsArrayBufferNeuteringIntact() {
+  PropertyCell* buffer_neutering = heap()->array_buffer_neutering_protector();
+  return buffer_neutering->value() == Smi::FromInt(kProtectorValid);
+}
+
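For context, a protector cell like this is a one-way latch: it starts valid and is invalidated once, isolate-wide, when the guarded assumption breaks, after which dependent fast paths must be abandoned. A toy model (the concrete constant value is illustrative):

    #include <cstdio>

    constexpr int kProtectorValid = 1;    // concrete value is illustrative
    constexpr int kProtectorInvalid = 0;

    struct ProtectorCell { int value = kProtectorValid; };

    bool Intact(const ProtectorCell& cell) {
      return cell.value == kProtectorValid;
    }

    int main() {
      ProtectorCell neutering;
      std::printf("%d\n", Intact(neutering));  // 1: fast paths allowed
      neutering.value = kProtectorInvalid;     // e.g. an ArrayBuffer got neutered
      std::printf("%d\n", Intact(neutering));  // 0: fall back to slow paths
      return 0;
    }
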
bool Isolate::IsArrayIteratorLookupChainIntact() {
Cell* array_iterator_cell = heap()->array_iterator_protector();
return array_iterator_cell->value() == Smi::FromInt(kProtectorValid);
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 0eab398238..c0018fe40e 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -9,6 +9,7 @@
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
+#include "src/ast/ast-value-factory.h"
#include "src/ast/context-slot-cache.h"
#include "src/base/hashmap.h"
#include "src/base/platform/platform.h"
@@ -20,7 +21,7 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
-#include "src/compiler-dispatcher/compiler-dispatcher-tracer.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/compiler-dispatcher/optimizing-compile-dispatcher.h"
#include "src/crankshaft/hydrogen.h"
#include "src/debug/debug.h"
@@ -47,6 +48,7 @@
#include "src/version.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
#include "src/zone/accounting-allocator.h"
namespace v8 {
@@ -358,7 +360,7 @@ class StackTraceHelper {
// Determines whether the given stack frame should be displayed in a stack
// trace.
bool IsVisibleInStackTrace(JSFunction* fun) {
- return ShouldIncludeFrame(fun) && IsNotInNativeScript(fun) &&
+ return ShouldIncludeFrame(fun) && IsNotHidden(fun) &&
IsInSameSecurityContext(fun);
}
@@ -386,12 +388,12 @@ class StackTraceHelper {
return false;
}
- bool IsNotInNativeScript(JSFunction* fun) {
- // Functions defined in native scripts are not visible unless directly
+ bool IsNotHidden(JSFunction* fun) {
+    // Functions not defined in user scripts are not visible unless directly
// exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces && fun->shared()->IsBuiltin()) {
+ if (!FLAG_builtins_in_stack_traces && !fun->shared()->IsUserJavaScript()) {
return fun->shared()->native();
}
return true;
@@ -460,13 +462,14 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
js_frame->Summarize(&frames);
for (int i = frames.length() - 1; i >= 0; i--) {
- Handle<JSFunction> fun = frames[i].function();
+ const auto& summ = frames[i].AsJavaScript();
+ Handle<JSFunction> fun = summ.function();
// Filter out internal frames that we do not want to show.
if (!helper.IsVisibleInStackTrace(*fun)) continue;
Handle<Object> recv = frames[i].receiver();
- Handle<AbstractCode> abstract_code = frames[i].abstract_code();
+ Handle<AbstractCode> abstract_code = summ.abstract_code();
const int offset = frames[i].code_offset();
bool force_constructor = false;
@@ -509,28 +512,34 @@ Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
offset, flags);
} break;
- case StackFrame::WASM: {
- WasmFrame* wasm_frame = WasmFrame::cast(frame);
- Handle<Object> instance(wasm_frame->wasm_instance(), this);
+ case StackFrame::WASM_COMPILED: {
+ WasmCompiledFrame* wasm_frame = WasmCompiledFrame::cast(frame);
+ Handle<WasmInstanceObject> instance(wasm_frame->wasm_instance(), this);
const int wasm_function_index = wasm_frame->function_index();
Code* code = wasm_frame->unchecked_code();
Handle<AbstractCode> abstract_code(AbstractCode::cast(code), this);
const int offset =
static_cast<int>(wasm_frame->pc() - code->instruction_start());
- // TODO(wasm): The wasm object returned by the WasmFrame should always
- // be a wasm object.
- DCHECK(wasm::IsWasmInstance(*instance) || instance->IsUndefined(this));
-
- int flags = wasm::WasmIsAsmJs(*instance, this)
- ? FrameArray::kIsAsmJsWasmFrame
- : FrameArray::kIsWasmFrame;
+ int flags = 0;
+ if (instance->compiled_module()->is_asm_js()) {
+ flags |= FrameArray::kIsAsmJsWasmFrame;
+ if (wasm_frame->at_to_number_conversion()) {
+ flags |= FrameArray::kAsmJsAtNumberConversion;
+ }
+ } else {
+ flags |= FrameArray::kIsWasmFrame;
+ }
elements =
FrameArray::AppendWasmFrame(elements, instance, wasm_function_index,
abstract_code, offset, flags);
} break;
+ case StackFrame::WASM_INTERPRETER_ENTRY:
+ // TODO(clemensh): Add frames.
+ break;
+
default:
break;
}
@@ -620,21 +629,22 @@ class CaptureStackTraceHelper {
}
Handle<JSObject> NewStackFrameObject(FrameSummary& summ) {
- int position = summ.abstract_code()->SourcePosition(summ.code_offset());
- return NewStackFrameObject(summ.function(), position,
- summ.is_constructor());
+ if (summ.IsJavaScript()) return NewStackFrameObject(summ.AsJavaScript());
+ if (summ.IsWasm()) return NewStackFrameObject(summ.AsWasm());
+ UNREACHABLE();
+ return Handle<JSObject>::null();
}
- Handle<JSObject> NewStackFrameObject(Handle<JSFunction> fun, int position,
- bool is_constructor) {
+ Handle<JSObject> NewStackFrameObject(
+ const FrameSummary::JavaScriptFrameSummary& summ) {
Handle<JSObject> stack_frame =
factory()->NewJSObject(isolate_->object_function());
- Handle<Script> script(Script::cast(fun->shared()->script()), isolate_);
+ Handle<Script> script = Handle<Script>::cast(summ.script());
if (!line_key_.is_null()) {
Script::PositionInfo info;
- bool valid_pos =
- Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+ bool valid_pos = Script::GetPositionInfo(script, summ.SourcePosition(),
+ &info, Script::WITH_OFFSET);
if (!column_key_.is_null() && valid_pos) {
JSObject::AddProperty(stack_frame, column_key_,
@@ -657,7 +667,7 @@ class CaptureStackTraceHelper {
}
if (!script_name_or_source_url_key_.is_null()) {
- Handle<Object> result = Script::GetNameOrSourceURL(script);
+ Handle<Object> result(script->GetNameOrSourceURL(), isolate_);
JSObject::AddProperty(stack_frame, script_name_or_source_url_key_, result,
NONE);
}
@@ -669,12 +679,13 @@ class CaptureStackTraceHelper {
}
if (!function_key_.is_null()) {
- Handle<Object> fun_name = JSFunction::GetDebugName(fun);
+ Handle<String> fun_name = summ.FunctionName();
JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
}
if (!constructor_key_.is_null()) {
- Handle<Object> is_constructor_obj = factory()->ToBoolean(is_constructor);
+ Handle<Object> is_constructor_obj =
+ factory()->ToBoolean(summ.is_constructor());
JSObject::AddProperty(stack_frame, constructor_key_, is_constructor_obj,
NONE);
}
@@ -696,28 +707,28 @@ class CaptureStackTraceHelper {
return stack_frame;
}
- Handle<JSObject> NewStackFrameObject(WasmFrame* frame) {
+ Handle<JSObject> NewStackFrameObject(
+ const FrameSummary::WasmFrameSummary& summ) {
Handle<JSObject> stack_frame =
factory()->NewJSObject(isolate_->object_function());
if (!function_key_.is_null()) {
- Handle<String> name = wasm::GetWasmFunctionName(
- isolate_, handle(frame->wasm_instance(), isolate_),
- frame->function_index());
+ Handle<WasmCompiledModule> compiled_module(
+ summ.wasm_instance()->compiled_module(), isolate_);
+ Handle<String> name = WasmCompiledModule::GetFunctionName(
+ isolate_, compiled_module, summ.function_index());
JSObject::AddProperty(stack_frame, function_key_, name, NONE);
}
// Encode the function index as line number (1-based).
if (!line_key_.is_null()) {
JSObject::AddProperty(
stack_frame, line_key_,
- isolate_->factory()->NewNumberFromInt(frame->function_index() + 1),
+ isolate_->factory()->NewNumberFromInt(summ.function_index() + 1),
NONE);
}
// Encode the byte offset as column (1-based).
if (!column_key_.is_null()) {
- Code* code = frame->LookupCode();
- int offset = static_cast<int>(frame->pc() - code->instruction_start());
- int position = AbstractCode::cast(code)->SourcePosition(offset);
+ int position = summ.byte_offset();
// Make position 1-based.
if (position >= 0) ++position;
JSObject::AddProperty(stack_frame, column_key_,
@@ -725,7 +736,7 @@ class CaptureStackTraceHelper {
NONE);
}
if (!script_id_key_.is_null()) {
- int script_id = frame->script()->id();
+ int script_id = summ.script()->id();
JSObject::AddProperty(stack_frame, script_id_key_,
handle(Smi::FromInt(script_id), isolate_), NONE);
}
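
As the comments above note, wasm frames reuse the JS frame shape: the 1-based line carries the function index and the 1-based column carries the byte offset. A hedged sketch of observing those fields through the standard CallSite API (exact wasm frame formatting varies by build; treat the interpretation as an assumption):

    // CallSite getters are the documented V8 stack-trace API; for a wasm
    // frame under this encoding, line = function index + 1 and
    // column = byte offset + 1.
    Error.prepareStackTrace = (error, sites) =>
      sites.map(s => [s.getFunctionName(), s.getLineNumber(), s.getColumnNumber()]);
    console.log(new Error("probe").stack); // array of [name, line, column]
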
@@ -762,25 +773,16 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
it.Advance()) {
StandardFrame* frame = it.frame();
- if (frame->is_java_script()) {
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- JavaScriptFrame::cast(frame)->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
- Handle<JSFunction> fun = frames[i].function();
- // Filter frames from other security contexts.
- if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
- !this->context()->HasSameSecurityTokenAs(fun->context()))
- continue;
- Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
- stack_trace_elems->set(frames_seen, *new_frame_obj);
- frames_seen++;
- }
- } else {
- DCHECK(frame->is_wasm());
- WasmFrame* wasm_frame = WasmFrame::cast(frame);
- Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(wasm_frame);
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0 && frames_seen < limit; i--) {
+ // Filter frames from other security contexts.
+ if (!(options & StackTrace::kExposeFramesAcrossSecurityOrigins) &&
+ !this->context()->HasSameSecurityTokenAs(*frames[i].native_context()))
+ continue;
+ Handle<JSObject> new_frame_obj = helper.NewStackFrameObject(frames[i]);
stack_trace_elems->set(frames_seen, *new_frame_obj);
frames_seen++;
}
@@ -1076,7 +1078,7 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
printf("Exception thrown:\n");
if (location) {
Handle<Script> script = location->script();
- Handle<Object> name = Script::GetNameOrSourceURL(script);
+ Handle<Object> name(script->GetNameOrSourceURL(), this);
printf("at ");
if (name->IsString() && String::cast(*name)->length() > 0)
String::cast(*name)->PrintOn(stdout);
@@ -1216,7 +1218,7 @@ Object* Isolate::UnwindAndFindHandler() {
if (FLAG_wasm_eh_prototype) {
if (frame->is_wasm() && is_catchable_by_wasm(exception)) {
int stack_slots = 0; // Will contain stack slot count of frame.
- WasmFrame* wasm_frame = static_cast<WasmFrame*>(frame);
+ WasmCompiledFrame* wasm_frame = static_cast<WasmCompiledFrame*>(frame);
offset = wasm_frame->LookupExceptionHandlerInTable(&stack_slots);
if (offset >= 0) {
// Compute the stack pointer from the frame pointer. This ensures that
@@ -1298,29 +1300,16 @@ Object* Isolate::UnwindAndFindHandler() {
}
}
- // For JavaScript frames we perform a range lookup in the handler table.
+ // For JavaScript frames we are guaranteed not to find a handler.
if (frame->is_java_script() && catchable_by_js) {
JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- int stack_depth = 0; // Will contain operand stack depth of handler.
- offset = js_frame->LookupExceptionHandlerInTable(&stack_depth, nullptr);
- if (offset >= 0) {
- // Compute the stack pointer from the frame pointer. This ensures that
- // operand stack slots are dropped for nested statements. Also restore
- // correct context for the handler which is pushed within the try-block.
- Address return_sp = frame->fp() -
- StandardFrameConstants::kFixedFrameSizeFromFp -
- stack_depth * kPointerSize;
- STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
- context = Context::cast(Memory::Object_at(return_sp - kPointerSize));
-
- // Gather information from the frame.
- code = frame->LookupCode();
- handler_sp = return_sp;
- handler_fp = frame->fp();
- break;
- }
+ offset = js_frame->LookupExceptionHandlerInTable(nullptr, nullptr);
+ CHECK_EQ(-1, offset);
}
+ // TODO(clemensh): Handle unwinding interpreted wasm frames (stored in the
+ // WasmInterpreter C++ object).
+
RemoveMaterializedObjectsOnUnwind(frame);
}
@@ -1350,16 +1339,30 @@ HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
List<FrameSummary> summaries;
frame->Summarize(&summaries);
for (const FrameSummary& summary : summaries) {
- Handle<AbstractCode> code = summary.abstract_code();
+ Handle<AbstractCode> code = summary.AsJavaScript().abstract_code();
+ if (code->IsCode() && code->kind() == AbstractCode::BUILTIN) {
+ if (code->GetCode()->is_promise_rejection()) {
+ return HandlerTable::PROMISE;
+ }
+
+ // This is the exception thrown in PromiseHandle which doesn't
+ // cause a promise rejection.
+ if (code->GetCode()->is_exception_caught()) {
+ return HandlerTable::CAUGHT;
+ }
+ }
+
if (code->kind() == AbstractCode::OPTIMIZED_FUNCTION) {
- DCHECK(summary.function()->shared()->asm_function());
- DCHECK(!FLAG_turbo_asm_deoptimization);
+ DCHECK(summary.AsJavaScript().function()->shared()->asm_function());
// asm code cannot contain try-catch.
continue;
}
+ // Must have been constructed from a bytecode array.
+ CHECK_EQ(AbstractCode::INTERPRETED_FUNCTION, code->kind());
int code_offset = summary.code_offset();
- int index =
- code->LookupRangeInHandlerTable(code_offset, nullptr, &prediction);
+ BytecodeArray* bytecode = code->GetBytecodeArray();
+ HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
+ int index = table->LookupRange(code_offset, nullptr, &prediction);
if (index <= 0) continue;
if (prediction == HandlerTable::UNCAUGHT) continue;
return prediction;
@@ -1494,23 +1497,29 @@ bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
StandardFrame* frame = it.frame();
- // TODO(clemensh): handle wasm frames
- if (!frame->is_java_script()) return false;
- JSFunction* fun = JavaScriptFrame::cast(frame)->function();
- Object* script = fun->shared()->script();
- if (!script->IsScript() ||
- (Script::cast(script)->source()->IsUndefined(this))) {
- return false;
- }
- Handle<Script> casted_script(Script::cast(script), this);
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- JavaScriptFrame::cast(frame)->Summarize(&frames);
+ frame->Summarize(&frames);
FrameSummary& summary = frames.last();
- int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
- *target = MessageLocation(casted_script, pos, pos + 1, handle(fun, this));
+ int pos = summary.SourcePosition();
+ Handle<SharedFunctionInfo> shared;
+ Handle<Object> script = summary.script();
+ if (!script->IsScript() ||
+ (Script::cast(*script)->source()->IsUndefined(this))) {
+ return false;
+ }
+
+ // TODO(wasm): Remove this once trap-if is always on.
+ // Background: Without trap-if, the information on the stack trace is
+ // incomplete (see bug v8:5007).
+ if (summary.IsWasmCompiled() && !FLAG_wasm_trap_if) return false;
+
+ if (summary.IsJavaScript()) {
+ shared = handle(summary.AsJavaScript().function()->shared());
+ }
+ *target = MessageLocation(Handle<Script>::cast(script), pos, pos + 1, shared);
return true;
}
@@ -1554,9 +1563,32 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
const int frame_count = elements->FrameCount();
for (int i = 0; i < frame_count; i++) {
- if (elements->IsWasmFrame(i)) {
- // TODO(clemensh): handle wasm frames
- return false;
+ if (elements->IsWasmFrame(i) || elements->IsAsmJsWasmFrame(i)) {
+ Handle<WasmCompiledModule> compiled_module(
+ WasmInstanceObject::cast(elements->WasmInstance(i))
+ ->compiled_module());
+ int func_index = elements->WasmFunctionIndex(i)->value();
+ int code_offset = elements->Offset(i)->value();
+ // TODO(wasm): Clean this up (bug 5007).
+ int pos = code_offset < 0
+ ? (-1 - code_offset)
+ : elements->Code(i)->SourcePosition(code_offset);
+ if (elements->IsAsmJsWasmFrame(i)) {
+ // For asm.js frames, make an additional translation step to get the
+ // asm.js source position.
+ bool at_to_number_conversion =
+ elements->Flags(i)->value() & FrameArray::kAsmJsAtNumberConversion;
+ pos = WasmCompiledModule::GetAsmJsSourcePosition(
+ compiled_module, func_index, pos, at_to_number_conversion);
+ } else {
+ // For pure wasm, make the function-local position module-relative by
+ // adding the function offset.
+ pos += compiled_module->GetFunctionOffset(func_index);
+ }
+ Handle<Script> script(compiled_module->script());
+
+ *target = MessageLocation(script, pos, pos + 1);
+ return true;
}
Handle<JSFunction> fun = handle(elements->Function(i), this);
@@ -1662,6 +1694,8 @@ bool Isolate::IsExternalHandlerOnTop(Object* exception) {
void Isolate::ReportPendingMessages() {
+ DCHECK(AllowExceptions::IsAllowed(this));
+
Object* exception = pending_exception();
// Try to propagate the exception to an external v8::TryCatch handler. If
@@ -1783,23 +1817,87 @@ void Isolate::PopPromise() {
global_handles()->Destroy(global_promise.location());
}
-bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
- Handle<JSFunction> fun = promise_has_user_defined_reject_handler();
- Handle<Object> has_reject_handler;
- // If we are, e.g., overflowing the stack, don't try to call out to JS
- if (!AllowJavascriptExecution::IsAllowed(this)) return false;
- // Call the registered function to check for a handler
- if (Execution::TryCall(this, fun, promise, 0, NULL)
- .ToHandle(&has_reject_handler)) {
- return has_reject_handler->IsTrue(this);
- }
- // If an exception is thrown in the course of execution of this built-in
- // function, it indicates either a bug, or a synthetic uncatchable
- // exception in the shutdown path. In either case, it's OK to predict either
- // way in DevTools.
+namespace {
+bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
+ Handle<JSPromise> promise);
+
+bool PromiseHandlerCheck(Isolate* isolate, Handle<JSReceiver> handler,
+ Handle<JSReceiver> deferred_promise) {
+ // Recurse to the forwarding Promise, if any. This may be due to
+ // - await reaction forwarding to the throwaway Promise, which has
+ // a dependency edge to the outer Promise.
+ // - PromiseIdResolveHandler forwarding to the output of .then
+ // - Promise.all/Promise.race forwarding to a throwaway Promise, which
+ // has a dependency edge to the generated outer Promise.
+ // Otherwise, this is a real reject handler for the Promise.
+ Handle<Symbol> key = isolate->factory()->promise_forwarding_handler_symbol();
+ Handle<Object> forwarding_handler = JSReceiver::GetDataProperty(handler, key);
+ if (forwarding_handler->IsUndefined(isolate)) {
+ return true;
+ }
+
+ if (!deferred_promise->IsJSPromise()) {
+ return true;
+ }
+
+ return InternalPromiseHasUserDefinedRejectHandler(
+ isolate, Handle<JSPromise>::cast(deferred_promise));
+}
+
+bool InternalPromiseHasUserDefinedRejectHandler(Isolate* isolate,
+ Handle<JSPromise> promise) {
+ // If this promise was marked as being handled by a catch block
+ // in an async function, then it has a user-defined reject handler.
+ if (promise->handled_hint()) return true;
+
+ // If this Promise is subsumed by another Promise (a Promise resolved
+ // with another Promise, or an intermediate, hidden, throwaway Promise
+ // within async/await), then recurse on the outer Promise.
+ // In this case, the dependency is one possible way that the Promise
+ // could be resolved, so it does not subsume the checks that follow.
+ Handle<Symbol> key = isolate->factory()->promise_handled_by_symbol();
+ Handle<Object> outer_promise_obj = JSObject::GetDataProperty(promise, key);
+ if (outer_promise_obj->IsJSPromise() &&
+ InternalPromiseHasUserDefinedRejectHandler(
+ isolate, Handle<JSPromise>::cast(outer_promise_obj))) {
+ return true;
+ }
+
+ Handle<Object> queue(promise->reject_reactions(), isolate);
+ Handle<Object> deferred_promise(promise->deferred_promise(), isolate);
+
+ if (queue->IsUndefined(isolate)) {
+ return false;
+ }
+
+ if (queue->IsCallable()) {
+ return PromiseHandlerCheck(isolate, Handle<JSReceiver>::cast(queue),
+ Handle<JSReceiver>::cast(deferred_promise));
+ }
+
+ Handle<FixedArray> queue_arr = Handle<FixedArray>::cast(queue);
+ Handle<FixedArray> deferred_promise_arr =
+ Handle<FixedArray>::cast(deferred_promise);
+ for (int i = 0; i < deferred_promise_arr->length(); i++) {
+ Handle<JSReceiver> queue_item(JSReceiver::cast(queue_arr->get(i)));
+ Handle<JSReceiver> deferred_promise_item(
+ JSReceiver::cast(deferred_promise_arr->get(i)));
+ if (PromiseHandlerCheck(isolate, queue_item, deferred_promise_item)) {
+ return true;
+ }
+ }
+
return false;
}
+} // namespace
+
+bool Isolate::PromiseHasUserDefinedRejectHandler(Handle<Object> promise) {
+ if (!promise->IsJSPromise()) return false;
+ return InternalPromiseHasUserDefinedRejectHandler(
+ this, Handle<JSPromise>::cast(promise));
+}
+
Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
ThreadLocalTop* tltop = thread_local_top();
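
Observably, the recursive walk above decides whether an eventual rejection is reported as unhandled; a minimal sketch:

    // A .catch() (or a second .then() argument) counts as a user-defined
    // reject handler, so no unhandled-rejection report is emitted.
    const handled = Promise.reject(new Error("boom"));
    handled.catch(() => { /* swallowed */ });

    // No handler anywhere on the chain: reported as unhandled.
    Promise.reject(new Error("unseen"));
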
@@ -1817,7 +1915,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
continue;
case HandlerTable::CAUGHT:
case HandlerTable::DESUGARING:
- if (retval->IsJSObject()) {
+ if (retval->IsJSPromise()) {
// Caught the result of an inner async/await invocation.
// Mark the inner promise as caught in the "synchronous case" so
// that Debug::OnException will see it. In the synchronous case,
@@ -1825,10 +1923,7 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
// await, the function which has this exception event has not yet
// returned, so the generated Promise has not yet been marked
// by AsyncFunctionAwaitCaught with promiseHandledHintSymbol.
- Handle<Symbol> key = factory()->promise_handled_hint_symbol();
- JSObject::SetProperty(Handle<JSObject>::cast(retval), key,
- factory()->true_value(), STRICT)
- .Assert();
+ Handle<JSPromise>::cast(retval)->set_handled_hint(true);
}
return retval;
case HandlerTable::PROMISE:
@@ -2113,7 +2208,6 @@ Isolate::Isolate(bool enable_serializer)
global_handles_(NULL),
eternal_handles_(NULL),
thread_manager_(NULL),
- has_installed_extensions_(false),
regexp_stack_(NULL),
date_cache_(NULL),
call_descriptor_data_(NULL),
@@ -2121,6 +2215,8 @@ Isolate::Isolate(bool enable_serializer)
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
rail_mode_(PERFORMANCE_ANIMATION),
+ promise_hook_(NULL),
+ load_start_time_ms_(0),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
@@ -2238,9 +2334,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = NULL;
}
- if (heap_.mark_compact_collector()->sweeping_in_progress()) {
- heap_.mark_compact_collector()->EnsureSweepingCompleted();
- }
+ heap_.mark_compact_collector()->EnsureSweepingCompleted();
DumpAndResetCompilationStats();
@@ -2272,6 +2366,10 @@ void Isolate::Deinit() {
delete heap_profiler_;
heap_profiler_ = NULL;
+ compiler_dispatcher_->AbortAll(CompilerDispatcher::BlockingBehavior::kBlock);
+ delete compiler_dispatcher_;
+ compiler_dispatcher_ = nullptr;
+
cancelable_task_manager()->CancelAndWait();
heap_.TearDown();
@@ -2280,8 +2378,8 @@ void Isolate::Deinit() {
delete interpreter_;
interpreter_ = NULL;
- delete compiler_dispatcher_tracer_;
- compiler_dispatcher_tracer_ = nullptr;
+ delete ast_string_constants_;
+ ast_string_constants_ = nullptr;
delete cpu_profiler_;
cpu_profiler_ = NULL;
@@ -2491,7 +2589,8 @@ bool Isolate::Init(Deserializer* des) {
cpu_profiler_ = new CpuProfiler(this);
heap_profiler_ = new HeapProfiler(heap());
interpreter_ = new interpreter::Interpreter(this);
- compiler_dispatcher_tracer_ = new CompilerDispatcherTracer(this);
+ compiler_dispatcher_ =
+ new CompilerDispatcher(this, V8::GetCurrentPlatform(), FLAG_stack_size);
// Enable logging before setting up the heap
logger_->SetUp(this);
@@ -2605,6 +2704,11 @@ bool Isolate::Init(Deserializer* des) {
time_millis_at_init_ = heap_.MonotonicallyIncreasingTimeInMs();
+ {
+ HandleScope scope(this);
+ ast_string_constants_ = new AstStringConstants(this, heap()->HashSeed());
+ }
+
if (!create_heap_objects) {
// Now that the heap is consistent, it's OK to generate the code for the
// deopt entry table that might have been referred to by optimized code in
@@ -2797,11 +2901,16 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
bool Isolate::use_crankshaft() const {
- return FLAG_crankshaft &&
- !serializer_enabled_ &&
+ return FLAG_opt && FLAG_crankshaft && !serializer_enabled_ &&
CpuFeatures::SupportsCrankshaft();
}
+bool Isolate::NeedsSourcePositionsForProfiling() const {
+ return FLAG_trace_deopt || FLAG_trace_turbo || FLAG_trace_turbo_graph ||
+ FLAG_turbo_profiling || FLAG_perf_prof || is_profiling() ||
+ debug_->is_active() || logger_->is_logging();
+}
+
bool Isolate::IsArrayOrObjectPrototype(Object* object) {
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
@@ -2815,6 +2924,26 @@ bool Isolate::IsArrayOrObjectPrototype(Object* object) {
return false;
}
+void Isolate::ClearOSROptimizedCode() {
+ DisallowHeapAllocation no_gc;
+ Object* context = heap()->native_contexts_list();
+ while (!context->IsUndefined(this)) {
+ Context* current_context = Context::cast(context);
+ current_context->ClearOptimizedCodeMap();
+ context = current_context->next_context_link();
+ }
+}
+
+void Isolate::EvictOSROptimizedCode(Code* code, const char* reason) {
+ DisallowHeapAllocation no_gc;
+ Object* context = heap()->native_contexts_list();
+ while (!context->IsUndefined(this)) {
+ Context* current_context = Context::cast(context);
+ current_context->EvictFromOptimizedCodeMap(code, reason);
+ context = current_context->next_context_link();
+ }
+}
+
bool Isolate::IsInAnyContext(Object* object, uint32_t index) {
DisallowHeapAllocation no_gc;
Object* context = heap()->native_contexts_list();
@@ -2970,6 +3099,15 @@ void Isolate::InvalidateArrayIteratorProtector() {
DCHECK(!IsArrayIteratorLookupChainIntact());
}
+void Isolate::InvalidateArrayBufferNeuteringProtector() {
+ DCHECK(factory()->array_buffer_neutering_protector()->value()->IsSmi());
+ DCHECK(IsArrayBufferNeuteringIntact());
+ PropertyCell::SetValueWithInvalidation(
+ factory()->array_buffer_neutering_protector(),
+ handle(Smi::FromInt(kProtectorInvalid), this));
+ DCHECK(!IsArrayBufferNeuteringIntact());
+}
+
bool Isolate::IsAnyInitialArrayPrototype(Handle<JSArray> array) {
DisallowHeapAllocation no_gc;
return IsInAnyContext(*array, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
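
The protector added above lets generated code elide detach checks until some ArrayBuffer is actually neutered. A sketch of the invalidating operation, using the natives syntax this codebase's JS files already rely on (%ArrayBufferNeuter is assumed to be the test-only runtime hook of this era; treat the name as an assumption):

    // d8 --allow-natives-syntax neuter.js
    const buf = new ArrayBuffer(8);
    const view = new Uint8Array(buf);
    %ArrayBufferNeuter(buf);      // detaching invalidates the protector
    console.log(buf.byteLength);  // 0
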
@@ -3003,7 +3141,7 @@ int Isolate::GenerateIdentityHash(uint32_t mask) {
return hash != 0 ? hash : 1;
}
-Object* Isolate::FindCodeObject(Address a) {
+Code* Isolate::FindCodeObject(Address a) {
return inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(a);
}
@@ -3016,33 +3154,39 @@ ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
-
-Handle<JSObject> Isolate::SetUpSubregistry(Handle<JSObject> registry,
- Handle<Map> map, const char* cname) {
- Handle<String> name = factory()->InternalizeUtf8String(cname);
- Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
- JSObject::NormalizeProperties(obj, CLEAR_INOBJECT_PROPERTIES, 0,
- "SetupSymbolRegistry");
- JSObject::AddProperty(registry, name, obj, NONE);
- return obj;
-}
-
-
-Handle<JSObject> Isolate::GetSymbolRegistry() {
- if (heap()->symbol_registry()->IsSmi()) {
- Handle<Map> map = factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
- heap()->set_symbol_registry(*registry);
-
- SetUpSubregistry(registry, map, "for");
- SetUpSubregistry(registry, map, "for_api");
- SetUpSubregistry(registry, map, "keyFor");
- SetUpSubregistry(registry, map, "private_api");
+Handle<Symbol> Isolate::SymbolFor(Heap::RootListIndex dictionary_index,
+ Handle<String> name, bool private_symbol) {
+ Handle<String> key = factory()->InternalizeString(name);
+ Handle<NameDictionary> dictionary =
+ Handle<NameDictionary>::cast(heap()->root_handle(dictionary_index));
+ int entry = dictionary->FindEntry(key);
+ Handle<Symbol> symbol;
+ if (entry == NameDictionary::kNotFound) {
+ symbol =
+ private_symbol ? factory()->NewPrivateSymbol() : factory()->NewSymbol();
+ symbol->set_name(*key);
+ dictionary = NameDictionary::Add(dictionary, key, symbol,
+ PropertyDetails::Empty(), &entry);
+ switch (dictionary_index) {
+ case Heap::kPublicSymbolTableRootIndex:
+ symbol->set_is_public(true);
+ heap()->set_public_symbol_table(*dictionary);
+ break;
+ case Heap::kApiSymbolTableRootIndex:
+ heap()->set_api_symbol_table(*dictionary);
+ break;
+ case Heap::kApiPrivateSymbolTableRootIndex:
+ heap()->set_api_private_symbol_table(*dictionary);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ symbol = Handle<Symbol>(Symbol::cast(dictionary->ValueAt(entry)));
}
- return Handle<JSObject>::cast(factory()->symbol_registry());
+ return symbol;
}
-
void Isolate::AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback) {
for (int i = 0; i < before_call_entered_callbacks_.length(); i++) {
if (callback == before_call_entered_callbacks_.at(i)) return;
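
The public table written by SymbolFor is what backs the standard Symbol.for()/Symbol.keyFor() registry semantics:

    // Same key in the public symbol table yields the same symbol.
    const a = Symbol.for("app.id");
    const b = Symbol.for("app.id");
    console.log(a === b);          // true
    console.log(Symbol.keyFor(a)); // "app.id"

    // Symbols created with Symbol() never enter the table.
    console.log(Symbol.keyFor(Symbol("app.id"))); // undefined
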
@@ -3100,6 +3244,14 @@ void Isolate::FireCallCompletedCallback() {
}
}
+void Isolate::SetPromiseHook(PromiseHook hook) { promise_hook_ = hook; }
+
+void Isolate::RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent) {
+ if (promise_hook_ == nullptr) return;
+ promise_hook_(type, v8::Utils::PromiseToLocal(promise),
+ v8::Utils::ToLocal(parent));
+}
void Isolate::SetPromiseRejectCallback(PromiseRejectCallback callback) {
promise_reject_callback_ = callback;
@@ -3122,96 +3274,89 @@ void Isolate::ReportPromiseReject(Handle<JSObject> promise,
namespace {
class PromiseDebugEventScope {
public:
- PromiseDebugEventScope(Isolate* isolate, Object* id, Object* name)
- : isolate_(isolate),
- id_(id, isolate_),
- name_(name, isolate_),
- is_debug_active_(isolate_->debug()->is_active() && id_->IsNumber() &&
- name_->IsString()) {
- if (is_debug_active_) {
- isolate_->debug()->OnAsyncTaskEvent(
- isolate_->factory()->will_handle_string(), id_,
- Handle<String>::cast(name_));
+ PromiseDebugEventScope(Isolate* isolate, int id)
+ : isolate_(isolate), id_(id) {
+ if (isolate_->debug()->is_active() && id_ != kDebugPromiseNoID) {
+ isolate_->debug()->OnAsyncTaskEvent(debug::kDebugWillHandle, id_);
}
}
~PromiseDebugEventScope() {
- if (is_debug_active_) {
- isolate_->debug()->OnAsyncTaskEvent(
- isolate_->factory()->did_handle_string(), id_,
- Handle<String>::cast(name_));
+ if (isolate_->debug()->is_active() && id_ != kDebugPromiseNoID) {
+ isolate_->debug()->OnAsyncTaskEvent(debug::kDebugDidHandle, id_);
}
}
private:
Isolate* isolate_;
- Handle<Object> id_;
- Handle<Object> name_;
- bool is_debug_active_;
+ int id_;
};
} // namespace
void Isolate::PromiseReactionJob(Handle<PromiseReactionJobInfo> info,
MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception) {
- PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+ PromiseDebugEventScope helper(this, info->debug_id());
Handle<Object> value(info->value(), this);
Handle<Object> tasks(info->tasks(), this);
Handle<JSFunction> promise_handle_fn = promise_handle();
Handle<Object> undefined = factory()->undefined_value();
-
- // If tasks is an array we have multiple onFulfilled/onRejected callbacks
- // associated with the promise. The deferred object for each callback
- // is attached to this array as well.
- // Otherwise, there is a single callback and the deferred object is attached
- // directly to PromiseReactionJobInfo.
- if (tasks->IsJSArray()) {
- Handle<JSArray> array = Handle<JSArray>::cast(tasks);
- DCHECK(array->length()->IsSmi());
- int length = Smi::cast(array->length())->value();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- DCHECK(length % 2 == 0);
- for (int i = 0; i < length; i += 2) {
- DCHECK(accessor->HasElement(array, i));
- DCHECK(accessor->HasElement(array, i + 1));
- Handle<Object> argv[] = {value, accessor->Get(array, i),
- accessor->Get(array, i + 1)};
- *result = Execution::TryCall(this, promise_handle_fn, undefined,
- arraysize(argv), argv, maybe_exception);
+ Handle<Object> deferred_promise(info->deferred_promise(), this);
+
+ if (deferred_promise->IsFixedArray()) {
+ DCHECK(tasks->IsFixedArray());
+ Handle<FixedArray> deferred_promise_arr =
+ Handle<FixedArray>::cast(deferred_promise);
+ Handle<FixedArray> deferred_on_resolve_arr(
+ FixedArray::cast(info->deferred_on_resolve()), this);
+ Handle<FixedArray> deferred_on_reject_arr(
+ FixedArray::cast(info->deferred_on_reject()), this);
+ Handle<FixedArray> tasks_arr = Handle<FixedArray>::cast(tasks);
+ for (int i = 0; i < deferred_promise_arr->length(); i++) {
+ Handle<Object> argv[] = {value, handle(tasks_arr->get(i), this),
+ handle(deferred_promise_arr->get(i), this),
+ handle(deferred_on_resolve_arr->get(i), this),
+ handle(deferred_on_reject_arr->get(i), this)};
+ *result = Execution::TryCall(
+ this, promise_handle_fn, undefined, arraysize(argv), argv,
+ Execution::MessageHandling::kReport, maybe_exception);
// If execution is terminating, just bail out.
if (result->is_null() && maybe_exception->is_null()) {
return;
}
}
} else {
- Handle<Object> deferred(info->deferred(), this);
- Handle<Object> argv[] = {value, tasks, deferred};
- *result = Execution::TryCall(this, promise_handle_fn, undefined,
- arraysize(argv), argv, maybe_exception);
+ Handle<Object> argv[] = {value, tasks, deferred_promise,
+ handle(info->deferred_on_resolve(), this),
+ handle(info->deferred_on_reject(), this)};
+ *result = Execution::TryCall(
+ this, promise_handle_fn, undefined, arraysize(argv), argv,
+ Execution::MessageHandling::kReport, maybe_exception);
}
}
void Isolate::PromiseResolveThenableJob(
Handle<PromiseResolveThenableJobInfo> info, MaybeHandle<Object>* result,
MaybeHandle<Object>* maybe_exception) {
- PromiseDebugEventScope helper(this, info->debug_id(), info->debug_name());
+ PromiseDebugEventScope helper(this, info->debug_id());
Handle<JSReceiver> thenable(info->thenable(), this);
Handle<JSFunction> resolve(info->resolve(), this);
Handle<JSFunction> reject(info->reject(), this);
Handle<JSReceiver> then(info->then(), this);
Handle<Object> argv[] = {resolve, reject};
- *result = Execution::TryCall(this, then, thenable, arraysize(argv), argv,
- maybe_exception);
+ *result =
+ Execution::TryCall(this, then, thenable, arraysize(argv), argv,
+ Execution::MessageHandling::kReport, maybe_exception);
Handle<Object> reason;
if (maybe_exception->ToHandle(&reason)) {
DCHECK(result->is_null());
Handle<Object> reason_arg[] = {reason};
- *result =
- Execution::TryCall(this, reject, factory()->undefined_value(),
- arraysize(reason_arg), reason_arg, maybe_exception);
+ *result = Execution::TryCall(
+ this, reject, factory()->undefined_value(), arraysize(reason_arg),
+ reason_arg, Execution::MessageHandling::kReport, maybe_exception);
}
}
@@ -3249,6 +3394,7 @@ void Isolate::RunMicrotasks() {
void Isolate::RunMicrotasksInternal() {
if (!pending_microtask_count()) return;
TRACE_EVENT0("v8.execute", "RunMicrotasks");
+ TRACE_EVENT_CALL_STATS_SCOPED(this, "v8", "V8.RunMicrotasks");
while (pending_microtask_count() > 0) {
HandleScope scope(this);
int num_tasks = pending_microtask_count();
@@ -3290,9 +3436,9 @@ void Isolate::RunMicrotasksInternal() {
if (microtask->IsJSFunction()) {
Handle<JSFunction> microtask_function =
Handle<JSFunction>::cast(microtask);
- result = Execution::TryCall(this, microtask_function,
- factory()->undefined_value(), 0, NULL,
- &maybe_exception);
+ result = Execution::TryCall(
+ this, microtask_function, factory()->undefined_value(), 0,
+ nullptr, Execution::MessageHandling::kReport, &maybe_exception);
} else if (microtask->IsPromiseResolveThenableJobInfo()) {
PromiseResolveThenableJob(
Handle<PromiseResolveThenableJobInfo>::cast(microtask), &result,
@@ -3437,13 +3583,26 @@ void Isolate::CheckDetachedContextsAfterGC() {
if (new_length == 0) {
heap()->set_detached_contexts(heap()->empty_fixed_array());
} else if (new_length < length) {
- heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *detached_contexts, length - new_length);
+ heap()->RightTrimFixedArray(*detached_contexts, length - new_length);
}
}
+double Isolate::LoadStartTimeMs() {
+ base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ return load_start_time_ms_;
+}
+
void Isolate::SetRAILMode(RAILMode rail_mode) {
+ RAILMode old_rail_mode = rail_mode_.Value();
+ if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
+ base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+ }
rail_mode_.SetValue(rail_mode);
+ if (old_rail_mode == PERFORMANCE_LOAD && rail_mode != PERFORMANCE_LOAD) {
+ heap()->incremental_marking()->incremental_marking_job()->ScheduleTask(
+ heap());
+ }
if (FLAG_trace_rail) {
PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode));
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 87bc45bb51..6bbc0fc343 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -35,6 +35,7 @@ namespace internal {
class AccessCompilerData;
class AddressToIndexHashMap;
+class AstStringConstants;
class BasicBlockProfiler;
class Bootstrapper;
class CancelableTaskManager;
@@ -46,7 +47,7 @@ class CodeRange;
class CodeStubDescriptor;
class CodeTracer;
class CompilationCache;
-class CompilerDispatcherTracer;
+class CompilerDispatcher;
class CompilationStatistics;
class ContextSlotCache;
class Counters;
@@ -250,7 +251,7 @@ class ThreadId {
static int AllocateThreadId();
- static int GetCurrentThreadId();
+ V8_EXPORT_PRIVATE static int GetCurrentThreadId();
base::Atomic32 id_;
@@ -383,7 +384,6 @@ class ThreadLocalTop BASE_EMBEDDED {
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
- V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
typedef List<HeapObject*> DebugObjectCache;
@@ -394,6 +394,8 @@ typedef List<HeapObject*> DebugObjectCache;
V(OOMErrorCallback, oom_behavior, nullptr) \
V(LogEventCallback, event_logger, nullptr) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, nullptr) \
+ V(AllowWasmCompileCallback, allow_wasm_compile_callback, nullptr) \
+ V(AllowWasmInstantiateCallback, allow_wasm_instantiate_callback, nullptr) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, \
nullptr) \
/* State for Relocatable. */ \
@@ -404,15 +406,11 @@ typedef List<HeapObject*> DebugObjectCache;
V(intptr_t*, api_external_references, nullptr) \
V(AddressToIndexHashMap*, external_reference_map, nullptr) \
V(HeapObjectToIndexHashMap*, root_index_map, nullptr) \
- V(v8::DeserializeInternalFieldsCallback, \
- deserialize_internal_fields_callback, nullptr) \
V(int, pending_microtask_count, 0) \
- V(int, debug_microtask_count, 0) \
V(HStatistics*, hstatistics, nullptr) \
V(CompilationStatistics*, turbo_statistics, nullptr) \
V(HTracer*, htracer, nullptr) \
V(CodeTracer*, code_tracer, nullptr) \
- V(bool, fp_stubs_generated, false) \
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
V(PromiseRejectCallback, promise_reject_callback, nullptr) \
V(const v8::StartupData*, snapshot_blob, nullptr) \
@@ -422,6 +420,8 @@ typedef List<HeapObject*> DebugObjectCache;
V(bool, is_profiling, false) \
/* true if a trace is being formatted through Error.prepareStackTrace. */ \
V(bool, formatting_stack_trace, false) \
+ /* Perform side effect checks on function calls and API callbacks. */
+ V(bool, needs_side_effect_check, false) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
@@ -765,7 +765,9 @@ class Isolate {
Object* PromoteScheduledException();
// Attempts to compute the current source location, storing the
- // result in the target out parameter.
+ // result in the target out parameter. The source location is attached to a
+ // Message object as the location which should be shown to the user. It's
+ // typically the top-most meaningful location on the stack.
bool ComputeLocation(MessageLocation* target);
bool ComputeLocationFromException(MessageLocation* target,
Handle<Object> exception);
@@ -905,12 +907,6 @@ class Isolate {
Builtins* builtins() { return &builtins_; }
- void NotifyExtensionInstalled() {
- has_installed_extensions_ = true;
- }
-
- bool has_installed_extensions() { return has_installed_extensions_; }
-
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
regexp_macro_assembler_canonicalize() {
return &regexp_macro_assembler_canonicalize_;
@@ -970,6 +966,8 @@ class Isolate {
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
+ bool NeedsSourcePositionsForProfiling() const;
+
double time_millis_since_init() {
return heap_.MonotonicallyIncreasingTimeInMs() - time_millis_at_init_;
}
@@ -1001,6 +999,9 @@ class Isolate {
// Avoid deopt loops if fast Array Iterators migrate to slow Array Iterators.
inline bool IsFastArrayIterationIntact();
+ // Make sure we do check for neutered array buffers.
+ inline bool IsArrayBufferNeuteringIntact();
+
// On intent to set an element in object, make sure that appropriate
// notifications occur if the set is on the elements of the array or
// object prototype. Also ensure that changes to prototype chain between
@@ -1020,11 +1021,13 @@ class Isolate {
void InvalidateIsConcatSpreadableProtector();
void InvalidateStringLengthOverflowProtector();
void InvalidateArrayIteratorProtector();
+ void InvalidateArrayBufferNeuteringProtector();
// Returns true if array is the initial array prototype in any native context.
bool IsAnyInitialArrayPrototype(Handle<JSArray> array);
- CallInterfaceDescriptorData* call_descriptor_data(int index);
+ V8_EXPORT_PRIVATE CallInterfaceDescriptorData* call_descriptor_data(
+ int index);
AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
@@ -1070,7 +1073,7 @@ class Isolate {
int GenerateIdentityHash(uint32_t mask);
// Given an address occupied by a live code object, return that object.
- Object* FindCodeObject(Address a);
+ Code* FindCodeObject(Address a);
int NextOptimizationId() {
int id = next_optimization_id_++;
@@ -1080,9 +1083,6 @@ class Isolate {
return id;
}
- // Get (and lazily initialize) the registry for per-isolate symbols.
- Handle<JSObject> GetSymbolRegistry();
-
void AddCallCompletedCallback(CallCompletedCallback callback);
void RemoveCallCompletedCallback(CallCompletedCallback callback);
void FireCallCompletedCallback();
@@ -1108,7 +1108,9 @@ class Isolate {
void EnqueueMicrotask(Handle<Object> microtask);
void RunMicrotasks();
bool IsRunningMicrotasks() const { return is_running_microtasks_; }
- int GetNextDebugMicrotaskId() { return debug_microtask_count_++; }
+
+ Handle<Symbol> SymbolFor(Heap::RootListIndex dictionary_index,
+ Handle<String> name, bool private_symbol);
void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
void CountUsage(v8::Isolate::UseCounterFeature feature);
@@ -1122,6 +1124,13 @@ class Isolate {
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
+ Address promise_hook_address() {
+ return reinterpret_cast<Address>(&promise_hook_);
+ }
+ void SetPromiseHook(PromiseHook hook);
+ void RunPromiseHook(PromiseHookType type, Handle<JSPromise> promise,
+ Handle<Object> parent);
+
// Support for dynamically disabling tail call elimination.
Address is_tail_call_elimination_enabled_address() {
return reinterpret_cast<Address>(&is_tail_call_elimination_enabled_);
@@ -1149,18 +1158,32 @@ class Isolate {
return cancelable_task_manager_;
}
+ AstStringConstants* ast_string_constants() const {
+ return ast_string_constants_;
+ }
+
interpreter::Interpreter* interpreter() const { return interpreter_; }
AccountingAllocator* allocator() { return allocator_; }
- CompilerDispatcherTracer* compiler_dispatcher_tracer() const {
- return compiler_dispatcher_tracer_;
+ CompilerDispatcher* compiler_dispatcher() const {
+ return compiler_dispatcher_;
}
+ // Clear all OSR optimized code stored in native contexts.
+ void ClearOSROptimizedCode();
+
+ // Ensure that a particular optimized code is evicted.
+ void EvictOSROptimizedCode(Code* code, const char* reason);
+
bool IsInAnyContext(Object* object, uint32_t index);
void SetRAILMode(RAILMode rail_mode);
+ RAILMode rail_mode() { return rail_mode_.Value(); }
+
+ double LoadStartTimeMs();
+
void IsolateInForegroundNotification();
void IsolateInBackgroundNotification();
@@ -1180,8 +1203,6 @@ class Isolate {
private:
friend struct GlobalState;
friend struct InitializeGlobalState;
- Handle<JSObject> SetUpSubregistry(Handle<JSObject> registry, Handle<Map> map,
- const char* name);
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
@@ -1337,7 +1358,6 @@ class Isolate {
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
Builtins builtins_;
- bool has_installed_extensions_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
@@ -1349,6 +1369,9 @@ class Isolate {
AccessCompilerData* access_compiler_data_;
base::RandomNumberGenerator* random_number_generator_;
base::AtomicValue<RAILMode> rail_mode_;
+ PromiseHook promise_hook_;
+ base::Mutex rail_mutex_;
+ double load_start_time_ms_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;
@@ -1381,9 +1404,11 @@ class Isolate {
std::unique_ptr<CodeEventDispatcher> code_event_dispatcher_;
FunctionEntryHook function_entry_hook_;
+ AstStringConstants* ast_string_constants_;
+
interpreter::Interpreter* interpreter_;
- CompilerDispatcherTracer* compiler_dispatcher_tracer_;
+ CompilerDispatcher* compiler_dispatcher_;
typedef std::pair<InterruptCallback, void*> InterruptEntry;
std::queue<InterruptEntry> api_interrupts_queue_;
@@ -1451,6 +1476,7 @@ class Isolate {
friend class ExecutionAccess;
friend class HandleScopeImplementer;
+ friend class HeapTester;
friend class OptimizingCompileDispatcher;
friend class SweeperThread;
friend class ThreadManager;
@@ -1571,14 +1597,13 @@ class StackLimitCheck BASE_EMBEDDED {
Isolate* isolate_;
};
-#define STACK_CHECK(isolate, result_value) \
- do { \
- StackLimitCheck stack_check(isolate); \
- if (stack_check.HasOverflowed()) { \
- isolate->Throw(*isolate->factory()->NewRangeError( \
- MessageTemplate::kStackOverflow)); \
- return result_value; \
- } \
+#define STACK_CHECK(isolate, result_value) \
+ do { \
+ StackLimitCheck stack_check(isolate); \
+ if (stack_check.HasOverflowed()) { \
+ isolate->StackOverflow(); \
+ return result_value; \
+ } \
} while (false)
// Support for temporarily postponing interrupts. When the outermost
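
What reaches script is unchanged by the macro rewrite; overflow still surfaces as the usual RangeError, now thrown via Isolate::StackOverflow() instead of an inline NewRangeError:

    // Deep recursion trips the stack limit check.
    function recurse() { return recurse(); }
    try {
      recurse();
    } catch (e) {
      console.log(e instanceof RangeError, e.message);
      // true "Maximum call stack size exceeded"
    }
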
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index e23810f4de..fca75a3f65 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -21,7 +21,6 @@ var MinSimple;
var ObjectHasOwnProperty;
var ObjectToString = utils.ImportNow("object_to_string");
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
@@ -723,7 +722,7 @@ function InnerArraySort(array, length, comparefn) {
else return x < y ? -1 : 1;
};
}
- var InsertionSort = function InsertionSort(a, from, to) {
+ function InsertionSort(a, from, to) {
for (var i = from + 1; i < to; i++) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
@@ -739,7 +738,7 @@ function InnerArraySort(array, length, comparefn) {
}
};
- var GetThirdIndex = function(a, from, to) {
+ function GetThirdIndex(a, from, to) {
var t_array = new InternalArray();
// Use both 'from' and 'to' to determine the pivot candidates.
var increment = 200 + ((to - from) & 15);
@@ -757,7 +756,7 @@ function InnerArraySort(array, length, comparefn) {
return third_index;
}
- var QuickSort = function QuickSort(a, from, to) {
+ function QuickSort(a, from, to) {
var third_index = 0;
while (true) {
// Insertion sort is faster for short arrays.
@@ -846,7 +845,7 @@ function InnerArraySort(array, length, comparefn) {
// Copy elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Return one more than the maximal index
// of a prototype property.
- var CopyFromPrototype = function CopyFromPrototype(obj, length) {
+ function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = %object_get_prototype_of(obj); proto;
proto = %object_get_prototype_of(proto)) {
@@ -876,7 +875,7 @@ function InnerArraySort(array, length, comparefn) {
// Set a value of "undefined" on all indices in the range from..to
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
- var ShadowPrototypeElements = function(obj, from, to) {
+ function ShadowPrototypeElements(obj, from, to) {
for (var proto = %object_get_prototype_of(obj); proto;
proto = %object_get_prototype_of(proto)) {
var indices = IS_PROXY(proto) ? to : %GetArrayKeys(proto, to);
@@ -899,7 +898,7 @@ function InnerArraySort(array, length, comparefn) {
}
};
- var SafeRemoveArrayHoles = function SafeRemoveArrayHoles(obj) {
+ function SafeRemoveArrayHoles(obj) {
// Copy defined elements from the end to fill in all holes and undefineds
// in the beginning of the array. Write undefineds and holes at the end
// after loop is finished.
@@ -1490,12 +1489,6 @@ function ArrayOf(...args) {
return array;
}
-
-function ArraySpecies() {
- return this;
-}
-
-
// -------------------------------------------------------------------
// Set up non-enumerable constructor property on the Array.prototype
@@ -1528,7 +1521,7 @@ utils.InstallFunctions(GlobalArray, DONT_ENUM, [
var specialFunctions = %SpecialArrayFunctions();
-var getFunction = function(name, jsBuiltin, len) {
+function getFunction(name, jsBuiltin, len) {
var f = jsBuiltin;
if (specialFunctions.hasOwnProperty(name)) {
f = specialFunctions[name];
@@ -1578,8 +1571,6 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
%FunctionSetName(ArrayValues, "values");
-utils.InstallGetter(GlobalArray, speciesSymbol, ArraySpecies);
-
%FinishArrayPrototypeSetup(GlobalArray.prototype);
// The internal Array prototype doesn't need to be fancy, since it's never
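
Dropping ArraySpecies here only removes the JS-side installation; @@species itself remains observable (the getter now comes from elsewhere in the engine rather than this file):

    // @@species still drives what Array.prototype.map/slice construct:
    class MyArray extends Array {}
    console.log(new MyArray(1, 2).map(x => x) instanceof MyArray); // true
    console.log(Array[Symbol.species] === Array);                  // true
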
diff --git a/deps/v8/src/js/arraybuffer.js b/deps/v8/src/js/arraybuffer.js
index a1ff03daee..9cb93a600c 100644
--- a/deps/v8/src/js/arraybuffer.js
+++ b/deps/v8/src/js/arraybuffer.js
@@ -15,7 +15,6 @@ var GlobalArrayBuffer = global.ArrayBuffer;
var MaxSimple;
var MinSimple;
var SpeciesConstructor;
-var speciesSymbol = utils.ImportNow("species_symbol");
utils.Import(function(from) {
MaxSimple = from.MaxSimple;
@@ -75,13 +74,6 @@ function ArrayBufferSlice(start, end) {
return result;
}
-
-function ArrayBufferSpecies() {
- return this;
-}
-
-utils.InstallGetter(GlobalArrayBuffer, speciesSymbol, ArrayBufferSpecies);
-
utils.InstallFunctions(GlobalArrayBuffer.prototype, DONT_ENUM, [
"slice", ArrayBufferSlice
]);
diff --git a/deps/v8/src/js/async-await.js b/deps/v8/src/js/async-await.js
index a1cac0d5cd..f0104ed9ac 100644
--- a/deps/v8/src/js/async-await.js
+++ b/deps/v8/src/js/async-await.js
@@ -13,46 +13,26 @@
var AsyncFunctionNext;
var AsyncFunctionThrow;
-var GlobalPromise;
-var IsPromise;
-var NewPromiseCapability;
-var PerformPromiseThen;
-var PromiseCreate;
-var PromiseNextMicrotaskID;
-var RejectPromise;
-var ResolvePromise;
utils.Import(function(from) {
AsyncFunctionNext = from.AsyncFunctionNext;
AsyncFunctionThrow = from.AsyncFunctionThrow;
- GlobalPromise = from.GlobalPromise;
- IsPromise = from.IsPromise;
- NewPromiseCapability = from.NewPromiseCapability;
- PerformPromiseThen = from.PerformPromiseThen;
- PromiseCreate = from.PromiseCreate;
- RejectPromise = from.RejectPromise;
- ResolvePromise = from.ResolvePromise;
});
-var promiseAsyncStackIDSymbol =
- utils.ImportNow("promise_async_stack_id_symbol");
var promiseHandledBySymbol =
utils.ImportNow("promise_handled_by_symbol");
var promiseForwardingHandlerSymbol =
utils.ImportNow("promise_forwarding_handler_symbol");
-var promiseHandledHintSymbol =
- utils.ImportNow("promise_handled_hint_symbol");
-var promiseHasHandlerSymbol =
- utils.ImportNow("promise_has_handler_symbol");
// -------------------------------------------------------------------
function PromiseCastResolved(value) {
- if (IsPromise(value)) {
+ // TODO(caitp): This is not spec-compliant. See v8:5694.
+ if (%is_promise(value)) {
return value;
} else {
- var promise = PromiseCreate();
- ResolvePromise(promise, value);
+ var promise = %promise_internal_constructor(UNDEFINED);
+ %promise_resolve(promise, value);
return promise;
}
}
@@ -90,15 +70,14 @@ function AsyncFunctionAwait(generator, awaited, outerPromise) {
return;
}
- // Just forwarding the exception, so no debugEvent for throwawayCapability
- var throwawayCapability = NewPromiseCapability(GlobalPromise, false);
+ var throwawayPromise = %promise_internal_constructor(promise);
// The Promise will be thrown away and not handled, but it shouldn't trigger
// unhandled reject events as its work is done
- SET_PRIVATE(throwawayCapability.promise, promiseHasHandlerSymbol, true);
+ %PromiseMarkAsHandled(throwawayPromise);
if (DEBUG_IS_ACTIVE) {
- if (IsPromise(awaited)) {
+ if (%is_promise(awaited)) {
// Mark the reject handler callback to be a forwarding edge, rather
// than a meaningful catch handler
SET_PRIVATE(onRejected, promiseForwardingHandlerSymbol, true);
@@ -106,11 +85,10 @@ function AsyncFunctionAwait(generator, awaited, outerPromise) {
// Mark the dependency to outerPromise in case the throwaway Promise is
// found on the Promise stack
- SET_PRIVATE(throwawayCapability.promise, promiseHandledBySymbol,
- outerPromise);
+ SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, outerPromise);
}
- PerformPromiseThen(promise, onFulfilled, onRejected, throwawayCapability);
+ %perform_promise_then(promise, onFulfilled, onRejected, throwawayPromise);
}
// Called by the parser from the desugaring of 'await' when catch
@@ -122,43 +100,32 @@ function AsyncFunctionAwaitUncaught(generator, awaited, outerPromise) {
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block
function AsyncFunctionAwaitCaught(generator, awaited, outerPromise) {
- if (DEBUG_IS_ACTIVE && IsPromise(awaited)) {
- SET_PRIVATE(awaited, promiseHandledHintSymbol, true);
+ if (DEBUG_IS_ACTIVE && %is_promise(awaited)) {
+ %PromiseMarkHandledHint(awaited);
}
AsyncFunctionAwait(generator, awaited, outerPromise);
}
// How the parser rejects promises from async/await desugaring
function RejectPromiseNoDebugEvent(promise, reason) {
- return RejectPromise(promise, reason, false);
+ return %promise_internal_reject(promise, reason, false);
}
function AsyncFunctionPromiseCreate() {
- var promise = PromiseCreate();
+ var promise = %promise_internal_constructor(UNDEFINED);
if (DEBUG_IS_ACTIVE) {
// Push the Promise under construction in an async function on
// the catch prediction stack to handle exceptions thrown before
// the first await.
- %DebugPushPromise(promise);
// Assign ID and create a recurring task to save stack for future
// resumptions from await.
- var id = %DebugNextMicrotaskId();
- SET_PRIVATE(promise, promiseAsyncStackIDSymbol, id);
- %DebugAsyncTaskEvent("enqueueRecurring", id, "async function");
+ %DebugAsyncFunctionPromiseCreated(promise);
}
return promise;
}
function AsyncFunctionPromiseRelease(promise) {
if (DEBUG_IS_ACTIVE) {
- // Cancel
- var id = GET_PRIVATE(promise, promiseAsyncStackIDSymbol);
-
- // Don't send invalid events when catch prediction is turned on in
- // the middle of some async operation.
- if (!IS_UNDEFINED(id)) {
- %DebugAsyncTaskEvent("cancel", id, "async function");
- }
// Pop the Promise under construction in an async function from the
// catch prediction stack.
%DebugPopPromise();
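
PromiseCastResolved above is the step that lets plain values be awaited; minimal observable behavior:

    // Non-promise awaited values are wrapped in an already-resolved
    // promise; real promises pass through unchanged (the TODO above
    // notes the spec deviation tracked as v8:5694).
    async function f() {
      console.log(await 42);                    // 42
      console.log(await Promise.resolve("ok")); // "ok"
    }
    f();
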
diff --git a/deps/v8/src/js/collection.js b/deps/v8/src/js/collection.js
index a4ae904771..adb2688618 100644
--- a/deps/v8/src/js/collection.js
+++ b/deps/v8/src/js/collection.js
@@ -17,7 +17,6 @@ var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var MathRandom = global.Math.random;
var MapIterator;
var SetIterator;
-var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
@@ -251,12 +250,6 @@ function SetForEach(f, receiver) {
}
}
-
-function SetSpecies() {
- return this;
-}
-
-
// -------------------------------------------------------------------
%SetCode(GlobalSet, SetConstructor);
@@ -268,8 +261,6 @@ function SetSpecies() {
%FunctionSetLength(SetForEach, 1);
-utils.InstallGetter(GlobalSet, speciesSymbol, SetSpecies);
-
// Set up the non-enumerable functions on the Set prototype object.
utils.InstallGetter(GlobalSet.prototype, "size", SetGetSize);
utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
@@ -439,11 +430,6 @@ function MapForEach(f, receiver) {
}
}
-
-function MapSpecies() {
- return this;
-}
-
// -------------------------------------------------------------------
%SetCode(GlobalMap, MapConstructor);
@@ -455,8 +441,6 @@ function MapSpecies() {
%FunctionSetLength(MapForEach, 1);
-utils.InstallGetter(GlobalMap, speciesSymbol, MapSpecies);
-
// Set up the non-enumerable functions on the Map prototype object.
utils.InstallGetter(GlobalMap.prototype, "size", MapGetSize);
utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
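
The removals above give Map and Set the same treatment as Array; note that @@species on these constructors is observable but, per spec, no built-in Map or Set method consults it:

    console.log(Map[Symbol.species] === Map); // true
    console.log(Set[Symbol.species] === Set); // true
    // Nothing on Map.prototype/Set.prototype performs a species lookup,
    // so this only matters to subclasses that read it themselves.
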
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/i18n.js
index b051b090bc..50ed5bcb89 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/i18n.js
@@ -20,9 +20,15 @@
var ArrayJoin;
var ArrayPush;
var GlobalDate = global.Date;
+var GlobalIntl = global.Intl;
+var GlobalIntlDateTimeFormat = GlobalIntl.DateTimeFormat;
+var GlobalIntlNumberFormat = GlobalIntl.NumberFormat;
+var GlobalIntlCollator = GlobalIntl.Collator;
+var GlobalIntlv8BreakIterator = GlobalIntl.v8BreakIterator;
var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
+var IntlFallbackSymbol = utils.ImportNow("intl_fallback_symbol");
var InstallFunctions = utils.InstallFunctions;
var InstallGetter = utils.InstallGetter;
var InternalArray = utils.InternalArray;
@@ -46,18 +52,11 @@ function InstallFunction(object, name, func) {
}
-function InstallConstructor(object, name, func) {
- %CheckIsBootstrapping();
- SetFunctionName(func, name);
- %AddNamedProperty(object, name, func, DONT_ENUM);
- %SetNativeFlag(func);
- %ToFastProperties(object);
-}
-
/**
* Adds bound method to the prototype of the given object.
*/
-function AddBoundMethod(obj, methodName, implementation, length, type) {
+function AddBoundMethod(obj, methodName, implementation, length, typename,
+ compat) {
%CheckIsBootstrapping();
var internalName = %CreatePrivateSymbol(methodName);
// Making getter an anonymous function will cause it to
// set the "name" property on each created function instance, rather
// than (as utils.InstallGetter would) on the SharedFunctionInfo
// than (as utils.InstallGetter would) on the SharedFunctionInfo
// associated with all functions returned from AddBoundMethod.
var getter = ANONYMOUS_FUNCTION(function() {
- if (!%IsInitializedIntlObjectOfType(this, type)) {
- throw %make_type_error(kMethodCalledOnWrongObject, methodName);
- }
- if (IS_UNDEFINED(this[internalName])) {
+ var receiver = Unwrap(this, typename, obj, methodName, compat);
+ if (IS_UNDEFINED(receiver[internalName])) {
var boundMethod;
if (IS_UNDEFINED(length) || length === 2) {
boundMethod =
- ANONYMOUS_FUNCTION((fst, snd) => implementation(this, fst, snd));
+ ANONYMOUS_FUNCTION((fst, snd) => implementation(receiver, fst, snd));
} else if (length === 1) {
- boundMethod = ANONYMOUS_FUNCTION(fst => implementation(this, fst));
+ boundMethod = ANONYMOUS_FUNCTION(fst => implementation(receiver, fst));
} else {
boundMethod = ANONYMOUS_FUNCTION((...args) => {
// DateTimeFormat.format needs to be a 0-arg method, but can still
// receive an optional dateValue param. If one was provided, pass it
// along.
if (args.length > 0) {
- return implementation(this, args[0]);
+ return implementation(receiver, args[0]);
} else {
- return implementation(this);
+ return implementation(receiver);
}
});
}
%SetNativeFlag(boundMethod);
- this[internalName] = boundMethod;
+ receiver[internalName] = boundMethod;
}
- return this[internalName];
+ return receiver[internalName];
});
%FunctionRemovePrototype(getter);
@@ -99,11 +96,44 @@ function AddBoundMethod(obj, methodName, implementation, length, type) {
%SetNativeFlag(getter);
}
-// -------------------------------------------------------------------
+function IntlConstruct(receiver, constructor, create, newTarget, args,
+ compat) {
+ var locales = args[0];
+ var options = args[1];
+
+ if (IS_UNDEFINED(newTarget)) {
+ if (compat && receiver instanceof constructor) {
+ let success = %object_define_property(receiver, IntlFallbackSymbol,
+ { value: new constructor(locales, options) });
+ if (!success) {
+ throw %make_type_error(kReinitializeIntl, constructor);
+ }
+ return receiver;
+ }
+
+ return new constructor(locales, options);
+ }
+
+ return create(locales, options);
+}
+
+
+
+function Unwrap(receiver, typename, constructor, method, compat) {
+ if (!%IsInitializedIntlObjectOfType(receiver, typename)) {
+ if (compat && receiver instanceof constructor) {
+ let fallback = receiver[IntlFallbackSymbol];
+ if (%IsInitializedIntlObjectOfType(fallback, typename)) {
+ return fallback;
+ }
+ }
+ throw %make_type_error(kIncompatibleMethodReceiver, method, receiver);
+ }
+ return receiver;
+}
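
IntlConstruct and Unwrap together implement the ECMA-402 legacy-constructor semantics: when a compat constructor is called without `new` on an object that inherits from its prototype, a real instance is stashed under IntlFallbackSymbol and the bound methods later unwrap to it. The observable behavior, using only the public API:

```js
// Legacy Intl constructor compat as seen from user code.
const obj = Object.create(Intl.NumberFormat.prototype);

// No `new`, receiver inherits from the prototype: the receiver itself is
// returned, with a fresh NumberFormat hidden behind the fallback symbol.
const nf = Intl.NumberFormat.call(obj, 'en-US', { style: 'percent' });
console.log(nf === obj);      // true
console.log(nf.format(0.5));  // "50%" — format unwraps to the fallback

// No `new` and no compatible receiver: behaves like a normal construct.
console.log(Intl.NumberFormat('en-US') instanceof Intl.NumberFormat);  // true
```
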
-var Intl = {};
-%AddNamedProperty(global, "Intl", Intl, DONT_ENUM);
+// -------------------------------------------------------------------
/**
* Caches available locales for each service.
@@ -911,11 +941,7 @@ var resolvedAccessor = {
};
// ECMA 402 section 8.2.1
-InstallFunction(Intl, 'getCanonicalLocales', function(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
+InstallFunction(GlobalIntl, 'getCanonicalLocales', function(locales) {
return makeArray(canonicalizeLocaleList(locales));
}
);
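
The explicit new.target guard disappears here (and throughout this file); the function's behavior is unchanged. For reference, the standard API:

```js
console.log(Intl.getCanonicalLocales('EN-us'));            // ["en-US"]
console.log(Intl.getCanonicalLocales(['en-us', 'en-US'])); // ["en-US"] — duplicates collapse
```
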
@@ -924,11 +950,7 @@ InstallFunction(Intl, 'getCanonicalLocales', function(locales) {
* Initializes the given object so it's a valid Collator instance.
* Useful for subclassing.
*/
-function initializeCollator(collator, locales, options) {
- if (%IsInitializedIntlObject(collator)) {
- throw %make_type_error(kReinitializeIntl, "Collator");
- }
-
+function CreateCollator(locales, options) {
if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1015,12 +1037,9 @@ function initializeCollator(collator, locales, options) {
usage: {value: internalOptions.usage, writable: true}
});
- var internalCollator = %CreateCollator(requestedLocale,
- internalOptions,
- resolved);
+ var collator = %CreateCollator(requestedLocale, internalOptions, resolved);
- // Writable, configurable and enumerable are set to false by default.
- %MarkAsInitializedIntlObjectOfType(collator, 'collator', internalCollator);
+ %MarkAsInitializedIntlObjectOfType(collator, 'collator');
collator[resolvedSymbol] = resolved;
return collator;
@@ -1033,33 +1052,19 @@ function initializeCollator(collator, locales, options) {
*
* @constructor
*/
-InstallConstructor(Intl, 'Collator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.Collator(locales, options);
- }
-
- return initializeCollator(TO_OBJECT(this), locales, options);
- }
-);
+function CollatorConstructor() {
+ return IntlConstruct(this, GlobalIntlCollator, CreateCollator, new.target,
+ arguments);
+}
+%SetCode(GlobalIntlCollator, CollatorConstructor);
/**
* Collator resolvedOptions method.
*/
-InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
- if (!%IsInitializedIntlObjectOfType(this, 'collator')) {
- throw %make_type_error(kResolvedOptionsCalledOnNonObject, "Collator");
- }
-
- var coll = this;
+InstallFunction(GlobalIntlCollator.prototype, 'resolvedOptions', function() {
+ var coll = Unwrap(this, 'collator', GlobalIntlCollator, 'resolvedOptions',
+ false);
var locale = getOptimalLanguageTag(coll[resolvedSymbol].requestedLocale,
coll[resolvedSymbol].locale);
@@ -1082,11 +1087,7 @@ InstallFunction(Intl.Collator.prototype, 'resolvedOptions', function() {
* order in the returned list as in the input list.
 * The options argument is optional.
*/
-InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
+InstallFunction(GlobalIntlCollator, 'supportedLocalesOf', function(locales) {
return supportedLocalesOf('collator', locales, arguments[1]);
}
);
@@ -1103,12 +1104,11 @@ InstallFunction(Intl.Collator, 'supportedLocalesOf', function(locales) {
* the sort order, or x comes after y in the sort order, respectively.
*/
function compare(collator, x, y) {
- return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
- TO_STRING(x), TO_STRING(y));
+ return %InternalCompare(collator, TO_STRING(x), TO_STRING(y));
}
-AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
+AddBoundMethod(GlobalIntlCollator, 'compare', compare, 2, 'collator', false);
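
With the separate impl object gone, `compare` passes the collator itself to %InternalCompare. Usage of the bound method is unchanged:

```js
const collator = new Intl.Collator('de', { sensitivity: 'base' });
console.log(collator.compare('ä', 'a'));             // 0 — base letters match
console.log(['z', 'a', 'ä'].sort(collator.compare)); // ["a", "ä", "z"]
```
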
/**
* Verifies that the input is a well-formed ISO 4217 currency code.
@@ -1116,7 +1116,7 @@ AddBoundMethod(Intl.Collator, 'compare', compare, 2, 'collator');
* For example \u00DFP (Eszett+P) becomes SSP.
*/
function isWellFormedCurrencyCode(currency) {
- return typeof currency == "string" && currency.length == 3 &&
+ return typeof currency === "string" && currency.length === 3 &&
IS_NULL(%regexp_internal_match(/[^A-Za-z]/, currency));
}
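
The check is simply: a string of exactly three ASCII letters (the native version calls %regexp_internal_match so user patches to RegExp cannot interfere). A plain-JS equivalent:

```js
function isWellFormedCurrencyCode(currency) {
  return typeof currency === 'string' && currency.length === 3 &&
         /[^A-Za-z]/.exec(currency) === null;
}
console.log(isWellFormedCurrencyCode('EUR'));  // true
console.log(isWellFormedCurrencyCode('eur'));  // true — upper-cased by the caller
console.log(isWellFormedCurrencyCode('€'));    // false
```
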
@@ -1152,11 +1152,7 @@ var patternAccessor = {
* Initializes the given object so it's a valid NumberFormat instance.
* Useful for subclassing.
*/
-function initializeNumberFormat(numberFormat, locales, options) {
- if (%IsInitializedIntlObject(numberFormat)) {
- throw %make_type_error(kReinitializeIntl, "NumberFormat");
- }
-
+function CreateNumberFormat(locales, options) {
if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1252,16 +1248,15 @@ function initializeNumberFormat(numberFormat, locales, options) {
if (HAS_OWN_PROPERTY(internalOptions, 'maximumSignificantDigits')) {
defineWEProperty(resolved, 'maximumSignificantDigits', UNDEFINED);
}
- var formatter = %CreateNumberFormat(requestedLocale,
- internalOptions,
- resolved);
+ var numberFormat = %CreateNumberFormat(requestedLocale, internalOptions,
+ resolved);
if (internalOptions.style === 'currency') {
%object_define_property(resolved, 'currencyDisplay',
{value: currencyDisplay, writable: true});
}
- %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat', formatter);
+ %MarkAsInitializedIntlObjectOfType(numberFormat, 'numberformat');
numberFormat[resolvedSymbol] = resolved;
return numberFormat;
@@ -1274,33 +1269,20 @@ function initializeNumberFormat(numberFormat, locales, options) {
*
* @constructor
*/
-InstallConstructor(Intl, 'NumberFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.NumberFormat(locales, options);
- }
-
- return initializeNumberFormat(TO_OBJECT(this), locales, options);
- }
-);
+function NumberFormatConstructor() {
+ return IntlConstruct(this, GlobalIntlNumberFormat, CreateNumberFormat,
+ new.target, arguments, true);
+}
+%SetCode(GlobalIntlNumberFormat, NumberFormatConstructor);
/**
* NumberFormat resolvedOptions method.
*/
-InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
- if (!%IsInitializedIntlObjectOfType(this, 'numberformat')) {
- throw %make_type_error(kResolvedOptionsCalledOnNonObject, "NumberFormat");
- }
-
- var format = this;
+InstallFunction(GlobalIntlNumberFormat.prototype, 'resolvedOptions',
+ function() {
+ var format = Unwrap(this, 'numberformat', GlobalIntlNumberFormat,
+ 'resolvedOptions', true);
var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
format[resolvedSymbol].locale);
@@ -1341,11 +1323,8 @@ InstallFunction(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
* order in the returned list as in the input list.
 * The options argument is optional.
*/
-InstallFunction(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
+InstallFunction(GlobalIntlNumberFormat, 'supportedLocalesOf',
+ function(locales) {
return supportedLocalesOf('numberformat', locales, arguments[1]);
}
);
@@ -1360,12 +1339,12 @@ function formatNumber(formatter, value) {
// Spec treats -0 and +0 as 0.
var number = TO_NUMBER(value) + 0;
- return %InternalNumberFormat(%GetImplFromInitializedIntlObject(formatter),
- number);
+ return %InternalNumberFormat(formatter, number);
}
-AddBoundMethod(Intl.NumberFormat, 'format', formatNumber, 1, 'numberformat');
+AddBoundMethod(GlobalIntlNumberFormat, 'format', formatNumber, 1,
+ 'numberformat', true);
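
`format` is installed with compat = true, so it also unwraps legacy receivers (see Unwrap above); the `+ 0` in formatNumber folds -0 into +0 per spec. Standard usage:

```js
const eur = new Intl.NumberFormat('de-DE', { style: 'currency', currency: 'EUR' });
console.log(eur.format(123456.789));  // "123.456,79 €"
console.log(eur.format(-0));          // "0,00 €" — since -0 + 0 === +0
```
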
/**
* Returns a string that matches LDML representation of the options object.
@@ -1518,6 +1497,8 @@ function toDateTimeOptions(options, required, defaults) {
options = TO_OBJECT(options);
}
+ options = %object_create(options);
+
var needsDefault = true;
if ((required === 'date' || required === 'any') &&
(!IS_UNDEFINED(options.weekday) || !IS_UNDEFINED(options.year) ||
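
The added `%object_create(options)` makes ToDateTimeOptions write its date/time defaults onto a wrapper object whose prototype is the caller's options bag; the caller's object is read through the chain but never mutated (previously the defaults could land on it directly). In plain-JS terms:

```js
const userOptions = { timeZone: 'UTC' };
new Intl.DateTimeFormat('en', userOptions);
console.log(Object.keys(userOptions));  // ["timeZone"] — no defaults added

// Shape of the wrapper the implementation now uses:
const wrapped = Object.create(userOptions);
wrapped.year = 'numeric';            // a default lands on the wrapper only
console.log(wrapped.timeZone);       // "UTC" — inherited read
console.log('year' in userOptions);  // false
```
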
@@ -1569,12 +1550,7 @@ function toDateTimeOptions(options, required, defaults) {
* Initializes the given object so it's a valid DateTimeFormat instance.
* Useful for subclassing.
*/
-function initializeDateTimeFormat(dateFormat, locales, options) {
-
- if (%IsInitializedIntlObject(dateFormat)) {
- throw %make_type_error(kReinitializeIntl, "DateTimeFormat");
- }
-
+function CreateDateTimeFormat(locales, options) {
if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1636,14 +1612,14 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
year: {writable: true}
});
- var formatter = %CreateDateTimeFormat(
+ var dateFormat = %CreateDateTimeFormat(
requestedLocale, {skeleton: ldmlString, timeZone: tz}, resolved);
if (resolved.timeZone === "Etc/Unknown") {
throw %make_range_error(kUnsupportedTimeZone, tz);
}
- %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat', formatter);
+ %MarkAsInitializedIntlObjectOfType(dateFormat, 'dateformat');
dateFormat[resolvedSymbol] = resolved;
return dateFormat;
@@ -1656,31 +1632,20 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
*
* @constructor
*/
-InstallConstructor(Intl, 'DateTimeFormat', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.DateTimeFormat(locales, options);
- }
-
- return initializeDateTimeFormat(TO_OBJECT(this), locales, options);
- }
-);
+function DateTimeFormatConstructor() {
+ return IntlConstruct(this, GlobalIntlDateTimeFormat, CreateDateTimeFormat,
+ new.target, arguments, true);
+}
+%SetCode(GlobalIntlDateTimeFormat, DateTimeFormatConstructor);
/**
* DateTimeFormat resolvedOptions method.
*/
-InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
- if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
- throw %make_type_error(kResolvedOptionsCalledOnNonObject, "DateTimeFormat");
- }
+InstallFunction(GlobalIntlDateTimeFormat.prototype, 'resolvedOptions',
+ function() {
+ var format = Unwrap(this, 'dateformat', GlobalIntlDateTimeFormat,
+ 'resolvedOptions', true);
/**
* Maps ICU calendar names to LDML/BCP47 types for key 'ca'.
@@ -1693,7 +1658,6 @@ InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
'ethiopic-amete-alem': 'ethioaa'
};
- var format = this;
var fromPattern = fromLDMLString(format[resolvedSymbol][patternSymbol]);
var userCalendar = ICU_CALENDAR_MAP[format[resolvedSymbol].calendar];
if (IS_UNDEFINED(userCalendar)) {
@@ -1733,11 +1697,8 @@ InstallFunction(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
* order in the returned list as in the input list.
 * The options argument is optional.
*/
-InstallFunction(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
+InstallFunction(GlobalIntlDateTimeFormat, 'supportedLocalesOf',
+ function(locales) {
return supportedLocalesOf('dateformat', locales, arguments[1]);
}
);
@@ -1758,18 +1719,19 @@ function formatDate(formatter, dateValue) {
if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
- return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
- new GlobalDate(dateMs));
+ return %InternalDateFormat(formatter, new GlobalDate(dateMs));
}
function FormatDateToParts(dateValue) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
if (!IS_OBJECT(this)) {
throw %make_type_error(kCalledOnNonObject, this);
}
+ if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'Intl.DateTimeFormat.prototype.formatToParts',
+ this);
+ }
var dateMs;
if (IS_UNDEFINED(dateValue)) {
dateMs = %DateCurrentTime();
@@ -1779,15 +1741,15 @@ function FormatDateToParts(dateValue) {
if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
- return %InternalDateFormatToParts(
- %GetImplFromInitializedIntlObject(this), new GlobalDate(dateMs));
+ return %InternalDateFormatToParts(this, new GlobalDate(dateMs));
}
%FunctionSetLength(FormatDateToParts, 0);
// 0 because date is optional argument.
-AddBoundMethod(Intl.DateTimeFormat, 'format', formatDate, 0, 'dateformat');
+AddBoundMethod(GlobalIntlDateTimeFormat, 'format', formatDate, 0, 'dateformat',
+ true);
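
FormatDateToParts now type-checks its receiver inline, since it is installed as an ordinary prototype method rather than through AddBoundMethod and so gets no unwrapping. Standard usage; exact values depend on the host time zone:

```js
const dtf = new Intl.DateTimeFormat('en-US', { year: 'numeric', month: 'short' });
console.log(dtf.formatToParts(new Date(2017, 2, 21)));
// [ { type: 'month', value: 'Mar' },
//   { type: 'literal', value: ' ' },
//   { type: 'year', value: '2017' } ]
```
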
/**
@@ -1835,11 +1797,7 @@ function canonicalizeTimeZoneID(tzID) {
* Initializes the given object so it's a valid BreakIterator instance.
* Useful for subclassing.
*/
-function initializeBreakIterator(iterator, locales, options) {
- if (%IsInitializedIntlObject(iterator)) {
- throw %make_type_error(kReinitializeIntl, "v8BreakIterator");
- }
-
+function CreateBreakIterator(locales, options) {
if (IS_UNDEFINED(options)) {
options = {};
}
@@ -1858,12 +1816,9 @@ function initializeBreakIterator(iterator, locales, options) {
locale: {writable: true}
});
- var internalIterator = %CreateBreakIterator(locale.locale,
- internalOptions,
- resolved);
+ var iterator = %CreateBreakIterator(locale.locale, internalOptions, resolved);
- %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator',
- internalIterator);
+ %MarkAsInitializedIntlObjectOfType(iterator, 'breakiterator');
iterator[resolvedSymbol] = resolved;
return iterator;
@@ -1876,34 +1831,25 @@ function initializeBreakIterator(iterator, locales, options) {
*
* @constructor
*/
-InstallConstructor(Intl, 'v8BreakIterator', function() {
- var locales = arguments[0];
- var options = arguments[1];
-
- if (!this || this === Intl) {
- // Constructor is called as a function.
- return new Intl.v8BreakIterator(locales, options);
- }
-
- return initializeBreakIterator(TO_OBJECT(this), locales, options);
- }
-);
+function v8BreakIteratorConstructor() {
+ return IntlConstruct(this, GlobalIntlv8BreakIterator, CreateBreakIterator,
+ new.target, arguments);
+}
+%SetCode(GlobalIntlv8BreakIterator, v8BreakIteratorConstructor);
/**
* BreakIterator resolvedOptions method.
*/
-InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
+InstallFunction(GlobalIntlv8BreakIterator.prototype, 'resolvedOptions',
function() {
if (!IS_UNDEFINED(new.target)) {
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
}
- if (!%IsInitializedIntlObjectOfType(this, 'breakiterator')) {
- throw %make_type_error(kResolvedOptionsCalledOnNonObject, "v8BreakIterator");
- }
+ var segmenter = Unwrap(this, 'breakiterator', GlobalIntlv8BreakIterator,
+ 'resolvedOptions', false);
- var segmenter = this;
var locale =
getOptimalLanguageTag(segmenter[resolvedSymbol].requestedLocale,
segmenter[resolvedSymbol].locale);
@@ -1922,7 +1868,7 @@ InstallFunction(Intl.v8BreakIterator.prototype, 'resolvedOptions',
* order in the returned list as in the input list.
 * The options argument is optional.
*/
-InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
+InstallFunction(GlobalIntlv8BreakIterator, 'supportedLocalesOf',
function(locales) {
if (!IS_UNDEFINED(new.target)) {
throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
@@ -1938,8 +1884,7 @@ InstallFunction(Intl.v8BreakIterator, 'supportedLocalesOf',
* gets discarded.
*/
function adoptText(iterator, text) {
- %BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
- TO_STRING(text));
+ %BreakIteratorAdoptText(iterator, TO_STRING(text));
}
@@ -1947,7 +1892,7 @@ function adoptText(iterator, text) {
* Returns index of the first break in the string and moves current pointer.
*/
function first(iterator) {
- return %BreakIteratorFirst(%GetImplFromInitializedIntlObject(iterator));
+ return %BreakIteratorFirst(iterator);
}
@@ -1955,7 +1900,7 @@ function first(iterator) {
* Returns the index of the next break and moves the pointer.
*/
function next(iterator) {
- return %BreakIteratorNext(%GetImplFromInitializedIntlObject(iterator));
+ return %BreakIteratorNext(iterator);
}
@@ -1963,7 +1908,7 @@ function next(iterator) {
* Returns index of the current break.
*/
function current(iterator) {
- return %BreakIteratorCurrent(%GetImplFromInitializedIntlObject(iterator));
+ return %BreakIteratorCurrent(iterator);
}
@@ -1971,25 +1916,26 @@ function current(iterator) {
* Returns type of the current break.
*/
function breakType(iterator) {
- return %BreakIteratorBreakType(%GetImplFromInitializedIntlObject(iterator));
+ return %BreakIteratorBreakType(iterator);
}
-AddBoundMethod(Intl.v8BreakIterator, 'adoptText', adoptText, 1,
+AddBoundMethod(GlobalIntlv8BreakIterator, 'adoptText', adoptText, 1,
'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'first', first, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'next', next, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'current', current, 0, 'breakiterator');
-AddBoundMethod(Intl.v8BreakIterator, 'breakType', breakType, 0,
+AddBoundMethod(GlobalIntlv8BreakIterator, 'first', first, 0, 'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'next', next, 0, 'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'current', current, 0,
+ 'breakiterator');
+AddBoundMethod(GlobalIntlv8BreakIterator, 'breakType', breakType, 0,
'breakiterator');
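
v8BreakIterator is a nonstandard V8 extension (hence no compat unwrapping); its bound methods drive one ICU break iterator per instance, with -1 mirroring ICU's DONE sentinel. A hedged sketch, valid only on V8 builds with ICU:

```js
// Nonstandard, engine-specific API; output varies by ICU version.
const it = new Intl.v8BreakIterator('en', { type: 'word' });
it.adoptText('Hello world');
for (let pos = it.first(); pos !== -1; pos = it.next()) {
  console.log(pos, it.breakType());  // boundary offset + segment kind
}
```
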
// Save references to Intl objects and methods we use, for added security.
var savedObjects = {
- 'collator': Intl.Collator,
- 'numberformat': Intl.NumberFormat,
- 'dateformatall': Intl.DateTimeFormat,
- 'dateformatdate': Intl.DateTimeFormat,
- 'dateformattime': Intl.DateTimeFormat
+ 'collator': GlobalIntlCollator,
+ 'numberformat': GlobalIntlNumberFormat,
+ 'dateformatall': GlobalIntlDateTimeFormat,
+ 'dateformatdate': GlobalIntlDateTimeFormat,
+ 'dateformattime': GlobalIntlDateTimeFormat
};
@@ -2054,18 +2000,11 @@ function LocaleConvertCase(s, locales, isToUpper) {
// StringSplit is slower than this.
var pos = %StringIndexOf(language, '-', 0);
- if (pos != -1) {
+ if (pos !== -1) {
language = %_Call(StringSubstring, language, 0, pos);
}
- var CUSTOM_CASE_LANGUAGES = ['az', 'el', 'lt', 'tr'];
- var langIndex = %ArrayIndexOf(CUSTOM_CASE_LANGUAGES, language, 0);
- if (langIndex == -1) {
- // language-independent case conversion.
- return isToUpper ? %StringToUpperCaseI18N(s) : %StringToLowerCaseI18N(s);
- }
- return %StringLocaleConvertCase(s, isToUpper,
- CUSTOM_CASE_LANGUAGES[langIndex]);
+ return %StringLocaleConvertCase(s, isToUpper, language);
}
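
The hard-coded ['az', 'el', 'lt', 'tr'] whitelist is gone: the requested base language now goes straight to %StringLocaleConvertCase, and ICU decides whether a tailored mapping exists. The classic observable case is the Turkish dotted/dotless i:

```js
console.log('i'.toLocaleUpperCase('tr'));  // "İ" (U+0130) — Turkish tailoring
console.log('i'.toLocaleUpperCase('en'));  // "I"
console.log('İ'.toLocaleLowerCase('tr'));  // "i"
```
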
/**
@@ -2073,10 +2012,6 @@ function LocaleConvertCase(s, locales, isToUpper) {
* Overrides the built-in method.
*/
OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
if (IS_NULL_OR_UNDEFINED(this)) {
throw %make_type_error(kMethodInvokedOnNullOrUndefined);
}
@@ -2098,10 +2033,6 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
*/
OverrideFunction(GlobalString.prototype, 'normalize', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
var s = TO_STRING(this);
@@ -2121,27 +2052,16 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
);
function ToLowerCaseI18N() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
- var s = TO_STRING(this);
- return %StringToLowerCaseI18N(s);
+ return %StringToLowerCaseI18N(TO_STRING(this));
}
function ToUpperCaseI18N() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
- var s = TO_STRING(this);
- return %StringToUpperCaseI18N(s);
+ return %StringToUpperCaseI18N(TO_STRING(this));
}
function ToLocaleLowerCaseI18N(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
return LocaleConvertCase(TO_STRING(this), locales, false);
}
@@ -2149,9 +2069,6 @@ function ToLocaleLowerCaseI18N(locales) {
%FunctionSetLength(ToLocaleLowerCaseI18N, 0);
function ToLocaleUpperCaseI18N(locales) {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
return LocaleConvertCase(TO_STRING(this), locales, true);
}
@@ -2176,10 +2093,6 @@ utils.Export(function(to) {
* If locale or options are omitted, defaults are used.
*/
OverrideFunction(GlobalNumber.prototype, 'toLocaleString', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
if (!(this instanceof GlobalNumber) && typeof(this) !== 'number') {
throw %make_type_error(kMethodInvokedOnWrongType, "Number");
}
@@ -2218,10 +2131,6 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* present in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
@@ -2236,10 +2145,6 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
@@ -2254,10 +2159,6 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
* in the output.
*/
OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
- if (!IS_UNDEFINED(new.target)) {
- throw %make_type_error(kOrdinaryFunctionCalledAsConstructor);
- }
-
var locales = arguments[0];
var options = arguments[1];
return toLocaleDateTime(
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 5ad578a2be..955c89fd07 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -60,7 +60,6 @@ macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
-macro IS_REGEXP(arg) = (%_IsRegExp(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
diff --git a/deps/v8/src/js/prologue.js b/deps/v8/src/js/prologue.js
index dba77d7d8d..e51ab558b6 100644
--- a/deps/v8/src/js/prologue.js
+++ b/deps/v8/src/js/prologue.js
@@ -112,20 +112,6 @@ function InstallGetter(object, name, getter, attributes, prefix) {
}
-// Helper function to install a getter/setter accessor property.
-function InstallGetterSetter(object, name, getter, setter, attributes) {
- %CheckIsBootstrapping();
- if (IS_UNDEFINED(attributes)) attributes = DONT_ENUM;
- SetFunctionName(getter, name, "get");
- SetFunctionName(setter, name, "set");
- %FunctionRemovePrototype(getter);
- %FunctionRemovePrototype(setter);
- %DefineAccessorPropertyUnchecked(object, name, getter, setter, attributes);
- %SetNativeFlag(getter);
- %SetNativeFlag(setter);
-}
-
-
function OverrideFunction(object, name, f, afterInitialBootstrap) {
%CheckIsBootstrapping();
%object_define_property(object, name, { value: f,
@@ -270,7 +256,6 @@ utils.SetFunctionName = SetFunctionName;
utils.InstallConstants = InstallConstants;
utils.InstallFunctions = InstallFunctions;
utils.InstallGetter = InstallGetter;
-utils.InstallGetterSetter = InstallGetterSetter;
utils.OverrideFunction = OverrideFunction;
utils.SetUpLockedPrototype = SetUpLockedPrototype;
utils.PostNatives = PostNatives;
@@ -281,7 +266,7 @@ utils.PostDebug = PostDebug;
// -----------------------------------------------------------------------
-%OptimizeObjectForAddingMultipleProperties(extrasUtils, 5);
+%OptimizeObjectForAddingMultipleProperties(extrasUtils, 7);
extrasUtils.logStackTrace = function logStackTrace() {
%DebugTrace();
@@ -322,6 +307,15 @@ extrasUtils.uncurryThis = function uncurryThis(func) {
};
};
+// We pass true to trigger the debugger's on exception handler.
+extrasUtils.rejectPromise = function rejectPromise(promise, reason) {
+ %promise_internal_reject(promise, reason, true);
+};
+
+extrasUtils.markPromiseAsHandled = function markPromiseAsHandled(promise) {
+ %PromiseMarkAsHandled(promise);
+};
+
%ToFastProperties(extrasUtils);
})
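
The two new helpers are handed to V8 "extras" (engine-bundled scripts), replacing the self-hosted promise exports deleted from promise.js below. A sketch of use from inside an extra, where extrasUtils is in scope — the wrapper names are illustrative and this is not runnable as an ordinary script:

```js
// Inside a V8 extra only: extrasUtils comes from the prologue.
function failInternalPromise(promise, error) {
  // Rejects through the runtime; the binding passes debugEvent = true so
  // the debugger's on-exception handler fires.
  extrasUtils.rejectPromise(promise, error);
}

function adoptSilently(promise) {
  // Suppress unhandled-rejection reporting for an internal promise.
  extrasUtils.markPromiseAsHandled(promise);
}
```
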
diff --git a/deps/v8/src/js/promise.js b/deps/v8/src/js/promise.js
index 0b37c643d1..95ab793591 100644
--- a/deps/v8/src/js/promise.js
+++ b/deps/v8/src/js/promise.js
@@ -12,193 +12,16 @@
// Imports
var InternalArray = utils.InternalArray;
-var promiseAsyncStackIDSymbol =
- utils.ImportNow("promise_async_stack_id_symbol");
var promiseHandledBySymbol =
utils.ImportNow("promise_handled_by_symbol");
var promiseForwardingHandlerSymbol =
utils.ImportNow("promise_forwarding_handler_symbol");
-var promiseHasHandlerSymbol =
- utils.ImportNow("promise_has_handler_symbol");
-var promiseRejectReactionsSymbol =
- utils.ImportNow("promise_reject_reactions_symbol");
-var promiseFulfillReactionsSymbol =
- utils.ImportNow("promise_fulfill_reactions_symbol");
-var promiseDeferredReactionSymbol =
- utils.ImportNow("promise_deferred_reaction_symbol");
-var promiseHandledHintSymbol =
- utils.ImportNow("promise_handled_hint_symbol");
-var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
-var promiseStateSymbol = utils.ImportNow("promise_state_symbol");
-var promiseResultSymbol = utils.ImportNow("promise_result_symbol");
-var SpeciesConstructor;
-var speciesSymbol = utils.ImportNow("species_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var ObjectHasOwnProperty;
-
-utils.Import(function(from) {
- ObjectHasOwnProperty = from.ObjectHasOwnProperty;
- SpeciesConstructor = from.SpeciesConstructor;
-});
+var GlobalPromise = global.Promise;
// -------------------------------------------------------------------
-// [[PromiseState]] values:
-// These values should be kept in sync with PromiseStatus in globals.h
-const kPending = 0;
-const kFulfilled = +1;
-const kRejected = +2;
-
-const kResolveCallback = 0;
-const kRejectCallback = 1;
-
-// ES#sec-promise-executor
-// Promise ( executor )
-var GlobalPromise = function Promise(executor) {
- if (executor === promiseRawSymbol) {
- return %_NewObject(GlobalPromise, new.target);
- }
- if (IS_UNDEFINED(new.target)) throw %make_type_error(kNotAPromise, this);
- if (!IS_CALLABLE(executor)) {
- throw %make_type_error(kResolverNotAFunction, executor);
- }
-
- var promise = PromiseInit(%_NewObject(GlobalPromise, new.target));
- // Calling the reject function would be a new exception, so debugEvent = true
- // TODO(gsathya): Remove container for callbacks when this is moved
- // to CPP/TF.
- var callbacks = %create_resolving_functions(promise, true);
- var debug_is_active = DEBUG_IS_ACTIVE;
- try {
- if (debug_is_active) %DebugPushPromise(promise);
- executor(callbacks[kResolveCallback], callbacks[kRejectCallback]);
- } %catch (e) { // Natives syntax to mark this catch block.
- %_Call(callbacks[kRejectCallback], UNDEFINED, e);
- } finally {
- if (debug_is_active) %DebugPopPromise();
- }
-
- return promise;
-}
-
// Core functionality.
-function PromiseSet(promise, status, value) {
- SET_PRIVATE(promise, promiseStateSymbol, status);
- SET_PRIVATE(promise, promiseResultSymbol, value);
-
- // There are 3 possible states for the resolve, reject symbols when we add
- // a new callback --
- // 1) UNDEFINED -- This is the zero state where there is no callback
- // registered. When we see this state, we directly attach the callbacks to
- // the symbol.
- // 2) !IS_ARRAY -- There is a single callback directly attached to the
- // symbols. We need to create a new array to store additional callbacks.
- // 3) IS_ARRAY -- There are multiple callbacks already registered,
- // therefore we can just push the new callback to the existing array.
- SET_PRIVATE(promise, promiseFulfillReactionsSymbol, UNDEFINED);
- SET_PRIVATE(promise, promiseRejectReactionsSymbol, UNDEFINED);
-
- // This symbol is used only when one deferred needs to be attached. When more
- // than one deferred need to be attached the promise, we attach them directly
- // to the promiseFulfillReactionsSymbol and promiseRejectReactionsSymbol and
- // reset this back to UNDEFINED.
- SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
-
- return promise;
-}
-
-function PromiseCreateAndSet(status, value) {
- var promise = new GlobalPromise(promiseRawSymbol);
- // If debug is active, notify about the newly created promise first.
- if (DEBUG_IS_ACTIVE) PromiseSet(promise, kPending, UNDEFINED);
- return PromiseSet(promise, status, value);
-}
-
-function PromiseInit(promise) {
- return PromiseSet(promise, kPending, UNDEFINED);
-}
-
-function PromiseHandle(value, handler, deferred) {
- var debug_is_active = DEBUG_IS_ACTIVE;
- try {
- if (debug_is_active) %DebugPushPromise(deferred.promise);
- var result = handler(value);
- if (IS_UNDEFINED(deferred.resolve)) {
- ResolvePromise(deferred.promise, result);
- } else {
- %_Call(deferred.resolve, UNDEFINED, result);
- }
- } %catch (exception) { // Natives syntax to mark this catch block.
- try {
- if (IS_UNDEFINED(deferred.reject)) {
- // Pass false for debugEvent so .then chaining does not trigger
- // redundant ExceptionEvents.
- %PromiseReject(deferred.promise, exception, false);
- PromiseSet(deferred.promise, kRejected, exception);
- } else {
- %_Call(deferred.reject, UNDEFINED, exception);
- }
- } catch (e) { }
- } finally {
- if (debug_is_active) %DebugPopPromise();
- }
-}
-
-function PromiseDebugGetInfo(deferreds, status) {
- var id, name, instrumenting = DEBUG_IS_ACTIVE;
-
- if (instrumenting) {
- // In an async function, reuse the existing stack related to the outer
- // Promise. Otherwise, e.g. in a direct call to then, save a new stack.
- // Promises with multiple reactions with one or more of them being async
- // functions will not get a good stack trace, as async functions require
- // different stacks from direct Promise use, but we save and restore a
- // stack once for all reactions. TODO(littledan): Improve this case.
- if (!IS_UNDEFINED(deferreds) &&
- HAS_PRIVATE(deferreds.promise, promiseHandledBySymbol) &&
- HAS_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
- promiseAsyncStackIDSymbol)) {
- id = GET_PRIVATE(GET_PRIVATE(deferreds.promise, promiseHandledBySymbol),
- promiseAsyncStackIDSymbol);
- name = "async function";
- } else {
- id = %DebugNextMicrotaskId();
- name = status === kFulfilled ? "Promise.resolve" : "Promise.reject";
- %DebugAsyncTaskEvent("enqueue", id, name);
- }
- }
- return [id, name];
-}
-
-function PromiseAttachCallbacks(promise, deferred, onResolve, onReject) {
- var maybeResolveCallbacks =
- GET_PRIVATE(promise, promiseFulfillReactionsSymbol);
- if (IS_UNDEFINED(maybeResolveCallbacks)) {
- SET_PRIVATE(promise, promiseFulfillReactionsSymbol, onResolve);
- SET_PRIVATE(promise, promiseRejectReactionsSymbol, onReject);
- SET_PRIVATE(promise, promiseDeferredReactionSymbol, deferred);
- } else if (!IS_ARRAY(maybeResolveCallbacks)) {
- var resolveCallbacks = new InternalArray();
- var rejectCallbacks = new InternalArray();
- var existingDeferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
-
- resolveCallbacks.push(
- maybeResolveCallbacks, existingDeferred, onResolve, deferred);
- rejectCallbacks.push(GET_PRIVATE(promise, promiseRejectReactionsSymbol),
- existingDeferred,
- onReject,
- deferred);
-
- SET_PRIVATE(promise, promiseFulfillReactionsSymbol, resolveCallbacks);
- SET_PRIVATE(promise, promiseRejectReactionsSymbol, rejectCallbacks);
- SET_PRIVATE(promise, promiseDeferredReactionSymbol, UNDEFINED);
- } else {
- maybeResolveCallbacks.push(onResolve, deferred);
- GET_PRIVATE(promise, promiseRejectReactionsSymbol).push(onReject, deferred);
- }
-}
-
function PromiseIdResolveHandler(x) { return x; }
function PromiseIdRejectHandler(r) { %_ReThrow(r); }
SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
@@ -206,229 +29,8 @@ SET_PRIVATE(PromiseIdRejectHandler, promiseForwardingHandlerSymbol, true);
// -------------------------------------------------------------------
// Define exported functions.
-// For bootstrapper.
-
-// ES#sec-ispromise IsPromise ( x )
-function IsPromise(x) {
- return IS_RECEIVER(x) && HAS_DEFINED_PRIVATE(x, promiseStateSymbol);
-}
-
-function PromiseCreate() {
- return PromiseInit(new GlobalPromise(promiseRawSymbol));
-}
-
-// ES#sec-promise-resolve-functions
-// Promise Resolve Functions, steps 6-13
-function ResolvePromise(promise, resolution) {
- if (resolution === promise) {
- var exception = %make_type_error(kPromiseCyclic, resolution);
- %PromiseReject(promise, exception, true);
- PromiseSet(promise, kRejected, exception);
- return;
- }
- if (IS_RECEIVER(resolution)) {
- // 25.4.1.3.2 steps 8-12
- try {
- var then = resolution.then;
- } catch (e) {
- %PromiseReject(promise, e, true);
- PromiseSet(promise, kRejected, e);
- return;
- }
-
- // Resolution is a native promise and if it's already resolved or
- // rejected, shortcircuit the resolution procedure by directly
- // reusing the value from the promise.
- if (IsPromise(resolution) && then === PromiseThen) {
- var thenableState = GET_PRIVATE(resolution, promiseStateSymbol);
- if (thenableState === kFulfilled) {
- // This goes inside the if-else to save one symbol lookup in
- // the slow path.
- var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
- %PromiseFulfill(promise, kFulfilled, thenableValue,
- promiseFulfillReactionsSymbol);
- PromiseSet(promise, kFulfilled, thenableValue);
- SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
- return;
- } else if (thenableState === kRejected) {
- var thenableValue = GET_PRIVATE(resolution, promiseResultSymbol);
- if (!HAS_DEFINED_PRIVATE(resolution, promiseHasHandlerSymbol)) {
- // Promise has already been rejected, but had no handler.
- // Revoke previously triggered reject event.
- %PromiseRevokeReject(resolution);
- }
- // Don't cause a debug event as this case is forwarding a rejection
- %PromiseReject(promise, thenableValue, false);
- PromiseSet(promise, kRejected, thenableValue);
- SET_PRIVATE(resolution, promiseHasHandlerSymbol, true);
- return;
- }
- }
-
- if (IS_CALLABLE(then)) {
- if (DEBUG_IS_ACTIVE && IsPromise(resolution)) {
- // Mark the dependency of the new promise on the resolution
- SET_PRIVATE(resolution, promiseHandledBySymbol, promise);
- }
- %EnqueuePromiseResolveThenableJob(promise, resolution, then);
- return;
- }
- }
- %PromiseFulfill(promise, kFulfilled, resolution,
- promiseFulfillReactionsSymbol);
- PromiseSet(promise, kFulfilled, resolution);
-}
-
-// Only used by async-await.js
-function RejectPromise(promise, reason, debugEvent) {
- %PromiseReject(promise, reason, debugEvent);
- PromiseSet(promise, kRejected, reason);
-}
-
-// Export to bindings
-function DoRejectPromise(promise, reason) {
- %PromiseReject(promise, reason, true);
- PromiseSet(promise, kRejected, reason);
-}
-
-// ES#sec-newpromisecapability
-// NewPromiseCapability ( C )
-function NewPromiseCapability(C, debugEvent) {
- if (C === GlobalPromise) {
- // Optimized case, avoid extra closure.
- var promise = PromiseCreate();
- // TODO(gsathya): Remove container for callbacks when this is
- // moved to CPP/TF.
- var callbacks = %create_resolving_functions(promise, debugEvent);
- return {
- promise: promise,
- resolve: callbacks[kResolveCallback],
- reject: callbacks[kRejectCallback]
- };
- }
-
- var result = {promise: UNDEFINED, resolve: UNDEFINED, reject: UNDEFINED };
- result.promise = new C((resolve, reject) => {
- if (!IS_UNDEFINED(result.resolve) || !IS_UNDEFINED(result.reject))
- throw %make_type_error(kPromiseExecutorAlreadyInvoked);
- result.resolve = resolve;
- result.reject = reject;
- });
-
- if (!IS_CALLABLE(result.resolve) || !IS_CALLABLE(result.reject))
- throw %make_type_error(kPromiseNonCallable);
-
- return result;
-}
-
-// ES#sec-promise.reject
-// Promise.reject ( x )
-function PromiseReject(r) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kCalledOnNonObject, PromiseResolve);
- }
- if (this === GlobalPromise) {
- // Optimized case, avoid extra closure.
- var promise = PromiseCreateAndSet(kRejected, r);
- // Trigger debug events if the debugger is on, as Promise.reject is
- // equivalent to throwing an exception directly.
- %PromiseRejectEventFromStack(promise, r);
- return promise;
- } else {
- var promiseCapability = NewPromiseCapability(this, true);
- %_Call(promiseCapability.reject, UNDEFINED, r);
- return promiseCapability.promise;
- }
-}
-
-function PerformPromiseThen(promise, onResolve, onReject, resultCapability) {
- if (!IS_CALLABLE(onResolve)) onResolve = PromiseIdResolveHandler;
- if (!IS_CALLABLE(onReject)) onReject = PromiseIdRejectHandler;
-
- var status = GET_PRIVATE(promise, promiseStateSymbol);
- switch (status) {
- case kPending:
- PromiseAttachCallbacks(promise, resultCapability, onResolve, onReject);
- break;
- case kFulfilled:
- %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
- onResolve, resultCapability, kFulfilled);
- break;
- case kRejected:
- if (!HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
- // Promise has already been rejected, but had no handler.
- // Revoke previously triggered reject event.
- %PromiseRevokeReject(promise);
- }
- %EnqueuePromiseReactionJob(GET_PRIVATE(promise, promiseResultSymbol),
- onReject, resultCapability, kRejected);
- break;
- }
-
- // Mark this promise as having handler.
- SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
- return resultCapability.promise;
-}
-
-// ES#sec-promise.prototype.then
-// Promise.prototype.then ( onFulfilled, onRejected )
-// Multi-unwrapped chaining with thenable coercion.
-function PromiseThen(onResolve, onReject) {
- var status = GET_PRIVATE(this, promiseStateSymbol);
- if (IS_UNDEFINED(status)) {
- throw %make_type_error(kNotAPromise, this);
- }
-
- var constructor = SpeciesConstructor(this, GlobalPromise);
- var resultCapability;
-
- // The resultCapability.promise is only ever fulfilled internally,
- // so we don't need the closures to protect against accidentally
- // calling them multiple times.
- if (constructor === GlobalPromise) {
- // TODO(gsathya): Combine this into NewPromiseCapability.
- resultCapability = {
- promise: PromiseCreate(),
- resolve: UNDEFINED,
- reject: UNDEFINED
- };
- } else {
- // Pass false for debugEvent so .then chaining does not trigger
- // redundant ExceptionEvents.
- resultCapability = NewPromiseCapability(constructor, false);
- }
- return PerformPromiseThen(this, onResolve, onReject, resultCapability);
-}
-
-// ES#sec-promise.prototype.catch
-// Promise.prototype.catch ( onRejected )
-function PromiseCatch(onReject) {
- return this.then(UNDEFINED, onReject);
-}
-
// Combinators.
-// ES#sec-promise.resolve
-// Promise.resolve ( x )
-function PromiseResolve(x) {
- if (!IS_RECEIVER(this)) {
- throw %make_type_error(kCalledOnNonObject, PromiseResolve);
- }
- if (IsPromise(x) && x.constructor === this) return x;
-
- // Avoid creating resolving functions.
- if (this === GlobalPromise) {
- var promise = PromiseCreate();
- ResolvePromise(promise, x);
- return promise;
- }
-
- // debugEvent is not so meaningful here as it will be resolved
- var promiseCapability = NewPromiseCapability(this, true);
- %_Call(promiseCapability.resolve, UNDEFINED, x);
- return promiseCapability.promise;
-}
-
// ES#sec-promise.all
// Promise.all ( iterable )
function PromiseAll(iterable) {
@@ -438,7 +40,7 @@ function PromiseAll(iterable) {
// false debugEvent so that forwarding the rejection through all does not
// trigger redundant ExceptionEvents
- var deferred = NewPromiseCapability(this, false);
+ var deferred = %new_promise_capability(this, false);
var resolutions = new InternalArray();
var count;
@@ -474,7 +76,7 @@ function PromiseAll(iterable) {
deferred.reject);
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
- if (instrumenting && IsPromise(throwawayPromise)) {
+ if (instrumenting && %is_promise(throwawayPromise)) {
SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
}
++i;
@@ -502,7 +104,7 @@ function PromiseRace(iterable) {
// false debugEvent so that forwarding the rejection through race does not
// trigger redundant ExceptionEvents
- var deferred = NewPromiseCapability(this, false);
+ var deferred = %new_promise_capability(this, false);
// For catch prediction, don't treat the .then calls as handling it;
// instead, recurse outwards.
@@ -517,7 +119,7 @@ function PromiseRace(iterable) {
deferred.reject);
// For catch prediction, mark that rejections here are semantically
// handled by the combined Promise.
- if (instrumenting && IsPromise(throwawayPromise)) {
+ if (instrumenting && %is_promise(throwawayPromise)) {
SET_PRIVATE(throwawayPromise, promiseHandledBySymbol, deferred.promise);
}
}
@@ -527,129 +129,17 @@ function PromiseRace(iterable) {
return deferred.promise;
}
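
Both combinators now obtain their capability from the %new_promise_capability runtime call rather than the removed self-hosted NewPromiseCapability; subclass semantics are preserved:

```js
class MyPromise extends Promise {}
console.log(MyPromise.all([]) instanceof MyPromise);   // true
console.log(MyPromise.race([]) instanceof MyPromise);  // true
MyPromise.all([1, Promise.resolve(2)]).then((values) => {
  console.log(values);  // [1, 2]
});
```
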
-
-// Utility for debugger
-
-function PromiseHasUserDefinedRejectHandlerCheck(handler, deferred) {
- // Recurse to the forwarding Promise, if any. This may be due to
- // - await reaction forwarding to the throwaway Promise, which has
- // a dependency edge to the outer Promise.
- // - PromiseIdResolveHandler forwarding to the output of .then
- // - Promise.all/Promise.race forwarding to a throwaway Promise, which
- // has a dependency edge to the generated outer Promise.
- if (GET_PRIVATE(handler, promiseForwardingHandlerSymbol)) {
- return PromiseHasUserDefinedRejectHandlerRecursive(deferred.promise);
- }
-
- // Otherwise, this is a real reject handler for the Promise
- return true;
-}
-
-function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
- // If this promise was marked as being handled by a catch block
- // in an async function, then it has a user-defined reject handler.
- if (GET_PRIVATE(promise, promiseHandledHintSymbol)) return true;
-
- // If this Promise is subsumed by another Promise (a Promise resolved
- // with another Promise, or an intermediate, hidden, throwaway Promise
- // within async/await), then recurse on the outer Promise.
- // In this case, the dependency is one possible way that the Promise
- // could be resolved, so it does not subsume the other following cases.
- var outerPromise = GET_PRIVATE(promise, promiseHandledBySymbol);
- if (outerPromise &&
- PromiseHasUserDefinedRejectHandlerRecursive(outerPromise)) {
- return true;
- }
-
- var queue = GET_PRIVATE(promise, promiseRejectReactionsSymbol);
- var deferred = GET_PRIVATE(promise, promiseDeferredReactionSymbol);
-
- if (IS_UNDEFINED(queue)) return false;
-
- if (!IS_ARRAY(queue)) {
- return PromiseHasUserDefinedRejectHandlerCheck(queue, deferred);
- }
-
- for (var i = 0; i < queue.length; i += 2) {
- if (PromiseHasUserDefinedRejectHandlerCheck(queue[i], queue[i + 1])) {
- return true;
- }
- }
- return false;
-}
-
-// Return whether the promise will be handled by a user-defined reject
-// handler somewhere down the promise chain. For this, we do a depth-first
-// search for a reject handler that's not the default PromiseIdRejectHandler.
-// This function also traverses dependencies of one Promise on another,
-// set up through async/await and Promises resolved with Promises.
-function PromiseHasUserDefinedRejectHandler() {
- return PromiseHasUserDefinedRejectHandlerRecursive(this);
-};
-
-function MarkPromiseAsHandled(promise) {
- SET_PRIVATE(promise, promiseHasHandlerSymbol, true);
-}
-
-
-function PromiseSpecies() {
- return this;
-}
-
// -------------------------------------------------------------------
// Install exported functions.
-%AddNamedProperty(global, 'Promise', GlobalPromise, DONT_ENUM);
-%AddNamedProperty(GlobalPromise.prototype, toStringTagSymbol, "Promise",
- DONT_ENUM | READ_ONLY);
-
utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
- "reject", PromiseReject,
"all", PromiseAll,
"race", PromiseRace,
- "resolve", PromiseResolve
-]);
-
-utils.InstallGetter(GlobalPromise, speciesSymbol, PromiseSpecies);
-
-utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
- "then", PromiseThen,
- "catch", PromiseCatch
]);
%InstallToContext([
- "promise_catch", PromiseCatch,
- "promise_create", PromiseCreate,
- "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
- "promise_reject", DoRejectPromise,
- // TODO(gsathya): Remove this once we update the promise builtin.
- "promise_internal_reject", RejectPromise,
- "promise_resolve", ResolvePromise,
- "promise_then", PromiseThen,
- "promise_handle", PromiseHandle,
- "promise_debug_get_info", PromiseDebugGetInfo
+ "promise_id_resolve_handler", PromiseIdResolveHandler,
+ "promise_id_reject_handler", PromiseIdRejectHandler
]);
-// This allows extras to create promises quickly without building extra
-// resolve/reject closures, and allows them to later resolve and reject any
-// promise without having to hold on to those closures forever.
-utils.InstallFunctions(extrasUtils, 0, [
- "createPromise", PromiseCreate,
- "resolvePromise", ResolvePromise,
- "rejectPromise", DoRejectPromise,
- "markPromiseAsHandled", MarkPromiseAsHandled
-]);
-
-utils.Export(function(to) {
- to.IsPromise = IsPromise;
- to.PromiseCreate = PromiseCreate;
- to.PromiseThen = PromiseThen;
-
- to.GlobalPromise = GlobalPromise;
- to.NewPromiseCapability = NewPromiseCapability;
- to.PerformPromiseThen = PerformPromiseThen;
- to.ResolvePromise = ResolvePromise;
- to.RejectPromise = RejectPromise;
-});
-
})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 3a9254c713..c0587350cd 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -333,7 +333,7 @@ function StringToLocaleUpperCase() {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
- return %_Call(StringReplace, TO_STRING(str), /"/g, "&quot;");
+ return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
}
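
HtmlEscape backs the Annex B String HTML methods (anchor, link, fontcolor, ...). Routing it through %RegExpInternalReplace means a user-patched RegExp.prototype[Symbol.replace] can no longer intercept the attribute escaping, which the old %_Call(StringReplace, ...) path dispatched through. The hazard this closes, sketched:

```js
const original = RegExp.prototype[Symbol.replace];
RegExp.prototype[Symbol.replace] = () => 'PWNED';
// The internal replace ignores the patch; the quote is still escaped:
console.log('x'.link('a"b'));  // <a href="a&quot;b">x</a>
RegExp.prototype[Symbol.replace] = original;  // restore the builtin
```
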
diff --git a/deps/v8/src/js/symbol.js b/deps/v8/src/js/symbol.js
deleted file mode 100644
index 4ec31ae9bb..0000000000
--- a/deps/v8/src/js/symbol.js
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalSymbol = global.Symbol;
-var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
-var isConcatSpreadableSymbol =
- utils.ImportNow("is_concat_spreadable_symbol");
-var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var matchSymbol = utils.ImportNow("match_symbol");
-var replaceSymbol = utils.ImportNow("replace_symbol");
-var searchSymbol = utils.ImportNow("search_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
-var splitSymbol = utils.ImportNow("split_symbol");
-var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
-var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
-var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
-
-// -------------------------------------------------------------------
-
-function SymbolFor(key) {
- key = TO_STRING(key);
- var registry = %SymbolRegistry();
- if (IS_UNDEFINED(registry.for[key])) {
- var symbol = %CreateSymbol(key);
- registry.for[key] = symbol;
- registry.keyFor[symbol] = key;
- }
- return registry.for[key];
-}
-
-
-function SymbolKeyFor(symbol) {
- if (!IS_SYMBOL(symbol)) throw %make_type_error(kSymbolKeyFor, symbol);
- return %SymbolRegistry().keyFor[symbol];
-}
-
-// -------------------------------------------------------------------
-
-utils.InstallConstants(GlobalSymbol, [
- "hasInstance", hasInstanceSymbol,
- "isConcatSpreadable", isConcatSpreadableSymbol,
- "iterator", iteratorSymbol,
- "match", matchSymbol,
- "replace", replaceSymbol,
- "search", searchSymbol,
- "species", speciesSymbol,
- "split", splitSymbol,
- "toPrimitive", toPrimitiveSymbol,
- "toStringTag", toStringTagSymbol,
- "unscopables", unscopablesSymbol,
-]);
-
-utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
- "for", SymbolFor,
- "keyFor", SymbolKeyFor
-]);
-
-})
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 7667e18d78..3a5cb84755 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -41,7 +41,6 @@ var SpeciesConstructor;
var ToPositiveInteger;
var ToIndex;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
@@ -260,7 +259,7 @@ function NAMEConstructor(arg1, arg2, arg3) {
NAMEConstructByTypedArray(this, arg1);
} else if (IS_RECEIVER(arg1)) {
var iteratorFn = arg1[iteratorSymbol];
- if (IS_UNDEFINED(iteratorFn) || iteratorFn === ArrayValues) {
+ if (IS_UNDEFINED(iteratorFn)) {
NAMEConstructByArrayLike(this, arg1, arg1.length);
} else {
NAMEConstructByIterable(this, arg1, iteratorFn);
@@ -847,10 +846,6 @@ function TypedArrayConstructor() {
throw %make_type_error(kConstructAbstractClass, "TypedArray");
}
-function TypedArraySpecies() {
- return this;
-}
-
// -------------------------------------------------------------------
%SetCode(GlobalTypedArray, TypedArrayConstructor);
@@ -858,7 +853,6 @@ utils.InstallFunctions(GlobalTypedArray, DONT_ENUM, [
"from", TypedArrayFrom,
"of", TypedArrayOf
]);
-utils.InstallGetter(GlobalTypedArray, speciesSymbol, TypedArraySpecies);
utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
TypedArrayGetToStringTag);
utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index 5e79b611a2..6c65234d50 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -399,8 +399,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
->NowContains(value)) {
Handle<FieldType> value_type(
value->OptimalType(isolate(), expected_representation));
- Map::GeneralizeFieldType(target, descriptor,
- expected_representation, value_type);
+ Map::GeneralizeField(target, descriptor, expected_representation,
+ value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
@@ -482,6 +482,7 @@ void JsonParser<seq_one_byte>::CommitStateToJsonObject(
int length = properties->length();
for (int i = 0; i < length; i++) {
Handle<Object> value = (*properties)[i];
+ // Initializing store.
json_object->WriteToField(i, *value);
}
}
diff --git a/deps/v8/src/json-stringifier.cc b/deps/v8/src/json-stringifier.cc
index 29685c20e2..b91b57142a 100644
--- a/deps/v8/src/json-stringifier.cc
+++ b/deps/v8/src/json-stringifier.cc
@@ -534,7 +534,8 @@ JsonStringifier::Result JsonStringifier::SerializeJSObject(
PropertyDetails details = map->instance_descriptors()->GetDetails(i);
if (details.IsDontEnum()) continue;
Handle<Object> property;
- if (details.type() == DATA && *map == js_obj->map()) {
+ if (details.location() == kField && *map == js_obj->map()) {
+ DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
property = JSObject::FastPropertyAt(js_obj, details.representation(),
field_index);
diff --git a/deps/v8/src/keys.cc b/deps/v8/src/keys.cc
index 9b6c8f3381..35ca22301f 100644
--- a/deps/v8/src/keys.cc
+++ b/deps/v8/src/keys.cc
@@ -328,12 +328,13 @@ Handle<FixedArray> GetFastEnumPropertyKeys(Isolate* isolate,
if (key->IsSymbol()) continue;
storage->set(index, key);
if (!indices.is_null()) {
- if (details.type() != DATA) {
- indices = Handle<FixedArray>();
- } else {
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
int load_by_field_index = field_index.GetLoadByFieldIndex();
indices->set(index, Smi::FromInt(load_by_field_index));
+ } else {
+ indices = Handle<FixedArray>();
}
}
index++;
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index bade05e2e8..4f193b30e8 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -28,7 +28,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
PropertyDetails details) {
- if (details.type() != DATA || !details.representation().IsDouble()) {
+ if (details.location() != kField || !details.representation().IsDouble()) {
return false;
}
// We care only about in-object properties.
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 11a72e732d..001bfe0637 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
using v8::base::bits::CountTrailingZeros32;
@@ -245,7 +246,7 @@ LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
if (current_length != array_length) {
DCHECK_LT(array_length, current_length);
int delta = current_length - array_length;
- heap->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(this, delta);
+ heap->RightTrimFixedArray(this, delta);
}
memset(DataPtr(), 0, DataSize());
LayoutDescriptor* layout_descriptor =
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 5a80e73f1f..b75536a36f 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -83,6 +83,7 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
// For our gdb macros, we should perhaps change these in the future.
void Print();
+ void ShortPrint(std::ostream& os);
void Print(std::ostream& os); // NOLINT
#endif
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 866a4471c7..0e2144b648 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -30,6 +30,12 @@ bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
}
+void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
+ double idle_time_in_seconds) {
+ reinterpret_cast<DefaultPlatform*>(platform)->RunIdleTasks(
+ isolate, idle_time_in_seconds);
+}
+
void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller) {
@@ -69,6 +75,12 @@ DefaultPlatform::~DefaultPlatform() {
i->second.pop();
}
}
+ for (auto& i : main_thread_idle_queue_) {
+ while (!i.second.empty()) {
+ delete i.second.front();
+ i.second.pop();
+ }
+ }
}
@@ -118,6 +130,15 @@ Task* DefaultPlatform::PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate) {
return deadline_and_task.second;
}
+IdleTask* DefaultPlatform::PopTaskInMainThreadIdleQueue(v8::Isolate* isolate) {
+ auto it = main_thread_idle_queue_.find(isolate);
+ if (it == main_thread_idle_queue_.end() || it->second.empty()) {
+ return nullptr;
+ }
+ IdleTask* task = it->second.front();
+ it->second.pop();
+ return task;
+}
bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
Task* task = NULL;
@@ -142,8 +163,25 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
return true;
}
+void DefaultPlatform::RunIdleTasks(v8::Isolate* isolate,
+ double idle_time_in_seconds) {
+ double deadline_in_seconds =
+ MonotonicallyIncreasingTime() + idle_time_in_seconds;
+ while (deadline_in_seconds > MonotonicallyIncreasingTime()) {
+ {
+ IdleTask* task;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ task = PopTaskInMainThreadIdleQueue(isolate);
+ }
+ if (task == nullptr) return;
+ task->Run(deadline_in_seconds);
+ delete task;
+ }
+ }
+}
-void DefaultPlatform::CallOnBackgroundThread(Task *task,
+void DefaultPlatform::CallOnBackgroundThread(Task* task,
ExpectedRuntime expected_runtime) {
EnsureInitialized();
queue_.Append(task);
@@ -164,15 +202,13 @@ void DefaultPlatform::CallDelayedOnForegroundThread(Isolate* isolate,
main_thread_delayed_queue_[isolate].push(std::make_pair(deadline, task));
}
-
void DefaultPlatform::CallIdleOnForegroundThread(Isolate* isolate,
IdleTask* task) {
- UNREACHABLE();
+ base::LockGuard<base::Mutex> guard(&lock_);
+ main_thread_idle_queue_[isolate].push(task);
}
-
-bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) { return false; }
-
+bool DefaultPlatform::IdleTasksEnabled(Isolate* isolate) { return true; }
double DefaultPlatform::MonotonicallyIncreasingTime() {
return base::TimeTicks::HighResolutionNow().ToInternalValue() /
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index 4b52c28129..0ab8e33ee5 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -41,6 +41,8 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
bool PumpMessageLoop(v8::Isolate* isolate);
+ void RunIdleTasks(v8::Isolate* isolate, double idle_time_in_seconds);
+
// v8::Platform implementation.
size_t NumberOfAvailableBackgroundThreads() override;
void CallOnBackgroundThread(Task* task,
@@ -74,13 +76,15 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
Task* PopTaskInMainThreadQueue(v8::Isolate* isolate);
Task* PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate);
+ IdleTask* PopTaskInMainThreadIdleQueue(v8::Isolate* isolate);
base::Mutex lock_;
bool initialized_;
int thread_pool_size_;
std::vector<WorkerThread*> thread_pool_;
TaskQueue queue_;
- std::map<v8::Isolate*, std::queue<Task*> > main_thread_queue_;
+ std::map<v8::Isolate*, std::queue<Task*>> main_thread_queue_;
+ std::map<v8::Isolate*, std::queue<IdleTask*>> main_thread_idle_queue_;
typedef std::pair<double, Task*> DelayedEntry;
std::map<v8::Isolate*,
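With the default platform now supporting idle tasks, an embedder can drain the foreground task queue and then donate spare time to V8. A minimal usage sketch, assuming platform and isolate are the embedder's existing v8::Platform* and v8::Isolate* (API as added above):

    // Drain pending foreground tasks for this isolate; PumpMessageLoop()
    // runs one task per call and returns true while work remains.
    while (v8::platform::PumpMessageLoop(platform, isolate)) {
    }
    // Donate up to 10 ms of idle time; RunIdleTasks() returns as soon as
    // the idle queue is empty or the deadline passes.
    v8::platform::RunIdleTasks(platform, isolate, 0.01);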
diff --git a/deps/v8/src/libplatform/tracing/trace-config.cc b/deps/v8/src/libplatform/tracing/trace-config.cc
index e77d191e5e..ff90eff71c 100644
--- a/deps/v8/src/libplatform/tracing/trace-config.cc
+++ b/deps/v8/src/libplatform/tracing/trace-config.cc
@@ -21,8 +21,13 @@ TraceConfig* TraceConfig::CreateDefaultTraceConfig() {
}
bool TraceConfig::IsCategoryGroupEnabled(const char* category_group) const {
- for (auto included_category : included_categories_) {
- if (strcmp(included_category.data(), category_group) == 0) return true;
+ std::stringstream category_stream(category_group);
+ while (category_stream.good()) {
+ std::string category;
+ getline(category_stream, category, ',');
+ for (const auto& included_category : included_categories_) {
+ if (category == included_category) return true;
+ }
}
return false;
}
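The rewritten check splits the incoming category group on commas, so a group is enabled when any one of its categories is included. A small sketch of the resulting behavior, assuming TraceConfig::AddIncludedCategory from the same tracing API:

    using v8::platform::tracing::TraceConfig;
    TraceConfig* config = TraceConfig::CreateDefaultTraceConfig();
    config->AddIncludedCategory("v8.gc");
    config->IsCategoryGroupEnabled("v8.gc");          // true: exact match
    config->IsCategoryGroupEnabled("v8.gc,v8.wasm");  // now true: one member matches
    config->IsCategoryGroupEnabled("v8.wasm");        // false: not included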
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 9a2d11f96a..5ef6d6e340 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
@@ -34,7 +35,7 @@ template<typename T, class P>
void List<T, P>::AddAll(const Vector<T>& other, P alloc) {
int result_length = length_ + other.length();
if (capacity_ < result_length) Resize(result_length, alloc);
- if (base::is_fundamental<T>()) {
+ if (std::is_fundamental<T>()) {
memcpy(data_ + length_, other.start(), sizeof(*data_) * other.length());
} else {
for (int i = 0; i < other.length(); i++) data_[length_ + i] = other.at(i);
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index 049286572b..b59ece463e 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -8,7 +8,7 @@
#include <algorithm>
#include "src/checks.h"
-#include "src/utils.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
@@ -64,8 +64,8 @@ class List {
// not safe to use after operations that can change the list's
// backing store (e.g. Add).
inline T& operator[](int i) const {
- DCHECK(0 <= i);
- SLOW_DCHECK(static_cast<unsigned>(i) < static_cast<unsigned>(length_));
+ DCHECK_LE(0, i);
+ DCHECK_GT(static_cast<unsigned>(length_), static_cast<unsigned>(i));
return data_[i];
}
inline T& at(int i) const { return operator[](i); }
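The rewritten DCHECKs keep the classic single-comparison bounds check: casting both index and length to unsigned makes a negative index wrap to a huge value, so one compare covers both ends of the range. A standalone sketch:

    #include <cassert>

    // Returns true iff 0 <= i < length, using a single unsigned compare.
    inline bool InBounds(int i, int length) {
      return static_cast<unsigned>(i) < static_cast<unsigned>(length);
    }

    int main() {
      assert(InBounds(0, 4) && InBounds(3, 4));
      assert(!InBounds(-1, 4) && !InBounds(4, 4));
    }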
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index b165b3ee9a..bc414e848d 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -110,9 +110,9 @@ class Log {
// Implementation of writing to a log file.
int WriteToFile(const char* msg, int length) {
- DCHECK(output_handle_ != NULL);
+ DCHECK_NOT_NULL(output_handle_);
size_t rv = fwrite(msg, 1, length, output_handle_);
- DCHECK(static_cast<size_t>(length) == rv);
+ DCHECK_EQ(length, rv);
USE(rv);
fflush(output_handle_);
return length;
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index bc52d053f3..0095cf43a7 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -1279,7 +1279,7 @@ void Logger::RuntimeCallTimerEvent() {
if (counter == nullptr) return;
Log::MessageBuilder msg(log_);
msg.Append("active-runtime-timer,");
- msg.AppendDoubleQuotedString(counter->name);
+ msg.AppendDoubleQuotedString(counter->name());
msg.WriteToLogFile();
}
@@ -1329,6 +1329,17 @@ void Logger::LogFailure() {
StopProfiler();
}
+static void AddFunctionAndCode(SharedFunctionInfo* sfi,
+ AbstractCode* code_object,
+ Handle<SharedFunctionInfo>* sfis,
+ Handle<AbstractCode>* code_objects, int offset) {
+ if (sfis != NULL) {
+ sfis[offset] = Handle<SharedFunctionInfo>(sfi);
+ }
+ if (code_objects != NULL) {
+ code_objects[offset] = Handle<AbstractCode>(code_object);
+ }
+}
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
public:
@@ -1345,14 +1356,11 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
Object* maybe_script = sfi->script();
if (maybe_script->IsScript()
&& !Script::cast(maybe_script)->HasValidSource()) return;
- if (sfis_ != NULL) {
- sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
- }
- if (code_objects_ != NULL) {
- DCHECK(function->abstract_code()->kind() ==
- AbstractCode::OPTIMIZED_FUNCTION);
- code_objects_[*count_] = Handle<AbstractCode>(function->abstract_code());
- }
+
+ DCHECK(function->abstract_code()->kind() ==
+ AbstractCode::OPTIMIZED_FUNCTION);
+ AddFunctionAndCode(sfi, function->abstract_code(), sfis_, code_objects_,
+ *count_);
*count_ = *count_ + 1;
}
@@ -1377,14 +1385,19 @@ static int EnumerateCompiledFunctions(Heap* heap,
if (sfi->is_compiled()
&& (!sfi->script()->IsScript()
|| Script::cast(sfi->script())->HasValidSource())) {
- if (sfis != NULL) {
- sfis[compiled_funcs_count] = Handle<SharedFunctionInfo>(sfi);
+ // In some cases, an SFI might have (and be executing!) both bytecode
+ // and baseline code, so check for both and add them both if needed.
+ if (sfi->HasBytecodeArray()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi->bytecode_array()), sfis,
+ code_objects, compiled_funcs_count);
+ ++compiled_funcs_count;
}
- if (code_objects != NULL) {
- code_objects[compiled_funcs_count] =
- Handle<AbstractCode>(sfi->abstract_code());
+
+ if (!sfi->IsInterpreted()) {
+ AddFunctionAndCode(sfi, AbstractCode::cast(sfi->code()), sfis,
+ code_objects, compiled_funcs_count);
+ ++compiled_funcs_count;
}
- ++compiled_funcs_count;
}
}
@@ -1469,6 +1482,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A Wasm to JavaScript adapter";
tag = CodeEventListener::STUB_TAG;
break;
+ case AbstractCode::WASM_INTERPRETER_ENTRY:
+ description = "A Wasm to Interpreter adapter";
+ tag = CodeEventListener::STUB_TAG;
+ break;
case AbstractCode::NUMBER_OF_KINDS:
UNIMPLEMENTED();
}
@@ -1512,7 +1529,6 @@ void Logger::LogBytecodeHandlers() {
void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<AbstractCode> code) {
- Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
@@ -1551,11 +1567,12 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
#if USES_FUNCTION_DESCRIPTORS
entry_point = *FUNCTION_ENTRYPOINT_ADDRESS(entry_point);
#endif
- PROFILE(isolate_, CallbackEvent(*func_name, entry_point));
+ PROFILE(isolate_, CallbackEvent(shared->DebugName(), entry_point));
}
} else {
- PROFILE(isolate_, CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG,
- *code, *shared, *func_name));
+ PROFILE(isolate_,
+ CodeCreateEvent(CodeEventListener::LAZY_COMPILE_TAG, *code, *shared,
+ isolate_->heap()->empty_string()));
}
}
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 186823df84..1075f90e16 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -73,7 +73,7 @@ void LookupIterator::Next() {
JSReceiver* holder = *holder_;
Map* map = holder->map();
- if (map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ if (map->IsSpecialReceiverMap()) {
state_ = IsElement() ? LookupInSpecialHolder<true>(map, holder)
: LookupInSpecialHolder<false>(map, holder);
if (IsFound()) return;
@@ -275,8 +275,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
JSObject::MigrateToMap(holder, new_map);
ReloadPropertyInformation<false>();
} else {
- PropertyDetails details(attributes, v8::internal::DATA, 0,
- PropertyCellType::kMutable);
+ PropertyDetails details(kData, attributes, 0, PropertyCellType::kMutable);
if (holder->IsJSGlobalObject()) {
Handle<GlobalDictionary> dictionary(holder->global_dictionary());
@@ -344,7 +343,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
// SetNextEnumerationIndex.
int index = dictionary->NextEnumerationIndex();
dictionary->SetNextEnumerationIndex(index + 1);
- property_details_ = PropertyDetails(attributes, i::DATA, index,
+ property_details_ = PropertyDetails(kData, attributes, index,
PropertyCellType::kUninitialized);
PropertyCellType new_type =
PropertyCell::UpdatedType(cell, value, property_details_);
@@ -355,7 +354,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
} else {
// Don't set enumeration index (it will be set during value store).
property_details_ =
- PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
transition_ = map;
}
return;
@@ -369,7 +368,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
if (transition->is_dictionary_map()) {
// Don't set enumeration index (it will be set during value store).
property_details_ =
- PropertyDetails(attributes, i::DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails(kData, attributes, 0, PropertyCellType::kNoCell);
} else {
property_details_ = transition->GetLastDescriptorDetails();
has_property_ = true;
@@ -518,19 +517,15 @@ void LookupIterator::TransitionToAccessorPair(Handle<Object> pair,
Handle<JSObject> receiver = GetStoreTarget();
holder_ = receiver;
- PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0,
- PropertyCellType::kMutable);
+ PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kMutable);
if (IsElement()) {
// TODO(verwaest): Move code into the element accessor.
Handle<SeededNumberDictionary> dictionary =
JSObject::NormalizeElements(receiver);
- // We unconditionally pass used_as_prototype=false here because the call
- // to RequireSlowElements takes care of the required IC clearing and
- // we don't want to walk the heap twice.
- dictionary =
- SeededNumberDictionary::Set(dictionary, index_, pair, details, false);
+ dictionary = SeededNumberDictionary::Set(dictionary, index_, pair, details,
+ receiver);
receiver->RequireSlowElements(*dictionary);
if (receiver->HasSlowArgumentsElements()) {
@@ -596,7 +591,8 @@ Handle<Object> LookupIterator::FetchValue() const {
result = PropertyCell::cast(result)->value();
} else if (!holder_->HasFastProperties()) {
result = holder_->property_dictionary()->ValueAt(number_);
- } else if (property_details_.type() == v8::internal::DATA) {
+ } else if (property_details_.location() == kField) {
+ DCHECK_EQ(kData, property_details_.kind());
Handle<JSObject> holder = GetHolder<JSObject>();
FieldIndex field_index = FieldIndex::ForDescriptor(holder->map(), number_);
return JSObject::FastPropertyAt(holder, property_details_.representation(),
@@ -610,14 +606,16 @@ Handle<Object> LookupIterator::FetchValue() const {
int LookupIterator::GetFieldDescriptorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(v8::internal::DATA, property_details_.type());
+ DCHECK_EQ(kField, property_details_.location());
+ DCHECK_EQ(kData, property_details_.kind());
return descriptor_number();
}
int LookupIterator::GetAccessorIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(v8::internal::ACCESSOR_CONSTANT, property_details_.type());
+ DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(kAccessor, property_details_.kind());
return descriptor_number();
}
@@ -625,7 +623,8 @@ int LookupIterator::GetAccessorIndex() const {
int LookupIterator::GetConstantIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+ DCHECK_EQ(kDescriptor, property_details_.location());
+ DCHECK_EQ(kData, property_details_.kind());
DCHECK(!IsElement());
return descriptor_number();
}
@@ -634,7 +633,7 @@ int LookupIterator::GetConstantIndex() const {
FieldIndex LookupIterator::GetFieldIndex() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(v8::internal::DATA, property_details_.type());
+ DCHECK_EQ(kField, property_details_.location());
DCHECK(!IsElement());
Map* holder_map = holder_->map();
int index =
@@ -646,7 +645,7 @@ FieldIndex LookupIterator::GetFieldIndex() const {
Handle<FieldType> LookupIterator::GetFieldType() const {
DCHECK(has_property_);
DCHECK(holder_->HasFastProperties());
- DCHECK_EQ(v8::internal::DATA, property_details_.type());
+ DCHECK_EQ(kField, property_details_.location());
return handle(
holder_->map()->instance_descriptors()->GetFieldType(descriptor_number()),
isolate_);
@@ -683,11 +682,11 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
ElementsAccessor* accessor = object->GetElementsAccessor();
accessor->Set(object, number_, *value);
} else if (holder->HasFastProperties()) {
- if (property_details_.type() == v8::internal::DATA) {
+ if (property_details_.location() == kField) {
JSObject::cast(*holder)->WriteToField(descriptor_number(),
property_details_, *value);
} else {
- DCHECK_EQ(v8::internal::DATA_CONSTANT, property_details_.type());
+ DCHECK_EQ(kDescriptor, property_details_.location());
}
} else if (holder->IsJSGlobalObject()) {
GlobalDictionary* dictionary = JSObject::cast(*holder)->global_dictionary();
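The changes in this file replace the old combined PropertyType checks with the orthogonal kind/location pair. For reference, a sketch of the correspondence (the ACCESSOR row is an assumption; the patch only exercises the other three):

    Old PropertyType      ->  (PropertyKind, PropertyLocation)
    DATA                  ->  (kData,     kField)
    DATA_CONSTANT         ->  (kData,     kDescriptor)
    ACCESSOR              ->  (kAccessor, kField)
    ACCESSOR_CONSTANT     ->  (kAccessor, kDescriptor)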
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index e0b40c40fe..5f7a293e36 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -288,7 +288,7 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
void NextInternal(Map* map, JSReceiver* holder);
template <bool is_element>
inline State LookupInHolder(Map* map, JSReceiver* holder) {
- return map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE
+ return map->IsSpecialReceiverMap()
? LookupInSpecialHolder<is_element>(map, holder)
: LookupInRegularHolder<is_element>(map, holder);
}
diff --git a/deps/v8/src/machine-type.h b/deps/v8/src/machine-type.h
index 844c956e7b..a59aced36e 100644
--- a/deps/v8/src/machine-type.h
+++ b/deps/v8/src/machine-type.h
@@ -79,28 +79,16 @@ class MachineType {
return semantic() == MachineSemantic::kUint32 ||
semantic() == MachineSemantic::kUint64;
}
-
static MachineRepresentation PointerRepresentation() {
return (kPointerSize == 4) ? MachineRepresentation::kWord32
: MachineRepresentation::kWord64;
}
- static MachineType Pointer() {
- return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+ static MachineType UintPtr() {
+ return (kPointerSize == 4) ? Uint32() : Uint64();
}
static MachineType IntPtr() {
return (kPointerSize == 4) ? Int32() : Int64();
}
- static MachineType Float32() {
- return MachineType(MachineRepresentation::kFloat32,
- MachineSemantic::kNumber);
- }
- static MachineType Float64() {
- return MachineType(MachineRepresentation::kFloat64,
- MachineSemantic::kNumber);
- }
- static MachineType Simd128() {
- return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
- }
static MachineType Int8() {
return MachineType(MachineRepresentation::kWord8, MachineSemantic::kInt32);
}
@@ -128,6 +116,20 @@ class MachineType {
return MachineType(MachineRepresentation::kWord64,
MachineSemantic::kUint64);
}
+ static MachineType Float32() {
+ return MachineType(MachineRepresentation::kFloat32,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Float64() {
+ return MachineType(MachineRepresentation::kFloat64,
+ MachineSemantic::kNumber);
+ }
+ static MachineType Simd128() {
+ return MachineType(MachineRepresentation::kSimd128, MachineSemantic::kNone);
+ }
+ static MachineType Pointer() {
+ return MachineType(PointerRepresentation(), MachineSemantic::kNone);
+ }
static MachineType TaggedPointer() {
return MachineType(MachineRepresentation::kTaggedPointer,
MachineSemantic::kAny);
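MachineType pairs a representation with a semantic, and the new UintPtr() mirrors IntPtr() by dispatching on the target pointer size. A standalone sketch of the same dispatch outside V8:

    #include <cstdint>
    #include <type_traits>

    // Pick the unsigned integer type matching the pointer width, as
    // MachineType::UintPtr() picks Uint32() or Uint64().
    using uintptr_like =
        std::conditional<sizeof(void*) == 4, std::uint32_t, std::uint64_t>::type;
    static_assert(sizeof(uintptr_like) == sizeof(void*), "pointer sized");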
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index b6830450e7..1c76c69299 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -5,8 +5,7 @@
#ifndef V8_MACRO_ASSEMBLER_H_
#define V8_MACRO_ASSEMBLER_H_
-#include "src/assembler.h"
-
+#include "src/assembler-inl.h"
// Helper types to make boolean flag easier to read at call-site.
enum InvokeFlag {
@@ -36,47 +35,29 @@ enum AllocationFlags {
};
#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/assembler-ia32-inl.h"
#include "src/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64.h"
-#include "src/x64/assembler-x64-inl.h"
#include "src/x64/macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64.h"
-#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/constants-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
-#include "src/arm/assembler-arm-inl.h"
#include "src/arm/constants-arm.h"
#include "src/arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc.h"
-#include "src/ppc/assembler-ppc-inl.h"
#include "src/ppc/constants-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips.h"
-#include "src/mips/assembler-mips-inl.h"
#include "src/mips/constants-mips.h"
#include "src/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64.h"
-#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/constants-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390.h"
-#include "src/s390/assembler-s390-inl.h"
#include "src/s390/constants-s390.h"
#include "src/s390/macro-assembler-s390.h"
#elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87.h"
-#include "src/x87/assembler-x87-inl.h"
#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
new file mode 100644
index 0000000000..a9b9a60f6d
--- /dev/null
+++ b/deps/v8/src/map-updater.cc
@@ -0,0 +1,615 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/map-updater.h"
+
+#include "src/field-type.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "src/transitions.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
+ if (obj1 == obj2) return true; // Valid for both kData and kAccessor kinds.
+ // TODO(ishell): compare AccessorPairs.
+ return false;
+}
+
+inline bool LocationFitsInto(PropertyLocation what, PropertyLocation where) {
+ return where == kField || what == kDescriptor;
+}
+
+} // namespace
+
+Name* MapUpdater::GetKey(int descriptor) const {
+ return old_descriptors_->GetKey(descriptor);
+}
+
+PropertyDetails MapUpdater::GetDetails(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ return PropertyDetails(new_kind_, new_attributes_, new_location_,
+ new_representation_);
+ }
+ return old_descriptors_->GetDetails(descriptor);
+}
+
+Object* MapUpdater::GetValue(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ DCHECK_EQ(kDescriptor, new_location_);
+ return *new_value_;
+ }
+ DCHECK_EQ(kDescriptor, GetDetails(descriptor).location());
+ return old_descriptors_->GetValue(descriptor);
+}
+
+FieldType* MapUpdater::GetFieldType(int descriptor) const {
+ DCHECK_LE(0, descriptor);
+ if (descriptor == modified_descriptor_) {
+ DCHECK_EQ(kField, new_location_);
+ return *new_field_type_;
+ }
+ DCHECK_EQ(kField, GetDetails(descriptor).location());
+ return old_descriptors_->GetFieldType(descriptor);
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+ int descriptor, PropertyLocation location,
+ Representation representation) const {
+ DCHECK_LE(0, descriptor);
+ // |location| is just a pre-fetched GetDetails(descriptor).location().
+ DCHECK_EQ(location, GetDetails(descriptor).location());
+ if (location == kField) {
+ return handle(GetFieldType(descriptor), isolate_);
+ } else {
+ return GetValue(descriptor)->OptimalType(isolate_, representation);
+ }
+}
+
+Handle<FieldType> MapUpdater::GetOrComputeFieldType(
+ Handle<DescriptorArray> descriptors, int descriptor,
+ PropertyLocation location, Representation representation) {
+ // |location| is just a pre-fetched GetDetails(descriptor).location().
+ DCHECK_EQ(descriptors->GetDetails(descriptor).location(), location);
+ if (location == kField) {
+ return handle(descriptors->GetFieldType(descriptor), isolate_);
+ } else {
+ return descriptors->GetValue(descriptor)
+ ->OptimalType(isolate_, representation);
+ }
+}
+
+Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
+ PropertyAttributes attributes,
+ Representation representation,
+ Handle<FieldType> field_type) {
+ DCHECK_EQ(kInitialized, state_);
+ DCHECK_LE(0, descriptor);
+ DCHECK(!old_map_->is_dictionary_map());
+ modified_descriptor_ = descriptor;
+ new_kind_ = kData;
+ new_attributes_ = attributes;
+ new_location_ = kField;
+ new_representation_ = representation;
+ new_field_type_ = field_type;
+
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+
+ // If the property kind is not reconfigured, merge the result with the
+ // representation/field type from the old descriptor.
+ if (old_details.kind() == new_kind_) {
+ Representation old_representation = old_details.representation();
+ new_representation_ = new_representation_.generalize(old_representation);
+
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(old_descriptors_, modified_descriptor_,
+ old_details.location(), new_representation_);
+
+ new_field_type_ = Map::GeneralizeFieldType(
+ old_representation, old_field_type, new_representation_,
+ new_field_type_, isolate_);
+ }
+
+ if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ ConstructNewMap();
+ DCHECK_EQ(kEnd, state_);
+ return result_map_;
+}
+
+Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
+ DCHECK_EQ(kInitialized, state_);
+ new_elements_kind_ = elements_kind;
+
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ ConstructNewMap();
+ DCHECK_EQ(kEnd, state_);
+ return result_map_;
+}
+
+Handle<Map> MapUpdater::Update() {
+ DCHECK_EQ(kInitialized, state_);
+ DCHECK(old_map_->is_deprecated());
+
+ if (FindRootMap() == kEnd) return result_map_;
+ if (FindTargetMap() == kEnd) return result_map_;
+ ConstructNewMap();
+ DCHECK_EQ(kEnd, state_);
+ return result_map_;
+}
+
+MapUpdater::State MapUpdater::CopyGeneralizeAllFields(const char* reason) {
+ result_map_ = Map::CopyGeneralizeAllFields(old_map_, new_elements_kind_,
+ modified_descriptor_, new_kind_,
+ new_attributes_, reason);
+ state_ = kEnd;
+ return state_; // Done.
+}
+
+MapUpdater::State MapUpdater::TryRecofigureToDataFieldInplace() {
+ // If it's just a representation generalization case (i.e. property kind and
+ // attributes stay unchanged) it's fine to transition from None to anything
+ // but double without any modification to the object, because the default
+ // uninitialized value for representation None can be overwritten by both
+ // smi and tagged values. Doubles, however, would require a box allocation.
+ if (new_representation_.IsNone() || new_representation_.IsDouble()) {
+ return state_; // Not done yet.
+ }
+
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ Representation old_representation = old_details.representation();
+ if (!old_representation.IsNone()) {
+ return state_; // Not done yet.
+ }
+
+ DCHECK_EQ(new_kind_, old_details.kind());
+ DCHECK_EQ(new_attributes_, old_details.attributes());
+ DCHECK_EQ(kField, old_details.location());
+ if (FLAG_trace_generalization) {
+ old_map_->PrintGeneralization(
+ stdout, "uninitialized field", modified_descriptor_, old_nof_, old_nof_,
+ false, old_representation, new_representation_,
+ handle(old_descriptors_->GetFieldType(modified_descriptor_), isolate_),
+ MaybeHandle<Object>(), new_field_type_, MaybeHandle<Object>());
+ }
+ Handle<Map> field_owner(old_map_->FindFieldOwner(modified_descriptor_),
+ isolate_);
+
+ Map::GeneralizeField(field_owner, modified_descriptor_, new_representation_,
+ new_field_type_);
+ // Check that the descriptor array was updated.
+ DCHECK(old_descriptors_->GetDetails(modified_descriptor_)
+ .representation()
+ .Equals(new_representation_));
+ DCHECK(old_descriptors_->GetFieldType(modified_descriptor_)
+ ->NowIs(new_field_type_));
+
+ result_map_ = old_map_;
+ state_ = kEnd;
+ return state_; // Done.
+}
+
+MapUpdater::State MapUpdater::FindRootMap() {
+ DCHECK_EQ(kInitialized, state_);
+ // Check the state of the root map.
+ root_map_ = handle(old_map_->FindRootMap(), isolate_);
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ if (!old_map_->EquivalentToForTransition(*root_map_)) {
+ return CopyGeneralizeAllFields("GenAll_NotEquivalent");
+ }
+
+ ElementsKind from_kind = root_map_->elements_kind();
+ ElementsKind to_kind = new_elements_kind_;
+ // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
+ if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
+ to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
+ to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
+ !(IsTransitionableFastElementsKind(from_kind) &&
+ IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
+ return CopyGeneralizeAllFields("GenAll_InvalidElementsTransition");
+ }
+
+ if (modified_descriptor_ >= 0 && modified_descriptor_ < root_nof) {
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ if (old_details.kind() != new_kind_ ||
+ old_details.attributes() != new_attributes_) {
+ return CopyGeneralizeAllFields("GenAll_RootModification1");
+ }
+ if (!new_representation_.fits_into(old_details.representation())) {
+ return CopyGeneralizeAllFields("GenAll_RootModification2");
+ }
+ if (old_details.location() != kField) {
+ return CopyGeneralizeAllFields("GenAll_RootModification3");
+ }
+ DCHECK_EQ(kData, old_details.kind());
+ DCHECK_EQ(kData, new_kind_);
+ DCHECK_EQ(kField, new_location_);
+ FieldType* old_field_type =
+ old_descriptors_->GetFieldType(modified_descriptor_);
+ if (!new_field_type_->NowIs(old_field_type)) {
+ return CopyGeneralizeAllFields("GenAll_RootModification4");
+ }
+ }
+
+ // From here on, use the map with correct elements kind as root map.
+ if (from_kind != to_kind) {
+ root_map_ = Map::AsElementsKind(root_map_, to_kind);
+ }
+ state_ = kAtRootMap;
+ return state_; // Not done yet.
+}
+
+MapUpdater::State MapUpdater::FindTargetMap() {
+ DCHECK_EQ(kAtRootMap, state_);
+ target_map_ = root_map_;
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ for (int i = root_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Map* transition = TransitionArray::SearchTransition(
+ *target_map_, old_details.kind(), GetKey(i), old_details.attributes());
+ if (transition == NULL) break;
+ Handle<Map> tmp_map(transition, isolate_);
+
+ Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+ isolate_);
+
+ // Check if target map is incompatible.
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ DCHECK_EQ(old_details.kind(), tmp_details.kind());
+ DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+ if (old_details.kind() == kAccessor &&
+ !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ return CopyGeneralizeAllFields("GenAll_Incompatible");
+ }
+ // Check if old location fits into tmp location.
+ if (!LocationFitsInto(old_details.location(), tmp_details.location())) {
+ break;
+ }
+
+ // Check if old representation fits into tmp representation.
+ Representation tmp_representation = tmp_details.representation();
+ if (!old_details.representation().fits_into(tmp_representation)) {
+ break;
+ }
+
+ if (tmp_details.location() == kField) {
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(i, old_details.location(), tmp_representation);
+ Map::GeneralizeField(tmp_map, i, tmp_representation, old_field_type);
+ } else {
+ // kDescriptor: Check that the value matches.
+ if (!EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ break;
+ }
+ }
+ DCHECK(!tmp_map->is_deprecated());
+ target_map_ = tmp_map;
+ }
+
+ // Directly change the map if the target map is more general.
+ int target_nof = target_map_->NumberOfOwnDescriptors();
+ if (target_nof == old_nof_) {
+#ifdef DEBUG
+ if (modified_descriptor_ >= 0) {
+ DescriptorArray* target_descriptors = target_map_->instance_descriptors();
+ PropertyDetails details =
+ target_descriptors->GetDetails(modified_descriptor_);
+ DCHECK_EQ(new_kind_, details.kind());
+ DCHECK_EQ(new_attributes_, details.attributes());
+ DCHECK_EQ(new_location_, details.location());
+ DCHECK(new_representation_.fits_into(details.representation()));
+ if (new_location_ == kField) {
+ DCHECK_EQ(kField, details.location());
+ DCHECK(new_field_type_->NowIs(
+ target_descriptors->GetFieldType(modified_descriptor_)));
+ } else {
+ DCHECK(details.location() == kField ||
+ EqualImmutableValues(*new_value_, target_descriptors->GetValue(
+ modified_descriptor_)));
+ }
+ }
+#endif
+ if (*target_map_ != *old_map_) {
+ old_map_->NotifyLeafMapLayoutChange();
+ }
+ result_map_ = target_map_;
+ state_ = kEnd;
+ return state_; // Done.
+ }
+
+ // Find the last compatible target map in the transition tree.
+ for (int i = target_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Map* transition = TransitionArray::SearchTransition(
+ *target_map_, old_details.kind(), GetKey(i), old_details.attributes());
+ if (transition == NULL) break;
+ Handle<Map> tmp_map(transition, isolate_);
+ Handle<DescriptorArray> tmp_descriptors(tmp_map->instance_descriptors(),
+ isolate_);
+
+#ifdef DEBUG
+ // Check that target map is compatible.
+ PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+ DCHECK_EQ(old_details.kind(), tmp_details.kind());
+ DCHECK_EQ(old_details.attributes(), tmp_details.attributes());
+#endif
+ if (old_details.kind() == kAccessor &&
+ !EqualImmutableValues(GetValue(i), tmp_descriptors->GetValue(i))) {
+ return CopyGeneralizeAllFields("GenAll_Incompatible");
+ }
+ DCHECK(!tmp_map->is_deprecated());
+ target_map_ = tmp_map;
+ }
+
+ state_ = kAtTargetMap;
+ return state_; // Not done yet.
+}
+
+Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
+ int target_nof = target_map_->NumberOfOwnDescriptors();
+ Handle<DescriptorArray> target_descriptors(
+ target_map_->instance_descriptors(), isolate_);
+
+ // Allocate a new descriptor array large enough to hold the required
+ // descriptors, and at minimum the same size as the old descriptor
+ // array.
+ int new_slack =
+ Max(old_nof_, old_descriptors_->number_of_descriptors()) - old_nof_;
+ Handle<DescriptorArray> new_descriptors =
+ DescriptorArray::Allocate(isolate_, old_nof_, new_slack);
+ DCHECK(new_descriptors->length() > target_descriptors->length() ||
+ new_descriptors->NumberOfSlackDescriptors() > 0 ||
+ new_descriptors->number_of_descriptors() ==
+ old_descriptors_->number_of_descriptors());
+ DCHECK(new_descriptors->number_of_descriptors() == old_nof_);
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+
+ // We passed the root modification check in FindRootMap(), so
+ // the root descriptors are either not modified at all or already more
+ // general than we requested. Take |root_nof| entries as is.
+ // 0 -> |root_nof|
+ int current_offset = 0;
+ for (int i = 0; i < root_nof; ++i) {
+ PropertyDetails old_details = old_descriptors_->GetDetails(i);
+ if (old_details.location() == kField) {
+ current_offset += old_details.field_width_in_words();
+ }
+ Descriptor d(handle(GetKey(i), isolate_),
+ handle(old_descriptors_->GetValue(i), isolate_), old_details);
+ new_descriptors->Set(i, &d);
+ }
+
+ // Merge "updated" old_descriptor entries with target_descriptor entries.
+ // |root_nof| -> |target_nof|
+ for (int i = root_nof; i < target_nof; ++i) {
+ Handle<Name> key(GetKey(i), isolate_);
+ PropertyDetails old_details = GetDetails(i);
+ PropertyDetails target_details = target_descriptors->GetDetails(i);
+
+ PropertyKind next_kind = old_details.kind();
+ PropertyAttributes next_attributes = old_details.attributes();
+ PropertyLocation next_location =
+ old_details.location() == kField ||
+ target_details.location() == kField ||
+ !EqualImmutableValues(target_descriptors->GetValue(i),
+ GetValue(i))
+ ? kField
+ : kDescriptor;
+
+ Representation next_representation =
+ old_details.representation().generalize(
+ target_details.representation());
+
+ DCHECK_EQ(next_kind, target_details.kind());
+ DCHECK_EQ(next_attributes, target_details.attributes());
+
+ if (next_location == kField) {
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+ Handle<FieldType> target_field_type =
+ GetOrComputeFieldType(target_descriptors, i,
+ target_details.location(), next_representation);
+
+ Handle<FieldType> next_field_type = Map::GeneralizeFieldType(
+ old_details.representation(), old_field_type, next_representation,
+ target_field_type, isolate_);
+
+ Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
+ Descriptor d;
+ if (next_kind == kData) {
+ d = Descriptor::DataField(key, current_offset, wrapped_type,
+ next_attributes, next_representation);
+ } else {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ UNIMPLEMENTED();
+ }
+ current_offset += d.GetDetails().field_width_in_words();
+ new_descriptors->Set(i, &d);
+ } else {
+ DCHECK_EQ(kDescriptor, next_location);
+
+ Handle<Object> value(GetValue(i), isolate_);
+ Descriptor d;
+ if (next_kind == kData) {
+ d = Descriptor::DataConstant(key, value, next_attributes);
+ } else {
+ DCHECK_EQ(kAccessor, next_kind);
+ d = Descriptor::AccessorConstant(key, value, next_attributes);
+ }
+ new_descriptors->Set(i, &d);
+ }
+ }
+
+ // Take "updated" old_descriptor entries.
+ // |target_nof| -> |old_nof|
+ for (int i = target_nof; i < old_nof_; ++i) {
+ PropertyDetails old_details = GetDetails(i);
+ Handle<Name> key(GetKey(i), isolate_);
+
+ PropertyKind next_kind = old_details.kind();
+ PropertyAttributes next_attributes = old_details.attributes();
+ PropertyLocation next_location = old_details.location();
+ Representation next_representation = old_details.representation();
+
+ Descriptor d;
+ if (next_location == kField) {
+ Handle<FieldType> old_field_type =
+ GetOrComputeFieldType(i, old_details.location(), next_representation);
+
+ Handle<Object> wrapped_type(Map::WrapFieldType(old_field_type));
+ Descriptor d;
+ if (next_kind == kData) {
+ d = Descriptor::DataField(key, current_offset, wrapped_type,
+ next_attributes, next_representation);
+ } else {
+ // TODO(ishell): mutable accessors are not implemented yet.
+ UNIMPLEMENTED();
+ }
+ current_offset += d.GetDetails().field_width_in_words();
+ new_descriptors->Set(i, &d);
+ } else {
+ DCHECK_EQ(kDescriptor, next_location);
+
+ Handle<Object> value(GetValue(i), isolate_);
+ if (next_kind == kData) {
+ d = Descriptor::DataConstant(key, value, next_attributes);
+ } else {
+ DCHECK_EQ(kAccessor, next_kind);
+ d = Descriptor::AccessorConstant(key, value, next_attributes);
+ }
+ new_descriptors->Set(i, &d);
+ }
+ }
+
+ new_descriptors->Sort();
+ return new_descriptors;
+}
+
+Handle<Map> MapUpdater::FindSplitMap(Handle<DescriptorArray> descriptors) {
+ DisallowHeapAllocation no_allocation;
+
+ int root_nof = root_map_->NumberOfOwnDescriptors();
+ Map* current = *root_map_;
+ for (int i = root_nof; i < old_nof_; i++) {
+ Name* name = descriptors->GetKey(i);
+ PropertyDetails details = descriptors->GetDetails(i);
+ Map* next = TransitionArray::SearchTransition(current, details.kind(), name,
+ details.attributes());
+ if (next == NULL) break;
+ DescriptorArray* next_descriptors = next->instance_descriptors();
+
+ PropertyDetails next_details = next_descriptors->GetDetails(i);
+ DCHECK_EQ(details.kind(), next_details.kind());
+ DCHECK_EQ(details.attributes(), next_details.attributes());
+ if (details.location() != next_details.location()) break;
+ if (!details.representation().Equals(next_details.representation())) break;
+
+ if (next_details.location() == kField) {
+ FieldType* next_field_type = next_descriptors->GetFieldType(i);
+ if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
+ break;
+ }
+ } else {
+ if (!EqualImmutableValues(descriptors->GetValue(i),
+ next_descriptors->GetValue(i))) {
+ break;
+ }
+ }
+ current = next;
+ }
+ return handle(current, isolate_);
+}
+
+MapUpdater::State MapUpdater::ConstructNewMap() {
+ Handle<DescriptorArray> new_descriptors = BuildDescriptorArray();
+
+ Handle<Map> split_map = FindSplitMap(new_descriptors);
+ int split_nof = split_map->NumberOfOwnDescriptors();
+ DCHECK_NE(old_nof_, split_nof);
+
+ PropertyDetails split_details = GetDetails(split_nof);
+
+ // Invalidate a transition target at |key|.
+ Map* maybe_transition = TransitionArray::SearchTransition(
+ *split_map, split_details.kind(), GetKey(split_nof),
+ split_details.attributes());
+ if (maybe_transition != NULL) {
+ maybe_transition->DeprecateTransitionTree();
+ }
+
+ // If |maybe_transition| is not NULL then the transition array already
+ // contains an entry for the given descriptor, so the transition can be
+ // inserted regardless of whether the transition array is full.
+ if (maybe_transition == NULL &&
+ !TransitionArray::CanHaveMoreTransitions(split_map)) {
+ return CopyGeneralizeAllFields("GenAll_CantHaveMoreTransitions");
+ }
+
+ old_map_->NotifyLeafMapLayoutChange();
+
+ if (FLAG_trace_generalization && modified_descriptor_ >= 0) {
+ PropertyDetails old_details =
+ old_descriptors_->GetDetails(modified_descriptor_);
+ PropertyDetails new_details =
+ new_descriptors->GetDetails(modified_descriptor_);
+ MaybeHandle<FieldType> old_field_type;
+ MaybeHandle<FieldType> new_field_type;
+ MaybeHandle<Object> old_value;
+ MaybeHandle<Object> new_value;
+ if (old_details.location() == kField) {
+ old_field_type = handle(
+ old_descriptors_->GetFieldType(modified_descriptor_), isolate_);
+ } else {
+ old_value =
+ handle(old_descriptors_->GetValue(modified_descriptor_), isolate_);
+ }
+ if (new_details.location() == kField) {
+ new_field_type =
+ handle(new_descriptors->GetFieldType(modified_descriptor_), isolate_);
+ } else {
+ new_value =
+ handle(new_descriptors->GetValue(modified_descriptor_), isolate_);
+ }
+
+ old_map_->PrintGeneralization(
+ stdout, "", modified_descriptor_, split_nof, old_nof_,
+ old_details.location() == kDescriptor && new_location_ == kField,
+ old_details.representation(), new_details.representation(),
+ old_field_type, old_value, new_field_type, new_value);
+ }
+
+ Handle<LayoutDescriptor> new_layout_descriptor =
+ LayoutDescriptor::New(split_map, new_descriptors, old_nof_);
+
+ Handle<Map> new_map = Map::AddMissingTransitions(split_map, new_descriptors,
+ new_layout_descriptor);
+
+ // The deprecated part of the transition tree is no longer reachable, so
+ // replace the current instance descriptors in the surviving part of the
+ // tree with the new descriptors to maintain the descriptor sharing
+ // invariant.
+ split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
+
+ result_map_ = new_map;
+ state_ = kEnd;
+ return state_; // Done.
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
new file mode 100644
index 0000000000..68b720365b
--- /dev/null
+++ b/deps/v8/src/map-updater.h
@@ -0,0 +1,173 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MAP_RECONFIGURER_H_
+#define V8_MAP_RECONFIGURER_H_
+
+#include "src/elements-kind.h"
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/property-details.h"
+
+namespace v8 {
+namespace internal {
+
+// The |MapUpdater| class implements all sorts of map reconfigurations
+// including changes of elements kind, property attributes, property kind,
+// property location and field representations/type changes. It ensures that
+// the reconfigured map and all the intermediate maps are properly integrated
+// into the existing transition tree.
+//
+// To avoid high degrees of polymorphism and to stabilize quickly, on every
+// rewrite the new type is deduced by merging the current type with any
+// potential new (partial) version of the type in the transition tree.
+// To do this, on each rewrite:
+// - Search the root of the transition tree using FindRootMap.
+// - Find/create a |root_map| with requested |new_elements_kind|.
+// - Find |target_map|, the newest matching version of this map using the
+// "updated" |old_map|'s descriptor array (i.e. whose entry at |modify_index|
+// is considered to be of |new_kind| and having |new_attributes|) to walk
+// the transition tree.
+// - Merge/generalize the "updated" descriptor array of the |old_map| and
+// descriptor array of the |target_map|.
+// - Generalize the |modify_index| descriptor using |new_representation| and
+// |new_field_type|.
+// - Walk the tree again starting from the root towards |target_map|. Stop at
+// |split_map|, the first map whose descriptor array does not match the merged
+// descriptor array.
+// - If |target_map| == |split_map|, |target_map| is in the expected state.
+// Return it.
+// - Otherwise, invalidate the outdated transition target from |target_map|, and
+// replace its transition tree with a new branch for the updated descriptors.
+class MapUpdater {
+ public:
+ MapUpdater(Isolate* isolate, Handle<Map> old_map)
+ : isolate_(isolate),
+ old_map_(old_map),
+ old_descriptors_(old_map->instance_descriptors(), isolate_),
+ old_nof_(old_map_->NumberOfOwnDescriptors()),
+ new_elements_kind_(old_map_->elements_kind()) {}
+
+ // Prepares for reconfiguring of a property at |descriptor| to data field
+ // with given |attributes| and |representation|/|field_type| and
+ // performs the steps 1-5.
+ Handle<Map> ReconfigureToDataField(int descriptor,
+ PropertyAttributes attributes,
+ Representation representation,
+ Handle<FieldType> field_type);
+
+ // Prepares for reconfiguring elements kind and performs the steps 1-5.
+ Handle<Map> ReconfigureElementsKind(ElementsKind elements_kind);
+
+ // Prepares for updating deprecated map to most up-to-date non-deprecated
+ // version and performs the steps 1-5.
+ Handle<Map> Update();
+
+ private:
+ enum State { kInitialized, kAtRootMap, kAtTargetMap, kEnd };
+
+ // Try to reconfigure property in-place without rebuilding transition tree
+ // and creating new maps. See implementation for details.
+ State TryRecofigureToDataFieldInplace();
+
+ // Step 1.
+ // - Search the root of the transition tree using FindRootMap.
+ // - Find/create a |root_map_| with requested |new_elements_kind_|.
+ State FindRootMap();
+
+ // Step 2.
+ // - Find |target_map_|, the newest matching version of this map using the
+ // "updated" |old_map|'s descriptor array (i.e. whose entry at
+ // |modified_descriptor_| is considered to be of |new_kind| and having
+ // |new_attributes|) to walk the transition tree.
+ State FindTargetMap();
+
+ // Step 3.
+ // - Merge/generalize the "updated" descriptor array of the |old_map_| and
+ // descriptor array of the |target_map_|.
+ // - Generalize the |modified_descriptor_| using |new_representation| and
+ // |new_field_type_|.
+ Handle<DescriptorArray> BuildDescriptorArray();
+
+ // Step 4.
+ // - Walk the tree again starting from the root towards |target_map|. Stop at
+ // |split_map|, the first map whose descriptor array does not match the
+ // merged descriptor array.
+ Handle<Map> FindSplitMap(Handle<DescriptorArray> descriptors);
+
+ // Step 5.
+ // - If |target_map| == |split_map|, |target_map| is in the expected state.
+ // Return it.
+ // - Otherwise, invalidate the outdated transition target from |target_map|,
+ // and replace its transition tree with a new branch for the updated
+ // descriptors.
+ State ConstructNewMap();
+
+ // When a requested reconfiguration cannot be done, the result is a copy
+ // of |old_map_| where every field has |Tagged| representation and |Any|
+ // field type. This map is disconnected from the transition tree.
+ State CopyGeneralizeAllFields(const char* reason);
+
+ // Returns the name of a |descriptor| property.
+ inline Name* GetKey(int descriptor) const;
+
+ // Returns the property details of a |descriptor| in the "updated"
+ // |old_descriptors_| array.
+ inline PropertyDetails GetDetails(int descriptor) const;
+
+ // Returns the value of a |descriptor| with kDescriptor location in the
+ // "updated" |old_descriptors_| array.
+ inline Object* GetValue(int descriptor) const;
+
+ // Returns the field type of a |descriptor| with kField location in the
+ // "updated" |old_descriptors_| array.
+ inline FieldType* GetFieldType(int descriptor) const;
+
+ // If a |descriptor| property in "updated" |old_descriptors_| has kField
+ // location then returns it's field type otherwise computes optimal field
+ // type for the descriptor's value and |representation|. The |location|
+ // value must be a pre-fetched location for |descriptor|.
+ inline Handle<FieldType> GetOrComputeFieldType(
+ int descriptor, PropertyLocation location,
+ Representation representation) const;
+
+ // If a |descriptor| property in the given |descriptors| array has kField
+ // location, returns its field type; otherwise computes the optimal field
+ // type for the descriptor's value and |representation|.
+ // The |location| value must be a pre-fetched location for |descriptor|.
+ inline Handle<FieldType> GetOrComputeFieldType(
+ Handle<DescriptorArray> descriptors, int descriptor,
+ PropertyLocation location, Representation representation);
+
+ Isolate* isolate_;
+ Handle<Map> old_map_;
+ Handle<DescriptorArray> old_descriptors_;
+ Handle<Map> root_map_;
+ Handle<Map> target_map_;
+ Handle<Map> result_map_;
+ int old_nof_;
+
+ State state_ = kInitialized;
+ ElementsKind new_elements_kind_;
+
+ // If |modified_descriptor_| is not equal to -1 then the fields below form
+ // an "update" of the |old_map_|'s descriptors.
+ int modified_descriptor_ = -1;
+ PropertyKind new_kind_ = kData;
+ PropertyAttributes new_attributes_ = NONE;
+ PropertyLocation new_location_ = kField;
+ Representation new_representation_ = Representation::None();
+
+ // Data specific to kField location.
+ Handle<FieldType> new_field_type_;
+
+ // Data specific to kDescriptor location.
+ Handle<Object> new_value_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_MAP_RECONFIGURER_H_
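A hedged usage sketch of the new class (V8-internal code; the isolate and map handles are assumed to be in scope):

    // Reconfigure descriptor 0 of |map| to an attribute-free tagged data
    // field; MapUpdater performs steps 1-5 above and returns a properly
    // generalized map, reusing transition-tree maps where possible.
    Handle<Map> new_map = MapUpdater(isolate, map).ReconfigureToDataField(
        0, NONE, Representation::Tagged(), FieldType::Any(isolate));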
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index eea77e34d8..b5150ef400 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -21,11 +21,11 @@ MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
int end_pos)
: script_(script), start_pos_(start_pos), end_pos_(end_pos) {}
MessageLocation::MessageLocation(Handle<Script> script, int start_pos,
- int end_pos, Handle<JSFunction> function)
+ int end_pos, Handle<SharedFunctionInfo> shared)
: script_(script),
start_pos_(start_pos),
end_pos_(end_pos),
- function_(function) {}
+ shared_(shared) {}
MessageLocation::MessageLocation() : start_pos_(-1), end_pos_(-1) {}
// If no message listeners have been registered this one is called
@@ -47,10 +47,9 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
}
}
-
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
Isolate* isolate, MessageTemplate::Template message,
- MessageLocation* location, Handle<Object> argument,
+ const MessageLocation* location, Handle<Object> argument,
Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
@@ -75,50 +74,63 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
return message_obj;
}
-
-void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
+void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
Handle<JSMessageObject> message) {
- // We are calling into embedder's code which can throw exceptions.
- // Thus we need to save current exception state, reset it to the clean one
- // and ignore scheduled exceptions callbacks can throw.
-
- // We pass the exception object into the message handler callback though.
- Object* exception_object = isolate->heap()->undefined_value();
- if (isolate->has_pending_exception()) {
- exception_object = isolate->pending_exception();
- }
- Handle<Object> exception(exception_object, isolate);
+ v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- Isolate::ExceptionScope exception_scope(isolate);
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
+ if (api_message_obj->ErrorLevel() == v8::Isolate::kMessageError) {
+ // We are calling into embedder's code which can throw exceptions.
+ // Thus we need to save current exception state, reset it to the clean one
+ // and ignore scheduled exceptions callbacks can throw.
- // Turn the exception on the message into a string if it is an object.
- if (message->argument()->IsJSObject()) {
- HandleScope scope(isolate);
- Handle<Object> argument(message->argument(), isolate);
+ // We pass the exception object into the message handler callback though.
+ Object* exception_object = isolate->heap()->undefined_value();
+ if (isolate->has_pending_exception()) {
+ exception_object = isolate->pending_exception();
+ }
+ Handle<Object> exception(exception_object, isolate);
- MaybeHandle<Object> maybe_stringified;
- Handle<Object> stringified;
- // Make sure we don't leak uncaught internally generated Error objects.
- if (argument->IsJSError()) {
- maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
- } else {
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
+ Isolate::ExceptionScope exception_scope(isolate);
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
- maybe_stringified = Object::ToString(isolate, argument);
- }
+ // Turn the exception on the message into a string if it is an object.
+ if (message->argument()->IsJSObject()) {
+ HandleScope scope(isolate);
+ Handle<Object> argument(message->argument(), isolate);
+
+ MaybeHandle<Object> maybe_stringified;
+ Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (argument->IsJSError()) {
+ maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
- if (!maybe_stringified.ToHandle(&stringified)) {
- stringified = isolate->factory()->NewStringFromAsciiChecked("exception");
+ maybe_stringified = Object::ToString(isolate, argument);
+ }
+
+ if (!maybe_stringified.ToHandle(&stringified)) {
+ stringified =
+ isolate->factory()->NewStringFromAsciiChecked("exception");
+ }
+ message->set_argument(*stringified);
}
- message->set_argument(*stringified);
+
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+ ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
+ } else {
+ ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
}
+}
+void MessageHandler::ReportMessageNoExceptions(
+ Isolate* isolate, const MessageLocation* loc, Handle<Object> message,
+ v8::Local<v8::Value> api_exception_obj) {
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+ int error_level = api_message_obj->ErrorLevel();
Handle<TemplateList> global_listeners =
isolate->factory()->message_listeners();
@@ -134,6 +146,11 @@ void MessageHandler::ReportMessage(Isolate* isolate, MessageLocation* loc,
if (global_listeners->get(i)->IsUndefined(isolate)) continue;
FixedArray* listener = FixedArray::cast(global_listeners->get(i));
Foreign* callback_obj = Foreign::cast(listener->get(0));
+ int32_t message_levels =
+ static_cast<int32_t>(Smi::cast(listener->get(2))->value());
+ if (!(message_levels & error_level)) {
+ continue;
+ }
v8::MessageCallback callback =
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
Handle<Object> callback_data(listener->get(1), isolate);
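Each listener now stores a bitmask of v8::Isolate message error levels in slot 2 and receives only messages whose level bit is set. The filter reduces to a mask test; a minimal sketch with hypothetical names:

    // Levels such as v8::Isolate::kMessageError are single bits; a listener
    // registered for (kMessageError | kMessageWarning) sees only those.
    bool ListenerWantsMessage(int32_t listener_mask, int message_level) {
      return (listener_mask & message_level) != 0;
    }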
@@ -165,6 +182,115 @@ std::unique_ptr<char[]> MessageHandler::GetLocalizedMessage(
return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
}
+namespace {
+
+Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
+ if (script->eval_from_shared()->IsUndefined(isolate))
+ return isolate->heap()->undefined_value();
+
+ Handle<SharedFunctionInfo> shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
+ // Find the name of the function calling eval.
+ if (shared->name()->BooleanValue()) {
+ return shared->name();
+ }
+
+ return shared->inferred_name();
+}
+
+Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
+ if (script->eval_from_shared()->IsUndefined(isolate))
+ return isolate->heap()->undefined_value();
+
+ Handle<SharedFunctionInfo> eval_from_shared(
+ SharedFunctionInfo::cast(script->eval_from_shared()));
+ return eval_from_shared->script()->IsScript()
+ ? eval_from_shared->script()
+ : isolate->heap()->undefined_value();
+}
+
+MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
+ Handle<Object> sourceURL(script->GetNameOrSourceURL(), isolate);
+ if (!sourceURL->IsUndefined(isolate)) {
+ DCHECK(sourceURL->IsString());
+ return Handle<String>::cast(sourceURL);
+ }
+
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("eval at ");
+
+ Handle<Object> eval_from_function_name =
+ handle(EvalFromFunctionName(isolate, script), isolate);
+ if (eval_from_function_name->BooleanValue()) {
+ Handle<String> str;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, str, Object::ToString(isolate, eval_from_function_name),
+ String);
+ builder.AppendString(str);
+ } else {
+ builder.AppendCString("<anonymous>");
+ }
+
+ Handle<Object> eval_from_script_obj =
+ handle(EvalFromScript(isolate, script), isolate);
+ if (eval_from_script_obj->IsScript()) {
+ Handle<Script> eval_from_script =
+ Handle<Script>::cast(eval_from_script_obj);
+ builder.AppendCString(" (");
+ if (eval_from_script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
+ // Eval script originated from another eval.
+ Handle<String> str;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, str, FormatEvalOrigin(isolate, eval_from_script), String);
+ builder.AppendString(str);
+ } else {
+ DCHECK(eval_from_script->compilation_type() !=
+ Script::COMPILATION_TYPE_EVAL);
+ // eval script originated from "real" source.
+ Handle<Object> name_obj = handle(eval_from_script->name(), isolate);
+ if (eval_from_script->name()->IsString()) {
+ builder.AppendString(Handle<String>::cast(name_obj));
+
+ Script::PositionInfo info;
+ if (Script::GetPositionInfo(eval_from_script, script->GetEvalPosition(),
+ &info, Script::NO_OFFSET)) {
+ builder.AppendCString(":");
+
+ Handle<String> str = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(info.line + 1), isolate));
+ builder.AppendString(str);
+
+ builder.AppendCString(":");
+
+ str = isolate->factory()->NumberToString(
+ handle(Smi::FromInt(info.column + 1), isolate));
+ builder.AppendString(str);
+ }
+ } else {
+ DCHECK(!eval_from_script->name()->IsString());
+ builder.AppendCString("unknown source");
+ }
+ }
+ builder.AppendCString(")");
+ }
+
+ Handle<String> result;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, result, builder.Finish(), String);
+ return result;
+}
+
+} // namespace
+
+Handle<Object> StackFrameBase::GetEvalOrigin() {
+ if (!HasScript()) return isolate_->factory()->undefined_value();
+ return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
+}
+
+bool StackFrameBase::IsEval() {
+ return HasScript() &&
+ GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
+}
+
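FormatEvalOrigin recurses through eval_from_shared for evals spawned by other evals, so the strings it produces take the shapes below (illustrative examples only; line and column are the 1-based position of the eval call site, and a sourceURL on the script short-circuits the whole format).

// "eval at <anonymous> (app.js:10:3)"                    caller is a plain script
// "eval at handler (eval at <anonymous> (app.js:10:3))"  eval nested in an eval
// "eval at run (unknown source)"                         enclosing script unnamed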
void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
DCHECK(!array->IsWasmFrame(frame_ix));
@@ -179,10 +305,12 @@ void JSStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
is_strict_ = (flags & FrameArray::kIsStrict) != 0;
}
+JSStackFrame::JSStackFrame() {}
+
JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
Handle<JSFunction> function,
Handle<AbstractCode> code, int offset)
- : isolate_(isolate),
+ : StackFrameBase(isolate),
receiver_(receiver),
function_(function),
code_(code),
@@ -190,8 +318,6 @@ JSStackFrame::JSStackFrame(Isolate* isolate, Handle<Object> receiver,
force_constructor_(false),
is_strict_(false) {}
-JSStackFrame::JSStackFrame() {}
-
Handle<Object> JSStackFrame::GetFunction() const {
return Handle<Object>::cast(function_);
}
@@ -245,7 +371,7 @@ Handle<Object> JSStackFrame::GetScriptNameOrSourceUrl() {
}
Handle<Object> JSStackFrame::GetMethodName() {
- if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_)) {
+ if (receiver_->IsNullOrUndefined(isolate_)) {
return isolate_->factory()->null_value();
}
@@ -298,110 +424,11 @@ Handle<Object> JSStackFrame::GetMethodName() {
return isolate_->factory()->null_value();
}
-namespace {
-
-Object* EvalFromFunctionName(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
- return isolate->heap()->undefined_value();
-
- Handle<SharedFunctionInfo> shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
- // Find the name of the function calling eval.
- if (shared->name()->BooleanValue()) {
- return shared->name();
- }
-
- return shared->inferred_name();
-}
-
-Object* EvalFromScript(Isolate* isolate, Handle<Script> script) {
- if (script->eval_from_shared()->IsUndefined(isolate))
- return isolate->heap()->undefined_value();
-
- Handle<SharedFunctionInfo> eval_from_shared(
- SharedFunctionInfo::cast(script->eval_from_shared()));
- return eval_from_shared->script()->IsScript()
- ? eval_from_shared->script()
- : isolate->heap()->undefined_value();
-}
-
-MaybeHandle<String> FormatEvalOrigin(Isolate* isolate, Handle<Script> script) {
- Handle<Object> sourceURL = Script::GetNameOrSourceURL(script);
- if (!sourceURL->IsUndefined(isolate)) {
- DCHECK(sourceURL->IsString());
- return Handle<String>::cast(sourceURL);
- }
-
- IncrementalStringBuilder builder(isolate);
- builder.AppendCString("eval at ");
-
- Handle<Object> eval_from_function_name =
- handle(EvalFromFunctionName(isolate, script), isolate);
- if (eval_from_function_name->BooleanValue()) {
- Handle<String> str;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, str, Object::ToString(isolate, eval_from_function_name),
- String);
- builder.AppendString(str);
- } else {
- builder.AppendCString("<anonymous>");
- }
-
- Handle<Object> eval_from_script_obj =
- handle(EvalFromScript(isolate, script), isolate);
- if (eval_from_script_obj->IsScript()) {
- Handle<Script> eval_from_script =
- Handle<Script>::cast(eval_from_script_obj);
- builder.AppendCString(" (");
- if (eval_from_script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
- // Eval script originated from another eval.
- Handle<String> str;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, str, FormatEvalOrigin(isolate, eval_from_script), String);
- builder.AppendString(str);
- } else {
- DCHECK(eval_from_script->compilation_type() !=
- Script::COMPILATION_TYPE_EVAL);
- // eval script originated from "real" source.
- Handle<Object> name_obj = handle(eval_from_script->name(), isolate);
- if (eval_from_script->name()->IsString()) {
- builder.AppendString(Handle<String>::cast(name_obj));
-
- Script::PositionInfo info;
- if (Script::GetPositionInfo(eval_from_script, script->GetEvalPosition(),
- &info, Script::NO_OFFSET)) {
- builder.AppendCString(":");
-
- Handle<String> str = isolate->factory()->NumberToString(
- handle(Smi::FromInt(info.line + 1), isolate));
- builder.AppendString(str);
-
- builder.AppendCString(":");
-
- str = isolate->factory()->NumberToString(
- handle(Smi::FromInt(info.column + 1), isolate));
- builder.AppendString(str);
- }
- } else {
- DCHECK(!eval_from_script->name()->IsString());
- builder.AppendCString("unknown source");
- }
- }
- builder.AppendCString(")");
- }
-
- Handle<String> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, result, builder.Finish(), String);
- return result;
-}
-
-} // namespace
-
Handle<Object> JSStackFrame::GetTypeName() {
// TODO(jgruber): Check for strict/constructor here as in
// CallSitePrototypeGetThis.
- if (receiver_->IsNull(isolate_) || receiver_->IsUndefined(isolate_))
+ if (receiver_->IsNullOrUndefined(isolate_))
return isolate_->factory()->null_value();
if (receiver_->IsJSProxy()) return isolate_->factory()->Proxy_string();
@@ -411,11 +438,6 @@ Handle<Object> JSStackFrame::GetTypeName() {
return JSReceiver::GetConstructorName(receiver_object);
}
-Handle<Object> JSStackFrame::GetEvalOrigin() {
- if (!HasScript()) return isolate_->factory()->undefined_value();
- return FormatEvalOrigin(isolate_, GetScript()).ToHandleChecked();
-}
-
int JSStackFrame::GetLineNumber() {
DCHECK_LE(0, GetPosition());
if (HasScript()) return Script::GetLineNumber(GetScript(), GetPosition()) + 1;
@@ -435,13 +457,7 @@ bool JSStackFrame::IsNative() {
}
bool JSStackFrame::IsToplevel() {
- return receiver_->IsJSGlobalProxy() || receiver_->IsNull(isolate_) ||
- receiver_->IsUndefined(isolate_);
-}
-
-bool JSStackFrame::IsEval() {
- return HasScript() &&
- GetScript()->compilation_type() == Script::COMPILATION_TYPE_EVAL;
+ return receiver_->IsJSGlobalProxy() || receiver_->IsNullOrUndefined(isolate_);
}
bool JSStackFrame::IsConstructor() {
@@ -619,6 +635,8 @@ Handle<Script> JSStackFrame::GetScript() const {
return handle(Script::cast(function_->shared()->script()), isolate_);
}
+WasmStackFrame::WasmStackFrame() {}
+
void WasmStackFrame::FromFrameArray(Isolate* isolate, Handle<FrameArray> array,
int frame_ix) {
// This function is called for both wasm and asm.js->wasm frames.
@@ -638,9 +656,10 @@ Handle<Object> WasmStackFrame::GetFunction() const {
Handle<Object> WasmStackFrame::GetFunctionName() {
Handle<Object> name;
Handle<WasmCompiledModule> compiled_module(
- Handle<WasmInstanceObject>::cast(wasm_instance_)->get_compiled_module(),
+ Handle<WasmInstanceObject>::cast(wasm_instance_)->compiled_module(),
isolate_);
- if (!WasmCompiledModule::GetFunctionName(compiled_module, wasm_func_index_)
+ if (!WasmCompiledModule::GetFunctionNameOrNull(isolate_, compiled_module,
+ wasm_func_index_)
.ToHandle(&name)) {
name = isolate_->factory()->null_value();
}
@@ -673,6 +692,7 @@ MaybeHandle<String> WasmStackFrame::ToString() {
}
int WasmStackFrame::GetPosition() const {
+ // TODO(wasm): Clean this up (bug 5007).
return (offset_ < 0) ? (-1 - offset_) : code_->SourcePosition(offset_);
}
@@ -680,6 +700,25 @@ Handle<Object> WasmStackFrame::Null() const {
return isolate_->factory()->null_value();
}
+bool WasmStackFrame::HasScript() const { return true; }
+
+Handle<Script> WasmStackFrame::GetScript() const {
+ return handle(
+ WasmInstanceObject::cast(*wasm_instance_)->compiled_module()->script(),
+ isolate_);
+}
+
+AsmJsWasmStackFrame::AsmJsWasmStackFrame() {}
+
+void AsmJsWasmStackFrame::FromFrameArray(Isolate* isolate,
+ Handle<FrameArray> array,
+ int frame_ix) {
+ DCHECK(array->IsAsmJsWasmFrame(frame_ix));
+ WasmStackFrame::FromFrameArray(isolate, array, frame_ix);
+ is_at_number_conversion_ =
+ array->Flags(frame_ix)->value() & FrameArray::kAsmJsAtNumberConversion;
+}
+
Handle<Object> AsmJsWasmStackFrame::GetReceiver() const {
return isolate_->global_proxy();
}
@@ -706,8 +745,12 @@ Handle<Object> AsmJsWasmStackFrame::GetScriptNameOrSourceUrl() {
int AsmJsWasmStackFrame::GetPosition() const {
DCHECK_LE(0, offset_);
int byte_offset = code_->SourcePosition(offset_);
- return wasm::GetAsmWasmSourcePosition(Handle<JSObject>::cast(wasm_instance_),
- wasm_func_index_, byte_offset);
+ Handle<WasmCompiledModule> compiled_module(
+ WasmInstanceObject::cast(*wasm_instance_)->compiled_module(), isolate_);
+ DCHECK_LE(0, byte_offset);
+ return WasmCompiledModule::GetAsmJsSourcePosition(
+ compiled_module, wasm_func_index_, static_cast<uint32_t>(byte_offset),
+ is_at_number_conversion_);
}
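The reworked GetPosition() resolves the frame's byte offset through the compiled module rather than the instance object. For an asm.js-to-wasm frame, the offset tables can record two source positions per import call: the call itself and the implicit ToNumber conversion of its result (the leading `+` in `return +f();`, for instance); the kAsmJsAtNumberConversion flag decoded in FromFrameArray above selects between them. Call shape, restated from the hunk with no new API assumed:

// int pos = WasmCompiledModule::GetAsmJsSourcePosition(
//     compiled_module, wasm_func_index_,
//     static_cast<uint32_t>(byte_offset), is_at_number_conversion_);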
int AsmJsWasmStackFrame::GetLineNumber() {
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 86cc8d0dff..745bc2a469 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -23,25 +23,26 @@ class AbstractCode;
class FrameArray;
class JSMessageObject;
class LookupIterator;
+class SharedFunctionInfo;
class SourceInfo;
class MessageLocation {
public:
MessageLocation(Handle<Script> script, int start_pos, int end_pos);
MessageLocation(Handle<Script> script, int start_pos, int end_pos,
- Handle<JSFunction> function);
+ Handle<SharedFunctionInfo> shared);
MessageLocation();
Handle<Script> script() const { return script_; }
int start_pos() const { return start_pos_; }
int end_pos() const { return end_pos_; }
- Handle<JSFunction> function() const { return function_; }
+ Handle<SharedFunctionInfo> shared() const { return shared_; }
private:
Handle<Script> script_;
int start_pos_;
int end_pos_;
- Handle<JSFunction> function_;
+ Handle<SharedFunctionInfo> shared_;
};
class StackFrameBase {
@@ -56,7 +57,7 @@ class StackFrameBase {
virtual Handle<Object> GetScriptNameOrSourceUrl() = 0;
virtual Handle<Object> GetMethodName() = 0;
virtual Handle<Object> GetTypeName() = 0;
- virtual Handle<Object> GetEvalOrigin() = 0;
+ virtual Handle<Object> GetEvalOrigin();
virtual int GetPosition() const = 0;
// Return 1-based line number, including line offset.
@@ -66,11 +67,20 @@ class StackFrameBase {
virtual bool IsNative() = 0;
virtual bool IsToplevel() = 0;
- virtual bool IsEval() = 0;
+ virtual bool IsEval();
virtual bool IsConstructor() = 0;
virtual bool IsStrict() const = 0;
virtual MaybeHandle<String> ToString() = 0;
+
+ protected:
+ StackFrameBase() {}
+ explicit StackFrameBase(Isolate* isolate) : isolate_(isolate) {}
+ Isolate* isolate_;
+
+ private:
+ virtual bool HasScript() const = 0;
+ virtual Handle<Script> GetScript() const = 0;
};
class JSStackFrame : public StackFrameBase {
@@ -88,7 +98,6 @@ class JSStackFrame : public StackFrameBase {
Handle<Object> GetScriptNameOrSourceUrl() override;
Handle<Object> GetMethodName() override;
Handle<Object> GetTypeName() override;
- Handle<Object> GetEvalOrigin() override;
int GetPosition() const override;
int GetLineNumber() override;
@@ -96,7 +105,6 @@ class JSStackFrame : public StackFrameBase {
bool IsNative() override;
bool IsToplevel() override;
- bool IsEval() override;
bool IsConstructor() override;
bool IsStrict() const override { return is_strict_; }
@@ -106,10 +114,8 @@ class JSStackFrame : public StackFrameBase {
JSStackFrame();
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
- bool HasScript() const;
- Handle<Script> GetScript() const;
-
- Isolate* isolate_;
+ bool HasScript() const override;
+ Handle<Script> GetScript() const override;
Handle<Object> receiver_;
Handle<JSFunction> function_;
@@ -134,7 +140,6 @@ class WasmStackFrame : public StackFrameBase {
Handle<Object> GetScriptNameOrSourceUrl() override { return Null(); }
Handle<Object> GetMethodName() override { return Null(); }
Handle<Object> GetTypeName() override { return Null(); }
- Handle<Object> GetEvalOrigin() override { return Null(); }
int GetPosition() const override;
int GetLineNumber() override { return wasm_func_index_; }
@@ -142,7 +147,6 @@ class WasmStackFrame : public StackFrameBase {
bool IsNative() override { return false; }
bool IsToplevel() override { return false; }
- bool IsEval() override { return false; }
bool IsConstructor() override { return false; }
bool IsStrict() const override { return false; }
@@ -151,7 +155,8 @@ class WasmStackFrame : public StackFrameBase {
protected:
Handle<Object> Null() const;
- Isolate* isolate_;
+ bool HasScript() const override;
+ Handle<Script> GetScript() const override;
// TODO(wasm): Use proper typing.
Handle<Object> wasm_instance_;
@@ -160,9 +165,11 @@ class WasmStackFrame : public StackFrameBase {
int offset_;
private:
+ WasmStackFrame();
void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
friend class FrameArrayIterator;
+ friend class AsmJsWasmStackFrame;
};
class AsmJsWasmStackFrame : public WasmStackFrame {
@@ -180,6 +187,13 @@ class AsmJsWasmStackFrame : public WasmStackFrame {
int GetColumnNumber() override;
MaybeHandle<String> ToString() override;
+
+ private:
+ friend class FrameArrayIterator;
+ AsmJsWasmStackFrame();
+ void FromFrameArray(Isolate* isolate, Handle<FrameArray> array, int frame_ix);
+
+ bool is_at_number_conversion_;
};
class FrameArrayIterator {
@@ -255,7 +269,6 @@ class ErrorUtils : public AllStatic {
"ArrayBuffer subclass returned this from species constructor") \
T(ArrayFunctionsOnFrozen, "Cannot modify frozen array elements") \
T(ArrayFunctionsOnSealed, "Cannot add/remove sealed array elements") \
- T(ArrayNotSubclassable, "Subclassing Arrays is not currently supported.") \
T(CalledNonCallable, "% is not a function") \
T(CalledOnNonObject, "% called on non-object") \
T(CalledOnNullOrUndefined, "% called on null or undefined") \
@@ -329,7 +342,9 @@ class ErrorUtils : public AllStatic {
T(NotIterable, "% is not iterable") \
T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
- T(NotSharedTypedArray, "% is not a shared typed array.") \
+ T(NotSuperConstructor, "Super constructor % of % is not a constructor") \
+ T(NotSuperConstructorAnonymousClass, \
+ "Super constructor % of anonymous class is not a constructor") \
T(NotIntegerSharedTypedArray, "% is not an integer shared typed array.") \
T(NotInt32SharedTypedArray, "% is not an int32 shared typed array.") \
T(ObjectGetterExpectingFunction, \
@@ -443,9 +458,6 @@ class ErrorUtils : public AllStatic {
T(RegExpNonObject, "% getter called on non-object %") \
T(RegExpNonRegExp, "% getter called on non-RegExp object") \
T(ReinitializeIntl, "Trying to re-initialize % object.") \
- T(ResolvedOptionsCalledOnNonObject, \
- "resolvedOptions method called on a non-object or on a object that is " \
- "not Intl.%.") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
T(RestrictedFunctionProperties, \
"'caller' and 'arguments' are restricted function properties and cannot " \
@@ -474,8 +486,8 @@ class ErrorUtils : public AllStatic {
T(VarRedeclaration, "Identifier '%' has already been declared") \
T(WrongArgs, "%: Arguments list has wrong type") \
/* ReferenceError */ \
- T(NonMethod, "'super' is referenced from non-method") \
T(NotDefined, "% is not defined") \
+ T(SuperAlreadyCalled, "Super constructor may only be called once") \
T(UnsupportedSuper, "Unsupported reference to 'super'") \
/* RangeError */ \
T(DateRange, "Provided date is not in valid range.") \
@@ -503,8 +515,8 @@ class ErrorUtils : public AllStatic {
T(InvalidStringLength, "Invalid string length") \
T(InvalidTimeValue, "Invalid time value") \
T(InvalidTypedArrayAlignment, "% of % should be a multiple of %") \
+ T(InvalidTypedArrayIndex, "Invalid typed array index") \
T(InvalidTypedArrayLength, "Invalid typed array length") \
- T(InvalidTypedArrayOffset, "Start offset is too large:") \
T(InvalidSimdIndex, "Index out of bounds for SIMD operation") \
T(InvalidSimdLaneValue, "Lane value out of bounds for SIMD operation") \
T(LetInLexicalBinding, "let is disallowed as a lexically bound name") \
@@ -595,9 +607,13 @@ class ErrorUtils : public AllStatic {
"In strict mode code, functions can only be declared at top level or " \
"inside a block.") \
T(StrictOctalLiteral, "Octal literals are not allowed in strict mode.") \
+ T(StrictDecimalWithLeadingZero, \
+ "Decimals with leading zeros are not allowed in strict mode.") \
+ T(StrictOctalEscape, \
+ "Octal escape sequences are not allowed in strict mode.") \
T(StrictWith, "Strict mode code may not include a with statement") \
T(TemplateOctalLiteral, \
- "Octal literals are not allowed in template strings.") \
+ "Octal escape sequences are not allowed in template strings.") \
T(ThisFormalParameter, "'this' is not a valid formal parameter name") \
T(AwaitBindingIdentifier, \
"'await' is not a valid identifier name in an async function") \
@@ -639,6 +655,7 @@ class ErrorUtils : public AllStatic {
T(YieldInParameter, "Yield expression not allowed in formal parameter") \
/* EvalError */ \
T(CodeGenFromStrings, "%") \
+ T(NoSideEffectDebugEvaluate, "Possible side-effect in debug-evaluate") \
/* URIError */ \
T(URIMalformed, "URI malformed") \
/* Wasm errors (currently Error) */ \
@@ -652,12 +669,16 @@ class ErrorUtils : public AllStatic {
T(WasmTrapFuncSigMismatch, "function signature mismatch") \
T(WasmTrapInvalidIndex, "invalid index into function table") \
T(WasmTrapTypeError, "invalid type") \
+ /* Asm.js validation related */ \
+ T(AsmJsInvalid, "Invalid asm.js: %") \
+ T(AsmJsCompiled, "Converted asm.js to WebAssembly: %") \
+ T(AsmJsInstantiated, "Instantiated asm.js: %") \
/* DataCloneError messages */ \
T(DataCloneError, "% could not be cloned.") \
T(DataCloneErrorNeuteredArrayBuffer, \
"An ArrayBuffer is neutered and could not be cloned.") \
- T(DataCloneErrorSharedArrayBufferNotTransferred, \
- "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must be " \
+ T(DataCloneErrorSharedArrayBufferTransferred, \
+ "A SharedArrayBuffer could not be cloned. SharedArrayBuffer must not be " \
"transferred.") \
T(DataCloneDeserializationError, "Unable to deserialize cloned data.") \
T(DataCloneDeserializationVersionError, \
@@ -692,11 +713,11 @@ class MessageHandler {
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
Isolate* isolate, MessageTemplate::Template type,
- MessageLocation* location, Handle<Object> argument,
+ const MessageLocation* location, Handle<Object> argument,
Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
- static void ReportMessage(Isolate* isolate, MessageLocation* loc,
+ static void ReportMessage(Isolate* isolate, const MessageLocation* loc,
Handle<JSMessageObject> message);
static void DefaultMessageReport(Isolate* isolate, const MessageLocation* loc,
@@ -704,6 +725,12 @@ class MessageHandler {
static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
static std::unique_ptr<char[]> GetLocalizedMessage(Isolate* isolate,
Handle<Object> data);
+
+ private:
+ static void ReportMessageNoExceptions(Isolate* isolate,
+ const MessageLocation* loc,
+ Handle<Object> message_obj,
+ v8::Local<v8::Value> api_exception_obj);
};
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index 865e64c87d..784185ac0d 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -204,13 +204,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -895,8 +900,7 @@ void Assembler::print(Label* L) {
} else {
PrintF("%d\n", instr);
}
- next(&l, internal_reference_positions_.find(l.pos()) !=
- internal_reference_positions_.end());
+ next(&l, is_internal_reference(&l));
}
} else {
PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -910,14 +914,15 @@ void Assembler::bind_to(Label* L, int pos) {
bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
- next_buffer_check_ += kTrampolineSlotsSize;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
}
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
int32_t dist = pos - fixup_pos;
- is_internal = internal_reference_positions_.find(fixup_pos) !=
- internal_reference_positions_.end();
+ is_internal = is_internal_reference(L);
next(L, is_internal); // Call next before overwriting link with target at
// fixup_pos.
Instr instr = instr_at(fixup_pos);
@@ -934,7 +939,6 @@ void Assembler::bind_to(Label* L, int pos) {
CHECK((trampoline_pos - fixup_pos) <= branch_offset);
target_at_put(fixup_pos, trampoline_pos, false);
fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
}
target_at_put(fixup_pos, pos, false);
} else {
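Two fixes ride along in bind_to(): the dead recomputation of dist after retargeting through a trampoline is dropped, and the trampoline budget in next_buffer_check_ is only handed back for labels that actually reserved a slot. A compressed statement of the accounting invariant implied by the hunk:

// Every branch use of an unbound label reserves kTrampolineSlotsSize of
// buffer headroom (tracked via next_buffer_check_). Internal-reference
// labels never reserve a slot, so binding one must not return headroom;
// hence the new is_internal_reference(L) guard above.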
@@ -1779,9 +1783,18 @@ void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
DCHECK(!src.rm().is(at));
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
+ if (IsMipsArchVariant(kMips32r6)) {
+ int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+ if (src.offset_ & kNegOffset) {
+ hi += 1;
+ }
+ aui(at, src.rm(), hi);
+ addiu(at, at, src.offset_ & kImm16Mask);
+ } else {
+ lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm()); // Add base register.
+ }
}
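On r6 the high half is folded into a single aui off the base register, and the `hi += 1` adjustment is needed because addiu sign-extends its 16-bit immediate. A worked example, assuming aui encodes the low 16 bits of the value it is given:

// offset_ = 0x12348000:
//   hi = 0x1234; low half 0x8000 has bit 15 set  ->  hi = 0x1235
//   aui   at, rm, 0x1235    // at = rm + 0x12350000
//   addiu at, at, 0x8000    // immediate sign-extends to -0x8000:
//                           // at = rm + 0x12350000 - 0x8000
//                           //    = rm + 0x12348000, as required
// Without the +1 the sign extension would leave the result 0x10000 short.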
// Helper for base-reg + upper part of offset, when offset is larger than int16.
@@ -1797,8 +1810,13 @@ int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
if (src.offset_ & kNegOffset) {
hi += 1;
}
- lui(at, hi);
- addu(at, at, src.rm());
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ aui(at, src.rm(), hi);
+ } else {
+ lui(at, hi);
+ addu(at, at, src.rm());
+ }
return (src.offset_ & kImm16Mask);
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 1df6e3f5ad..dec4c18889 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -68,7 +68,7 @@ namespace internal {
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
- V(f16) V(f18) V(f20) V(f22) V(f24) V(f26)
+ V(f16) V(f18) V(f20) V(f22) V(f24)
// clang-format on
// CPU Registers.
@@ -282,8 +282,7 @@ const DoubleRegister f31 = {31};
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
-// We use the last non-callee saved odd register for O32 ABI
-#define kDoubleCompareReg f19
+#define kDoubleCompareReg f26
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -552,6 +551,17 @@ class Assembler : public AssemblerBase {
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
+ // Max offset for instructions with 16-bit offset field
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
+
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kTrampolineSlotsSize = 2 * kInstrSize;
+#else
+ static const int kTrampolineSlotsSize = 4 * kInstrSize;
+#endif
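The branch-range constants moved into the public section are byte offsets derived from instruction-counted fields; the arithmetic, spelled out as a sketch of the derivation:

// Regular branch: signed 16-bit field counting 4-byte instructions.
//   16 + 2 = 18 bits of byte offset  ->  max reach (1 << (18 - 1)) - 1.
// Compact branch (r6): signed 26-bit field, same 4-byte scaling.
//   26 + 2 = 28 bits of byte offset  ->  max reach (1 << (28 - 1)) - 1.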
// ---------------------------------------------------------------------------
// Code generation.
@@ -1029,9 +1039,6 @@ class Assembler : public AssemblerBase {
// Debugging.
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1169,6 +1176,9 @@ class Assembler : public AssemblerBase {
}
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+ static bool IsCompactBranchSupported() {
+ return IsMipsArchVariant(kMips32r6);
+ }
inline int UnboundLabelsCount() { return unbound_labels_count_; }
@@ -1443,18 +1453,15 @@ class Assembler : public AssemblerBase {
// branch instruction generation, where we use jump instructions rather
// than regular branch instructions.
bool trampoline_emitted_;
-#ifdef _MIPS_ARCH_MIPS32R6
- static const int kTrampolineSlotsSize = 2 * kInstrSize;
-#else
- static const int kTrampolineSlotsSize = 4 * kInstrSize;
-#endif
- static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 966214be8c..f75c02f677 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -684,8 +673,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -916,7 +908,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2218,51 +2209,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ And(scratch, dest, Operand(kPointerAlignmentMask));
- __ Check(eq,
- kDestinationOfCopyNotAligned,
- scratch,
- Operand(zero_reg));
- }
-
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
-
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ Addu(count, count, count);
- }
-
- Register limit = count; // Read until dest equals this.
- __ Addu(limit, dest, Operand(count));
-
- Label loop_entry, loop;
- // Copy bytes from src to dest until dest hits limit.
- __ Branch(&loop_entry);
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ Addu(src, src, Operand(1));
- __ sb(scratch, MemOperand(dest));
- __ Addu(dest, dest, Operand(1));
- __ bind(&loop_entry);
- __ Branch(&loop, lt, dest, Operand(limit));
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2889,85 +2835,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(miss, ne, at, Operand(zero_reg));
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ sra(scratch1, scratch1, kSmiTagSize); // convert smi to int
- __ Subu(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Addu(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ srl(scratch2, scratch2, Name::kHashShift);
- __ And(scratch2, scratch1, scratch2);
-
- // Scale the index by multiplying by the element size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
-
- __ Lsa(scratch2, scratch2, scratch2, 1);
-
- // Check if the key is identical to the name.
- __ Lsa(scratch2, elements, scratch2, 2);
- __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Branch(done, eq, name, Operand(at));
- }
-
- const int spill_mask =
- (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
- a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ MultiPush(spill_mask);
- if (name.is(a0)) {
- DCHECK(!elements.is(a1));
- __ Move(a1, name);
- __ Move(a0, elements);
- } else {
- __ Move(a0, elements);
- __ Move(a1, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(scratch2, a2);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, ne, at, Operand(zero_reg));
- __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3266,233 +3133,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ lw(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
- // found, now call handler.
- Register handler = feedback;
- __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Lsa(too_far, feedback, length, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ lw(cached_map, MemOperand(pointer_reg));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- __ lw(handler, MemOperand(pointer_reg, kPointerSize));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ lw(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ Branch(try_array, ne, cached_map, Operand(receiver_map));
- Register handler = feedback;
-
- __ Lsa(handler, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ Lsa(too_far, feedback, too_far, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ lw(cached_map, MemOperand(pointer_reg));
- __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- // Is it a transitioning store?
- __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&transition_call, ne, too_far, Operand(at));
- __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&transition_call);
- __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(feedback, too_far);
-
- __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = StoreWithVectorDescriptor::NameRegister(); // a2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // t0
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
- Register feedback = t1;
- Register receiver_map = t2;
- Register scratch1 = t5;
-
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
- __ Branch(&not_array, ne, scratch1, Operand(at));
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- Register scratch2 = t4;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ Branch(&try_poly_name, ne, feedback, Operand(at));
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Branch(&miss, ne, key, Operand(feedback));
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ Lsa(feedback, vector, slot, kPointerSizeLog2 - kSmiTagSize);
- __ lw(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ Branch(USE_DELAY_SLOT, &compare_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3847,127 +3487,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
- __ AssertReceiver(a3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ GetObjectType(a3, a2, a2);
- __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it's in fact a map.
- __ lw(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &new_object);
- __ GetObjectType(a2, a0, a0);
- __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ lw(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&new_object, ne, a0, Operand(a1));
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ lbu(t0, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Allocate(t0, v0, t1, a0, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ sw(a2, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Addu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- v0 : result (tagged)
- // -- a1 : result fields (untagged)
- // -- t1 : result end (untagged)
- // -- a2 : initial map
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ lw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
- __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(0));
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(a1, t1, a0);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ lbu(t0, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ sll(t0, t0, kPointerSizeLog2);
- __ subu(t0, t1, t0);
- __ InitializeFieldsWithFiller(a1, t0, a0);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(a1, t1, a0);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(&finalize, eq, a3, Operand(zero_reg));
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(v0, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(v0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ sll(t0, t0, kPointerSizeLog2 + kSmiTagSize);
- __ Push(a2, t0);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a2);
- }
- __ lbu(t1, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Lsa(t1, v0, t1, kPointerSizeLog2);
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Subu(t1, t1, Operand(kHeapObjectTag));
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(a1, a3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : function
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 751095d8d8..e2dd4a9b28 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in v0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -311,14 +300,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 8aaeaca367..a57299abf6 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -605,351 +605,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = t0;
- DCHECK(!AreAliased(receiver, key, value, target_map,
- scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch_elements, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- t5,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = t0;
- Register length = t1;
- Register array = t2;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = t5;
- Register scratch3 = t3;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, length, array, scratch2));
-
- Register scratch = t6;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ push(ra);
- __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ sll(scratch, length, 2);
- __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- // array: destination FixedDoubleArray, tagged as heap object
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));
-
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag));
- __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- scratch1,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
-
- // Prepare for conversion loop.
- __ Addu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch3, array,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(array_end, scratch3, length, 2);
-
- // Repurpose registers no longer in use.
- Register hole_lower = elements;
- Register hole_upper = length;
- __ li(hole_lower, Operand(kHoleNanLower32));
- __ li(hole_upper, Operand(kHoleNanUpper32));
-
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch3: begin of FixedDoubleArray element fields, not tagged
-
- __ Branch(&entry);
-
- __ bind(&only_change_map);
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ lw(ra, MemOperand(sp, 0));
- __ Branch(USE_DELAY_SLOT, fail);
- __ addiu(sp, sp, kPointerSize); // In delay slot.
-
- // Convert and copy elements.
- __ bind(&loop);
- __ lw(scratch2, MemOperand(scratch1));
- __ Addu(scratch1, scratch1, kIntSize);
- // scratch2: current element
- __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ mtc1(scratch2, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(scratch3));
- __ Branch(USE_DELAY_SLOT, &entry);
- __ addiu(scratch3, scratch3, kDoubleSize); // In delay slot.
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ SmiTag(scratch2);
- __ Or(scratch2, scratch2, Operand(1));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
- }
- // mantissa
- __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
- // exponent
- __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
- __ addiu(scratch3, scratch3, kDoubleSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, scratch3, Operand(array_end));
-
- __ bind(&done);
- __ pop(ra);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
- Register elements = t0;
- Register array = t2;
- Register length = t1;
- Register scratch = t5;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, array, length, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ MultiPush(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ sll(array_size, length, 1);
- __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, not tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
- __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ Addu(src_elements, src_elements, Operand(
- FixedDoubleArray::kHeaderSize - kHeapObjectTag
- + Register::kExponentOffset));
- __ Addu(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Lsa(dst_end, dst_elements, dst_end, 1);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialization_loop_entry);
- __ bind(&initialization_loop);
- __ sw(scratch, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
- __ bind(&initialization_loop_entry);
- __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
-
- __ Addu(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields, not tagged,
- // points to the exponent
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // heap_number_map: heap number map
- __ Branch(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ MultiPop(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ Branch(fail);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ lw(upper_bits, MemOperand(src_elements));
- __ Addu(src_elements, src_elements, kDoubleSize);
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- Register scratch3 = t6;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
- // heap_number: new heap number
- // Load mantissa of current element, src_elements
- // point to exponent of next element.
- __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
- - Register::kExponentOffset - kDoubleSize)));
- __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
- __ mov(scratch2, dst_elements);
- __ sw(heap_number, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, kIntSize);
- __ RecordWrite(array,
- scratch2,
- heap_number,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ sw(scratch2, MemOperand(dst_elements));
- __ Addu(dst_elements, dst_elements, kIntSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, dst_elements, Operand(dst_end));
-
- __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- array,
- scratch,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(ra);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
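The generator deleted above converted a FixedDoubleArray into a FixedArray by boxing each double into a HeapNumber, testing for holes via the upper word only. A minimal C++ model of that hole test, assuming kHoleNanUpper32 == 0xFFF7FFFF as in the V8 tree of this era:

#include <cstdint>
#include <cstring>

// A hole in a FixedDoubleArray is a NaN with a fixed bit pattern, so it
// can be recognized from the upper 32 bits alone, which is why the loop
// above loads only the exponent word of each element.
bool IsHoleNan(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed V8 constant
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}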
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@@ -1076,37 +731,29 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Assembler::target_address_at(
- sequence + Assembler::kInstrSize);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ Address target_address =
+ Assembler::target_address_at(sequence + Assembler::kInstrSize);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
patcher.masm()->li(
t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
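A usage sketch of the slimmed-down aging API on this side of the change; the MarkingParity out-parameter is gone from both GetCodeAge() and PatchPlatformCodeAge(), so a caller resetting an aged sequence would look roughly like:

// Hedged sketch, not a function from this patch: query the age encoded
// in a prologue and, if aged, patch it back to the young sequence.
void MaybeResetAge(Isolate* isolate, byte* sequence) {
  Code::Age age = Code::GetCodeAge(isolate, sequence);
  if (age != Code::kNoAgeCodeAge) {
    Code::PatchPlatformCodeAge(isolate, sequence, Code::kNoAgeCodeAge);
  }
}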
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 486ae68324..c6233c5993 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -64,13 +64,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3};
+ Register registers[] = {a1, a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
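For reference, every descriptor in this file follows the same shape; a hypothetical ExampleDescriptor (the name and the register meanings are illustrative assumptions, not taken from this diff) would read:

void ExampleDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  // Registers are listed in the stub's argument order.
  Register registers[] = {a1, a2, a3};
  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}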
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c3abe4fa6f..25413f9a54 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1998,6 +1998,49 @@ void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
}
}
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ add_s(fd, fr, scratch);
+ }
+}
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ madd_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ add_d(fd, fr, scratch);
+ }
+}
+
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ sub_s(fd, scratch, fr);
+ }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (IsMipsArchVariant(kMips32r2)) {
+ msub_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ sub_d(fd, scratch, fr);
+ }
+}
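Scalar reference for the four helpers above. Note the operand order of the msub forms, and that MIPS32r2 madd.s/msub.s also round the intermediate product (fused maddf/msubf only arrive with r6), so the mul+add fallback should agree with the r2 instruction:

// What Madd_s / Msub_s compute, as plain C++:
float MaddS(float fr, float fs, float ft) { return fr + fs * ft; }
float MsubS(float fr, float fs, float ft) { return fs * ft - fr; }  // not fr - fs*ft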
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
@@ -2325,186 +2368,6 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
}
}
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveHigh(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_d(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_d(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveLow(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x80000000));
- __ Move_s(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_s(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, gt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, lt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- min_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, gt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (IsMipsArchVariant(kMips32r6)) {
- max_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, lt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
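The removed MinNaNCheck/MaxNaNCheck helpers (superseded by the Float32/Float64 Min/Max macros added further down) disambiguated equal operands through the sign of the high word, which is the 0x80000000 compare in ZeroHelper above. As a C++ model:

#include <cstdint>
#include <cstring>

// When two doubles compare equal but may be -0.0 vs +0.0, only the sign
// bit can differ, so checking the upper word against 0x80000000 suffices.
bool HighWordIsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == 0x80000000u;
}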
void MacroAssembler::Clz(Register rd, Register rs) {
if (IsMipsArchVariant(kLoongson)) {
DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
@@ -4334,110 +4197,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
Addu(result, result, Operand(kHeapObjectTag));
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- sll(scratch1, length, 1); // Length in bytes, not chars.
- addiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
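All six removed string allocators share one size computation, payload plus header rounded up to the object alignment, which is what the sll/addiu/And triple encodes. As a sketch:

#include <cstddef>

// char_size is 2 for SeqTwoByteString and 1 for SeqOneByteString; adding
// the mask before masking rounds up to the next aligned size.
size_t SeqStringAllocationSize(size_t length, size_t char_size,
                               size_t header_size, size_t alignment_mask) {
  return (header_size + length * char_size + alignment_mask) &
         ~alignment_mask;
}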
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4518,77 +4277,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2,
- scratch3));
- Label smi_value, done;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNan.
- DoubleRegister double_result = f0;
- DoubleRegister double_scratch = f2;
-
- ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
- FPUCanonicalizeNaN(double_result, double_result);
-
- bind(&smi_value);
- Register untagged_value = scratch2;
- SmiUntag(untagged_value, value_reg);
- mtc1(untagged_value, double_scratch);
- cvt_d_w(double_result, double_scratch);
-
- bind(&done);
- Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- Lsa(scratch1, scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- // scratch1 is now effective address of the double element
- sdc1(double_result, MemOperand(scratch1, 0));
-}
-
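Behaviourally, the removed StoreNumberToDoubleElements stored smis as converted doubles and canonicalized heap-number NaNs before the store, collapsing any signaling-NaN payload. A model, assuming the canonical NaN is the default quiet NaN:

#include <cmath>
#include <limits>

// Any NaN (including sNaN payloads) becomes the canonical quiet NaN
// before being written into a FixedDoubleArray.
double CanonicalizeNaN(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                           : value;
}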
void MacroAssembler::CompareMapAndBranch(Register obj,
Register scratch,
Handle<Map> map,
@@ -4870,17 +4558,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, lt, t0, Operand(StepIn));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4897,7 +4583,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4911,7 +4597,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
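The gate compiled above shrinks from a step-action comparison to a single byte test; any non-zero hook byte routes the call through Runtime::kDebugOnFunctionCall. As a model (flag stands in for the byte at debug_hook_on_function_call_address):

#include <cstdint>

// Old gate: last_step_action >= StepIn; new gate: one byte, fire when
// non-zero.
bool ShouldCallDebugHook(int8_t flag) { return flag != 0; }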
@@ -4925,8 +4611,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(a1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -5603,27 +5289,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- lw(scratch, NativeContextMemOperand());
- lw(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- Branch(no_map_match, ne, map_in_out, Operand(at));
-
- // Use the transitioned cached map.
- lw(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
lw(dst, NativeContextMemOperand());
lw(dst, ContextMemOperand(dst, index));
@@ -5661,7 +5326,7 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
CONSTANT_SIZE);
@@ -5955,14 +5620,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
- Register src,
- Label* non_smi_case) {
- JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
void MacroAssembler::JumpIfSmi(Register value,
Label* smi_label,
Register scratch,
@@ -6157,6 +5814,179 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_right, nullptr, lt, src1, src2);
+ BranchF32(&return_left, nullptr, lt, src2, src1);
+
+ // Operands are equal, but check for +/-0.
+ mfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_left, nullptr, lt, src1, src2);
+ BranchF32(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ mfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ max_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_right, nullptr, lt, src1, src2);
+ BranchF64(&return_left, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ Mfhc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (IsMipsArchVariant(kMips32r6)) {
+ min_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_left, nullptr, lt, src1, src2);
+ BranchF64(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ Mfhc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MinOutOfLine(DoubleRegister dst,
+ DoubleRegister src1,
+ DoubleRegister src2) {
+ add_d(dst, src1, src2);
+}
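A scalar model of the contract these macros and their out-of-line halves implement: a NaN operand branches out of line, where the add yields a quiet NaN; equal operands resolve -0.0 vs +0.0 by sign; everything else is the plain comparison. The Min variants mirror this with the sign preference inverted:

#include <cmath>

float Float32MaxModel(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) return a + b;  // out-of-line: qNaN
  if (a == b) return std::signbit(a) ? b : a;        // +0 beats -0 for max
  return a > b ? a : b;
}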
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
@@ -6172,19 +6002,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatOneByteStringMask));
- Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
-
static const int kRegisterPassedArguments = 4;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -6622,40 +6439,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // Scratch contained elements pointer.
- Move(current, object);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&end, eq, current, Operand(factory->null_value()));
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- lw(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
- lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
- lw(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
- bind(&end);
-}
-
-
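The removed JumpIfDictionaryInPrototypeChain walked map to prototype to map until null, reporting when a map on the way carried dictionary-mode elements (and exiting early for non-JSObject instance types). An illustrative model with stand-in types, not V8's real Map:

struct MapModel {
  const MapModel* prototype_map;  // null terminates the chain
  bool has_dictionary_elements;
};

bool DictionaryInPrototypeChain(const MapModel* map) {
  for (const MapModel* m = map->prototype_map; m != nullptr;
       m = m->prototype_map) {
    if (m->has_dictionary_elements) return true;
  }
  return false;
}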
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
Register reg9, Register reg10) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 824a3bf14d..66ac930ad2 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -208,6 +208,12 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+// Number of instructions needed to calculate a switch table entry address.
+#ifdef _MIPS_ARCH_MIPS32R6
+ static const int kSwitchTablePrologueSize = 5;
+#else
+ static const int kSwitchTablePrologueSize = 10;
+#endif
// GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
// functor/function with 'Label *func(size_t index)' declaration.
template <typename Func>
@@ -305,17 +311,6 @@ class MacroAssembler: public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
- // Min, Max macros.
- // On pre-r6 these functions may modify at and t8 registers.
- void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
-
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
@@ -560,32 +555,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_new,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -892,6 +861,15 @@ class MacroAssembler: public Assembler {
// general-purpose register.
void Mfhc1(Register rt, FPURegister fs);
+ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+
// Wrapper functions for the different cmp/branch types.
inline void BranchF32(Label* target, Label* nan, Condition cc,
FPURegister cmp1, FPURegister cmp2,
@@ -1037,17 +1015,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -1080,9 +1047,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -1158,30 +1126,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
// "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1331,6 +1275,31 @@ class MacroAssembler: public Assembler {
Label* overflow_label, Label* no_overflow_label,
Register scratch = at);
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt.
+ // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
+ // handled in out-of-line code. The specific behaviour depends on supported
+ // instructions.
+ //
+ // If src1 and src2 are the same register, the value is simply moved to
+ // dst; otherwise the result is permitted to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+ void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
+ void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
+ DoubleRegister src2);
+
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1557,10 +1526,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
// Jump if the register contains a smi.
void JumpIfSmi(Register value,
Label* smi_label,
@@ -1630,11 +1595,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@@ -1731,20 +1691,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- Branch(memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
@@ -1871,13 +1817,13 @@ template <typename Func>
void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
Func GetLabelFunction) {
if (kArchVariant >= kMips32r6) {
- BlockTrampolinePoolFor(case_count + 5);
+ BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
addiupc(at, 5);
Lsa(at, at, index, kPointerSizeLog2);
lw(at, MemOperand(at));
} else {
Label here;
- BlockTrampolinePoolFor(case_count + 10);
+ BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
push(ra);
bal(&here);
sll(at, index, kPointerSizeLog2); // Branch delay slot.
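Usage sketch for GenerateSwitchTable with the shared constant in effect; per the header comment, any callable returning Label* works (the labels here are hypothetical and must each be bound to a case body within branch reach):

Label case0, case1, case2;
Label* cases[] = {&case0, &case1, &case2};
masm->GenerateSwitchTable(a0, arraysize(cases),
                          [&cases](size_t i) { return cases[i]; });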
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index bd423996d8..7ff3d144e7 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -2537,11 +2537,11 @@ void Simulator::DecodeTypeRegisterDRsType() {
break;
case MADDF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), fd + (fs * ft));
+ set_fpu_register_double(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg(), fd - (fs * ft));
+ set_fpu_register_double(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
set_fpu_register_double(
@@ -2964,11 +2964,11 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
case MADDF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), fd + (fs * ft));
+ set_fpu_register_float(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg(), fd - (fs * ft));
+ set_fpu_register_float(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
set_fpu_register_float(
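MADDF/MSUBF are fused operations on r6, so computing fd + fs * ft in the simulator introduced a rounding of the product that the hardware does not perform; std::fma matches the fused semantics. A self-contained demonstration of the difference:

#include <cmath>
#include <cstdio>

int main() {
  const double e27 = std::ldexp(1.0, -27);    // 2^-27
  double fs = 1.0 + e27, ft = 1.0 + e27;
  double fd = -(1.0 + std::ldexp(1.0, -26));  // -(1 + 2^-26)
  double unfused = fd + fs * ft;        // product rounds: result is 0.0
  double fused = std::fma(fs, ft, fd);  // keeps the 2^-54 term
  std::printf("unfused=%a fused=%a\n", unfused, fused);
  return 0;
}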
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index b35b166a2e..b670058964 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -183,13 +183,19 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>((Assembler::target_address_at(pc_, host_))));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -859,8 +865,7 @@ void Assembler::print(Label* L) {
} else {
PrintF("%d\n", instr);
}
- next(&l, internal_reference_positions_.find(l.pos()) !=
- internal_reference_positions_.end());
+ next(&l, is_internal_reference(&l));
}
} else {
PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
@@ -874,14 +879,15 @@ void Assembler::bind_to(Label* L, int pos) {
bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
- next_buffer_check_ += kTrampolineSlotsSize;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
}
while (L->is_linked()) {
int fixup_pos = L->pos();
int dist = pos - fixup_pos;
- is_internal = internal_reference_positions_.find(fixup_pos) !=
- internal_reference_positions_.end();
+ is_internal = is_internal_reference(L);
next(L, is_internal); // Call next before overwriting link with target at
// fixup_pos.
Instr instr = instr_at(fixup_pos);
@@ -898,7 +904,6 @@ void Assembler::bind_to(Label* L, int pos) {
CHECK((trampoline_pos - fixup_pos) <= branch_offset);
target_at_put(fixup_pos, trampoline_pos, false);
fixup_pos = trampoline_pos;
- dist = pos - fixup_pos;
}
target_at_put(fixup_pos, pos, false);
} else {
@@ -1940,19 +1945,64 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
DCHECK(!src.rm().is(at));
DCHECK(is_int32(src.offset_));
- daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
- dsll(at, at, kLuiShift);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
+
+ if (kArchVariant == kMips64r6) {
+ int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+ if (src.offset_ & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ return;
+ }
+
+ hi += 1;
+ }
+
+ daui(at, src.rm(), hi);
+ daddiu(at, at, src.offset_ & kImm16Mask);
+ } else {
+ lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ }
}
+// Helper for base-reg + upper part of offset, when the offset does not fit
+// in int16. Loads the higher part of the offset into the at register and
+// returns the lower part to be used as the offset in load/store
+// instructions.
+int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ DCHECK(is_int32(src.offset_));
+ int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
+ // If the highest bit of the lower part of the offset is 1, this would make
+ // the offset in the load/store instruction negative. We need to compensate
+ // for this by adding 1 to the upper part of the offset.
+ if (src.offset_ & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ LoadRegPlusOffsetToAt(src);
+ return 0;
+ }
+
+ hi += 1;
+ }
+
+ if (kArchVariant == kMips64r6) {
+ daui(at, src.rm(), hi);
+ } else {
+ lui(at, hi);
+ daddu(at, at, src.rm());
+ }
+ return (src.offset_ & kImm16Mask);
+}
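A C++ model of the split: the 32-bit offset decomposes so that, modulo 2^32, (hi << 16) + sign_extend16(lo) == offset; because load/store instructions sign-extend their 16-bit field, hi takes a +1 carry whenever lo's sign bit is set. The function above additionally falls back to the full three-instruction sequence when that carry would flip hi's own sign bit, which matters for daui's sign-extended immediate:

#include <cstdint>

void SplitOffset(int32_t offset, uint32_t* hi, int16_t* lo) {
  *hi = (static_cast<uint32_t>(offset) >> 16) & 0xFFFF;
  *lo = static_cast<int16_t>(offset & 0xFFFF);
  if (*lo < 0) *hi = (*hi + 1) & 0xFFFF;  // carry for sign-extended lo
}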
void Assembler::lb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LB, at, rd, off16);
}
}
@@ -1961,8 +2011,8 @@ void Assembler::lbu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LBU, at, rd, off16);
}
}
@@ -1971,8 +2021,8 @@ void Assembler::lh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LH, at, rd, off16);
}
}
@@ -1981,8 +2031,8 @@ void Assembler::lhu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LHU, at, rd, off16);
}
}
@@ -1991,8 +2041,8 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LW, at, rd, off16);
}
}
@@ -2001,8 +2051,8 @@ void Assembler::lwu(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LWU, at, rd, off16);
}
}
@@ -2025,8 +2075,8 @@ void Assembler::sb(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SB, at, rd, off16);
}
}
@@ -2035,8 +2085,8 @@ void Assembler::sh(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SH, at, rd, off16);
}
}
@@ -2045,8 +2095,8 @@ void Assembler::sw(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SW, at, rd, off16);
}
}
@@ -2130,8 +2180,8 @@ void Assembler::ld(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(LD, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(LD, at, rd, off16);
}
}
@@ -2140,8 +2190,8 @@ void Assembler::sd(Register rd, const MemOperand& rs) {
if (is_int16(rs.offset_)) {
GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
} else { // Offset > 16 bits, use multiple instructions to store.
- LoadRegPlusOffsetToAt(rs);
- GenInstrImmediate(SD, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ GenInstrImmediate(SD, at, rd, off16);
}
}
@@ -2551,8 +2601,8 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(LWC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(LWC1, at, fd, off16);
}
}
@@ -2561,8 +2611,8 @@ void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(LDC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(LDC1, at, fd, off16);
}
}
@@ -2571,8 +2621,8 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(SWC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(SWC1, at, fd, off16);
}
}
@@ -2582,8 +2632,8 @@ void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
if (is_int16(src.offset_)) {
GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
- LoadRegPlusOffsetToAt(src);
- GenInstrImmediate(SDC1, at, fd, 0);
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ GenInstrImmediate(SDC1, at, fd, off16);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 056cc425f9..e3786a7e8c 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -559,6 +559,13 @@ class Assembler : public AssemblerBase {
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
+ // Max offset for instructions with 16-bit offset field
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
+
+ static const int kTrampolineSlotsSize = 2 * kInstrSize;
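For reference, the reach behind the two maxima: a 16-bit branch field encodes a word offset, i.e. an 18-bit signed byte offset once shifted left by two, and the 26-bit compact-branch field becomes a 28-bit one, hence the (1 << (n - 1)) - 1 pattern:

// 18-bit signed byte offset: +/-128 KB; 28-bit: +/-128 MB.
static_assert((1 << (18 - 1)) - 1 == 131071, "16-bit field << 2, signed");
static_assert((1 << (28 - 1)) - 1 == 134217727, "26-bit field << 2, signed");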
// ---------------------------------------------------------------------------
// Code generation.
@@ -1091,9 +1098,6 @@ class Assembler : public AssemblerBase {
// Debugging.
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1226,6 +1230,7 @@ class Assembler : public AssemblerBase {
}
bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+ static bool IsCompactBranchSupported() { return kArchVariant == kMips64r6; }
inline int UnboundLabelsCount() { return unbound_labels_count_; }
@@ -1236,6 +1241,7 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
+ int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1497,14 +1503,15 @@ class Assembler : public AssemblerBase {
// branch instruction generation, where we use jump instructions rather
// than regular branch instructions.
bool trampoline_emitted_;
- static const int kTrampolineSlotsSize = 2 * kInstrSize;
- static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
- static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
static const int kInvalidSlotPos = -1;
// Internal reference positions, required for unbounded internal reference
// labels.
std::set<int64_t> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 97f5b73f9c..e570447e8f 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -32,17 +32,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(a0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -681,8 +670,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -913,7 +905,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2221,51 +2212,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ And(scratch, dest, Operand(kPointerAlignmentMask));
- __ Check(eq,
- kDestinationOfCopyNotAligned,
- scratch,
- Operand(zero_reg));
- }
-
- // Assumes word reads and writes are little endian.
- // Nothing to do for zero characters.
- Label done;
-
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ Daddu(count, count, count);
- }
-
- Register limit = count; // Read until dest equals this.
- __ Daddu(limit, dest, Operand(count));
-
- Label loop_entry, loop;
- // Copy bytes from src to dest until dest hits limit.
- __ Branch(&loop_entry);
- __ bind(&loop);
- __ lbu(scratch, MemOperand(src));
- __ daddiu(src, src, 1);
- __ sb(scratch, MemOperand(dest));
- __ daddiu(dest, dest, 1);
- __ bind(&loop_entry);
- __ Branch(&loop, lt, dest, Operand(limit));
-
- __ bind(&done);
-}
-
-
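The removed GenerateCopyCharacters converted the character count to bytes (doubling for TWO_BYTE_ENCODING) and then ran a byte-wise lbu/sb loop; functionally it reduces to:

#include <cstdint>
#include <cstring>

// 'count' is in characters; the stub assumed little-endian word order
// but copied one byte at a time, so memcpy is an equivalent model.
void CopyCharacters(uint8_t* dest, const uint8_t* src, size_t count,
                    bool two_byte_encoding) {
  std::memcpy(dest, src, two_byte_encoding ? count * 2 : count);
}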
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2893,84 +2839,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(miss, ne, at, Operand(zero_reg));
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register scratch1,
- Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1);
- __ Dsubu(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ Daddu(scratch2, scratch2, Operand(
- NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ dsrl(scratch2, scratch2, Name::kHashShift);
- __ And(scratch2, scratch1, scratch2);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ Dlsa(scratch2, scratch2, scratch2, 1);
-
- // Check if the key is identical to the name.
- __ Dlsa(scratch2, elements, scratch2, kPointerSizeLog2);
- __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
- __ Branch(done, eq, name, Operand(at));
- }
-
- const int spill_mask =
- (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
- a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ MultiPush(spill_mask);
- if (name.is(a0)) {
- DCHECK(!elements.is(a1));
- __ Move(a1, name);
- __ Move(a0, elements);
- } else {
- __ Move(a0, elements);
- __ Move(a1, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ mov(scratch2, a2);
- __ mov(at, v0);
- __ MultiPop(spill_mask);
-
- __ Branch(done, ne, at, Operand(zero_reg));
- __ Branch(miss, eq, at, Operand(zero_reg));
-}
-
-
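The removed GeneratePositiveLookup unrolled the first kInlinedProbes probes of the dictionary's quadratic probe sequence inline (the comment above cites ~93% hit coverage after two probes) before spilling into the full stub. The masked index it computed, with GetProbeOffset supplying the i + i*i term:

#include <cstdint>

// index_i = (hash + i + i*i) & mask, where mask = capacity - 1.
uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t mask) {
  return (hash + i + i * i) & mask;
}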
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3269,233 +3137,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ ld(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
- // found, now call handler.
- Register handler = feedback;
- __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiScale(too_far, length, kPointerSizeLog2);
- __ Daddu(too_far, feedback, Operand(too_far));
- __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Daddu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ld(cached_map, MemOperand(pointer_reg));
- __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- __ ld(handler, MemOperand(pointer_reg, kPointerSize));
- __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ Branch(miss);
-}
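
The diagram above is the whole story: after the monomorphic pair at elements 0 and 1, handlers are stored as (weak map, handler) pairs scanned with stride 2. A minimal C++ model of HandleArrayCases' scan, with simplified pointers in place of tagged values:

    struct MapHandlerPair {
      const void* map;      // weak cell value; cleared once the map dies
      const void* handler;  // code entry to jump to
    };

    // Scan the pairs; a null return corresponds to branching to miss.
    const void* ScanPolymorphic(const MapHandlerPair* pairs, int count,
                                const void* receiver_map) {
      for (int i = 0; i < count; i++) {
        if (pairs[i].map == receiver_map) return pairs[i].handler;
      }
      return nullptr;
    }
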
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ ld(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ Branch(try_array, ne, cached_map, Operand(receiver_map));
- Register handler = feedback;
- __ SmiScale(handler, slot, kPointerSizeLog2);
- __ Daddu(handler, vector, Operand(handler));
- __ ld(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
- __ Jump(t9);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
-
- __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiScale(too_far, too_far, kPointerSizeLog2);
- __ Daddu(too_far, feedback, Operand(too_far));
- __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Daddu(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ ld(cached_map, MemOperand(pointer_reg));
- __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
- // Is it a transitioning store?
- __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(&transition_call, ne, too_far, Operand(at));
-
- __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&transition_call);
- __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ Move(feedback, too_far);
- __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- __ bind(&prepare_next);
- __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
-
- // We exhausted our array of map handler pairs.
- __ Branch(miss);
-}
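
Store feedback uses the wider layout in this diagram: each entry is a (weak map, transition-or-undefined, handler) triple, which is why the loop advances pointer_reg by kPointerSize * 3 and peeks at the middle word to detect a transitioning store. The same scan in plain C++, with nullptr standing in for undefined:

    struct StoreFeedbackEntry {
      const void* map;         // weak cell value for the receiver map
      const void* transition;  // weak cell for the new map, or nullptr
      const void* handler;     // code entry
    };

    // On a hit, *new_map is the transition target (nullptr for plain stores).
    const void* ScanStoreFeedback(const StoreFeedbackEntry* entries, int count,
                                  const void* receiver_map,
                                  const void** new_map) {
      for (int i = 0; i < count; i++) {
        if (entries[i].map != receiver_map) continue;
        *new_map = entries[i].transition;
        return entries[i].handler;
      }
      return nullptr;  // miss
    }
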
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // a1
- Register key = StoreWithVectorDescriptor::NameRegister(); // a2
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // a3
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // a4
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(a0)); // a0
- Register feedback = a5;
- Register receiver_map = a6;
- Register scratch1 = a7;
-
- __ SmiScale(scratch1, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(scratch1));
- __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
-
- // We have a polymorphic element handler.
- Label try_poly_name;
-
- Register scratch2 = t0;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ Branch(&miss, ne, key, Operand(feedback));
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiScale(scratch1, slot, kPointerSizeLog2);
- __ Daddu(feedback, vector, Operand(scratch1));
- __ ld(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ Branch(USE_DELAY_SLOT, &compare_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -3850,128 +3491,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a1 : target
- // -- a3 : new target
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
- __ AssertFunction(a1);
- __ AssertReceiver(a3);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ GetObjectType(a3, a2, a2);
- __ Branch(&new_object, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Load the initial map and verify that it's in fact a map.
- __ ld(a2, FieldMemOperand(a3, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(a2, &new_object);
- __ GetObjectType(a2, a0, a0);
- __ Branch(&new_object, ne, a0, Operand(MAP_TYPE));
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ ld(a0, FieldMemOperand(a2, Map::kConstructorOrBackPointerOffset));
- __ Branch(&new_object, ne, a0, Operand(a1));
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ lbu(a4, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Allocate(a4, v0, a5, a0, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ sd(a2, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ Daddu(a1, v0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- v0 : result (tagged)
- // -- a1 : result fields (untagged)
- // -- a5 : result end (untagged)
- // -- a2 : initial map
- // -- cp : context
- // -- ra : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ lwu(a3, FieldMemOperand(a2, Map::kBitField3Offset));
- __ And(at, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(USE_DELAY_SLOT, &slack_tracking, ne, at, Operand(zero_reg));
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); // In delay slot.
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(a1, a5, a0);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Subu(a3, a3, Operand(1 << Map::ConstructionCounter::kShift));
- __ sw(a3, FieldMemOperand(a2, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ lbu(a4, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
- __ dsll(a4, a4, kPointerSizeLog2);
- __ Dsubu(a4, a5, a4);
- __ InitializeFieldsWithFiller(a1, a4, a0);
-
- // Initialize the remaining (reserved) fields with the one-pointer filler map.
- __ LoadRoot(a0, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(a1, a5, a0);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ And(a3, a3, Operand(Map::ConstructionCounter::kMask));
- __ Branch(&finalize, eq, a3, Operand(zero_reg));
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(v0, a2);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(v0);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- __ dsll(a4, a4, kPointerSizeLog2 + kSmiShiftSize + kSmiTagSize);
- __ SmiTag(a4);
- __ Push(a2, a4);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(a2);
- }
- __ lbu(a5, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- __ Dlsa(a5, v0, a5, kPointerSizeLog2);
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ Dsubu(a5, a5, Operand(kHeapObjectTag));
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(a1, a3);
- __ TailCallRuntime(Runtime::kNewObject);
-}
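
The slack-tracking block above packs a small countdown into the map's bit field 3: each construction decrements it, reserved fields are filled with a filler map so the object stays iterable, and when the counter runs out the instance size is finalized. A scalar sketch of the counter arithmetic; the shift and width below are illustrative, the real values come from Map::ConstructionCounter:

    #include <cstdint>

    constexpr uint32_t kCounterShift = 29;  // assumed bit position
    constexpr uint32_t kCounterMask = 0x7u << kCounterShift;

    // Mirrors the Subu/And/Branch sequence: decrement the embedded counter
    // and report whether slack tracking just finished.
    bool DecrementConstructionCounter(uint32_t* bit_field3) {
      *bit_field3 -= 1u << kCounterShift;
      return (*bit_field3 & kCounterMask) == 0;  // finalize instance size
    }
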
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a1 : function
diff --git a/deps/v8/src/mips64/code-stubs-mips64.h b/deps/v8/src/mips64/code-stubs-mips64.h
index fdaf4c80df..42f90ad886 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.h
+++ b/deps/v8/src/mips64/code-stubs-mips64.h
@@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in v0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -312,14 +301,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 943c2a6e63..134fe4dd88 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -607,348 +607,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = a4;
- DCHECK(!AreAliased(receiver, key, value, target_map,
- scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch_elements, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- t1,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = a4;
- Register length = a5;
- Register array = a6;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = t1;
- Register scratch3 = a7;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, length, array, scratch2));
-
- Register scratch = t2;
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ push(ra);
- __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ SmiScale(scratch, length, kDoubleSizeLog2);
- __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
- __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- __ Dsubu(array, array, kHeapObjectTag);
- // array: destination FixedDoubleArray, not tagged as heap object
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
- // Update receiver's map.
- __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ Daddu(scratch1, array, Operand(kHeapObjectTag));
- __ sd(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- scratch1,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
-
- // Prepare for conversion loop.
- __ Daddu(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiScale(array_end, length, kDoubleSizeLog2);
- __ Daddu(array_end, array_end, scratch3);
-
- // Repurpose registers no longer in use.
- Register hole_lower = elements;
- Register hole_upper = length;
- __ li(hole_lower, Operand(kHoleNanLower32));
- __ li(hole_upper, Operand(kHoleNanUpper32));
-
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch3: begin of FixedDoubleArray element fields, not tagged
-
- __ Branch(&entry);
-
- __ bind(&only_change_map);
- __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch2,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&done);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ ld(ra, MemOperand(sp, 0));
- __ Branch(USE_DELAY_SLOT, fail);
- __ daddiu(sp, sp, kPointerSize); // In delay slot.
-
- // Convert and copy elements.
- __ bind(&loop);
- __ ld(scratch2, MemOperand(scratch1));
- __ Daddu(scratch1, scratch1, kPointerSize);
- // scratch2: current element
- __ JumpIfNotSmi(scratch2, &convert_hole);
- __ SmiUntag(scratch2);
-
- // Normal smi, convert to double and store.
- __ mtc1(scratch2, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(scratch3));
- __ Branch(USE_DELAY_SLOT, &entry);
- __ daddiu(scratch3, scratch3, kDoubleSize); // In delay slot.
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ Or(scratch2, scratch2, Operand(1));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
- }
- // mantissa
- __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
- // exponent
- __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
- __ Daddu(scratch3, scratch3, kDoubleSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, scratch3, Operand(array_end));
-
- __ bind(&done);
- __ pop(ra);
-}
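
Each iteration of the conversion loop does one of two things: untag a smi and store it as a double, or recognize the hole and store a reserved NaN bit pattern as two 32-bit halves. A host-side sketch of one element; the hole-NaN constants are placeholders for V8's kHoleNanUpper32/kHoleNanLower32, and the hole check is simplified to the tag bit:

    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleUpper32 = 0xFFF7FFFF;  // placeholder half
    constexpr uint32_t kHoleLower32 = 0xFFF7FFFF;  // placeholder half

    // On mips64 a smi keeps its 32-bit value in the upper word with a clear
    // tag bit; anything else in a smi-only array must be the hole.
    void ConvertElement(uint64_t tagged, uint32_t* lo, uint32_t* hi) {
      if ((tagged & 1) == 0) {  // smi: untag and convert (SmiUntag, cvt_d_w)
        double d = static_cast<double>(static_cast<int32_t>(tagged >> 32));
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        *lo = static_cast<uint32_t>(bits);        // mantissa half
        *hi = static_cast<uint32_t>(bits >> 32);  // exponent half
      } else {                  // the hole: store the reserved NaN payload
        *lo = kHoleLower32;
        *hi = kHoleUpper32;
      }
    }
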
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Register ra contains the return address.
- Label entry, loop, convert_hole, gc_required, only_change_map;
- Register elements = a4;
- Register array = a6;
- Register length = a5;
- Register scratch = t1;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map,
- elements, array, length, scratch));
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
- __ Branch(&only_change_map, eq, at, Operand(elements));
-
- __ MultiPush(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ SmiScale(array_size, length, kPointerSizeLog2);
- __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- __ Dsubu(array, array, kHeapObjectTag);
- // array: destination FixedArray, not tagged as heap object
- // Set destination FixedArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ Daddu(src_elements, src_elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
- __ Daddu(dst_end, dst_elements, dst_end);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Branch(&initialization_loop_entry);
- __ bind(&initialization_loop);
- __ sd(scratch, MemOperand(dst_elements));
- __ Daddu(dst_elements, dst_elements, Operand(kPointerSize));
- __ bind(&initialization_loop_entry);
- __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));
-
- __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
- __ Daddu(array, array, Operand(kHeapObjectTag));
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using untagged, pre-offset addresses.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields, not tagged,
- // points to the exponent
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // heap_number_map: heap number map
- __ Branch(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ MultiPop(
- value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
-
- __ Branch(fail);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
- __ Daddu(src_elements, src_elements, kDoubleSize);
- // upper_bits: current element's upper 32 bits
- // src_elements: address of next element
- __ Branch(&convert_hole, eq, upper_bits, Operand(kHoleNanUpper32));
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- Register scratch3 = t2;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
- // heap_number: new heap number
- // Load current element; src_elements points to the next element.
-
- __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
- __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
-
- __ mov(scratch2, dst_elements);
- __ sd(heap_number, MemOperand(dst_elements));
- __ Daddu(dst_elements, dst_elements, kPointerSize);
- __ RecordWrite(array,
- scratch2,
- heap_number,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Branch(&entry);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
- __ sd(scratch2, MemOperand(dst_elements));
- __ Daddu(dst_elements, dst_elements, kPointerSize);
-
- __ bind(&entry);
- __ Branch(&loop, lt, dst_elements, Operand(dst_end));
-
- __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver,
- JSObject::kElementsOffset,
- array,
- scratch,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ pop(ra);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@@ -1077,37 +735,29 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Assembler::target_address_at(
- sequence + Assembler::kInstrSize);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ Address target_address =
+ Assembler::target_address_at(sequence + Assembler::kInstrSize);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
// Mark this code sequence for FindPlatformCodeAgeSequence().
patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
patcher.masm()->li(
t9,
Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index c6a917f5d4..6f8a0979ff 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -64,13 +64,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {a2};
- data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {a1, a3};
+ Register registers[] = {a1, a2, a3};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index a3ab4a8840..480bae10f3 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -2212,19 +2212,49 @@ void MacroAssembler::Trunc_ul_s(FPURegister fd, Register rs,
bind(&fail);
}
+void MacroAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (kArchVariant == kMips64r2) {
+ madd_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ add_s(fd, fr, scratch);
+ }
+}
void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft, FPURegister scratch) {
- if (0) { // TODO(plind): find reasonable arch-variant symbol names.
+ if (kArchVariant == kMips64r2) {
madd_d(fd, fr, fs, ft);
} else {
- // Can not change source regs's value.
DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
mul_d(scratch, fs, ft);
add_d(fd, fr, scratch);
}
}
+void MacroAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (kArchVariant == kMips64r2) {
+ msub_s(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_s(scratch, fs, ft);
+ sub_s(fd, scratch, fr);
+ }
+}
+
+void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft, FPURegister scratch) {
+ if (kArchVariant == kMips64r2) {
+ msub_d(fd, fr, fs, ft);
+ } else {
+ DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+ mul_d(scratch, fs, ft);
+ sub_d(fd, scratch, fr);
+ }
+}
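
The operand conventions here are easy to mix up: Madd computes fr + fs * ft while Msub computes fs * ft - fr, and the non-r2 fallback performs the multiply and add/subtract as two separately rounded operations. A sketch of the semantics, including r6's fused counterparts for contrast (see the simulator change at the end of this patch):

    #include <cmath>

    // Two-step (separately rounded) semantics of the macros above:
    double Madd(double fr, double fs, double ft) { return fr + fs * ft; }
    double Msub(double fr, double fs, double ft) { return fs * ft - fr; }

    // r6's MADDF/MSUBF are fused, with a single rounding and a different
    // sign convention: MSUBF computes fd - fs * ft.
    double Maddf(double fd, double fs, double ft) {
      return std::fma(fs, ft, fd);
    }
    double Msubf(double fd, double fs, double ft) {
      return std::fma(-fs, ft, fd);
    }
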
void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
Label* nan, Condition cond, FPURegister cmp1,
@@ -2524,186 +2554,6 @@ void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
movf(rd, rs, cc);
}
-#define __ masm->
-
-static bool ZeroHelper_d(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF64(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ dmfc1(t8, src1);
- __ Branch(&other, eq, t8, Operand(0x8000000000000000));
- __ Move_d(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_d(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-static bool ZeroHelper_s(MacroAssembler* masm, MaxMinKind kind, FPURegister dst,
- FPURegister src1, FPURegister src2, Label* equal) {
- if (src1.is(src2)) {
- __ Move(dst, src1);
- return true;
- }
-
- Label other, compare_not_equal;
- FPURegister left, right;
- if (kind == MaxMinKind::kMin) {
- left = src1;
- right = src2;
- } else {
- left = src2;
- right = src1;
- }
-
- __ BranchF32(&compare_not_equal, nullptr, ne, src1, src2);
- // Left and right hand side are equal, check for -0 vs. +0.
- __ FmoveLow(t8, src1);
- __ dsll32(t8, t8, 0);
- __ Branch(&other, eq, t8, Operand(0x8000000000000000));
- __ Move_s(dst, right);
- __ Branch(equal);
- __ bind(&other);
- __ Move_s(dst, left);
- __ Branch(equal);
- __ bind(&compare_not_equal);
- return false;
-}
-
-#undef __
-
-void MacroAssembler::MinNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (kArchVariant >= kMips64r6) {
- min_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, gt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_d(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF64(nullptr, nan, eq, src1, src2);
- }
- if (kArchVariant >= kMips64r6) {
- max_d(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_d(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF64(&skip, nullptr, ge, src1, src2);
- Move_d(dst, src2);
- } else if (dst.is(src2)) {
- BranchF64(&skip, nullptr, le, src1, src2);
- Move_d(dst, src1);
- } else {
- Label right;
- BranchF64(&right, nullptr, lt, src1, src2);
- Move_d(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_d(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MinNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (kArchVariant >= kMips64r6) {
- min_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMin, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, gt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
-
-void MacroAssembler::MaxNaNCheck_s(FPURegister dst, FPURegister src1,
- FPURegister src2, Label* nan) {
- if (nan) {
- BranchF32(nullptr, nan, eq, src1, src2);
- }
- if (kArchVariant >= kMips64r6) {
- max_s(dst, src1, src2);
- } else {
- Label skip;
- if (!ZeroHelper_s(this, MaxMinKind::kMax, dst, src1, src2, &skip)) {
- if (dst.is(src1)) {
- BranchF32(&skip, nullptr, ge, src1, src2);
- Move_s(dst, src2);
- } else if (dst.is(src2)) {
- BranchF32(&skip, nullptr, le, src1, src2);
- Move_s(dst, src1);
- } else {
- Label right;
- BranchF32(&right, nullptr, lt, src1, src2);
- Move_s(dst, src1);
- Branch(&skip);
- bind(&right);
- Move_s(dst, src2);
- }
- }
- bind(&skip);
- }
-}
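
ZeroHelper exists for one corner: -0.0 and +0.0 compare equal, yet min must prefer -0.0 and max +0.0, so the sign bit is inspected directly (the dmfc1/Branch against 0x8000000000000000 above). A scalar sketch of the min case; NaNs are excluded here because the callers branch them out first:

    #include <cstdint>
    #include <cstring>

    double MinHandlingSignedZero(double a, double b) {
      if (a == b) {  // equal values: only -0.0 vs +0.0 needs care
        uint64_t bits;
        std::memcpy(&bits, &a, sizeof bits);
        return (bits >> 63) ? a : b;  // sign bit set -> a is -0.0
      }
      return a < b ? a : b;
    }
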
void MacroAssembler::Clz(Register rd, Register rs) {
clz(rd, rs);
@@ -4472,111 +4322,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
Daddu(result, result, Operand(kHeapObjectTag));
}
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- dsll(scratch1, length, 1); // Length in bytes, not chars.
- daddiu(scratch1, scratch1,
- kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string
- // while observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- daddiu(scratch1, length,
- kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
- And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
- InitializeNewString(result,
- length,
- Heap::kConsStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result,
- length,
- Heap::kSlicedStringMapRootIndex,
- scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
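
Every one of the removed string allocators starts with the same size computation: bytes for the characters, plus the header, rounded up to the object alignment. In plain C++ (header size and alignment are illustrative; the real values come from SeqTwoByteString::kHeaderSize and kObjectAlignment):

    #include <cstdint>

    constexpr int64_t kHeaderSize = 24;  // assumed sequential-string header
    constexpr int64_t kAlignMask = 7;    // assumed 8-byte object alignment

    // dsll (x2 for two-byte chars), daddiu (add header plus rounding bias),
    // And (clear the low bits) from AllocateTwoByteString above.
    int64_t TwoByteStringAllocationSize(int64_t length) {
      return ((length << 1) + kAlignMask + kHeaderSize) & ~kAlignMask;
    }
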
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
@@ -4657,76 +4402,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Branch(&loop, ult, current_address, Operand(end_address));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, ls, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- Branch(fail, hi, scratch,
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1, scratch2));
- Label smi_value, done;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number.
- CheckMap(value_reg,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNan.
- DoubleRegister double_result = f0;
- DoubleRegister double_scratch = f2;
-
- ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
- FPUCanonicalizeNaN(double_result, double_result);
-
- bind(&smi_value);
- // Untag and transfer.
- dsrl32(scratch1, value_reg, 0);
- mtc1(scratch1, double_scratch);
- cvt_d_w(double_result, double_scratch);
-
- bind(&done);
- Daddu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
- Daddu(scratch1, scratch1, scratch2);
- // scratch1 is now effective address of the double element.
- sdc1(double_result, MemOperand(scratch1, 0));
-}
-
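
The "turn potential sNaN into qNaN" step above matters because the double backing store reserves one NaN payload for the hole; canonicalizing on store keeps arbitrary NaN payloads out. FPUCanonicalizeNaN relies on the fact that any FPU arithmetic quiets a signalling NaN, so an identity operation suffices. A sketch, assuming that implementation detail:

    // Subtracting zero changes no value but forces an sNaN through the FPU,
    // which quiets it. (An optimizer must not fold this away; in V8 it is a
    // real emitted instruction.)
    double CanonicalizeNaN(double value) { return value - 0.0; }
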
void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd,
FPURegister fs,
FPURegister ft) {
@@ -5076,17 +4751,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- li(t0, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ li(t0, Operand(debug_hook_active));
lb(t0, MemOperand(t0));
- Branch(&skip_flooding, lt, t0, Operand(StepIn));
+ Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -5103,7 +4776,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -5117,7 +4790,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
@@ -5131,8 +4804,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(a1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(a3));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -5958,27 +5631,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- ld(scratch, NativeContextMemOperand());
- ld(at, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- Branch(no_map_match, ne, map_in_out, Operand(at));
-
- // Use the transitioned cached map.
- ld(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ld(dst, NativeContextMemOperand());
ld(dst, ContextMemOperand(dst, index));
@@ -6016,7 +5668,7 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
nop(Assembler::CODE_AGE_MARKER_NOP);
// Load the stub address to t9 and call it,
- // GetCodeAgeAndParity() extracts the stub address from this instruction.
+ // GetCodeAge() extracts the stub address from this instruction.
li(t9,
Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
ADDRESS_LOAD);
@@ -6367,15 +6019,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
SmiUntag(dst, src);
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
- Register src,
- Label* non_smi_case) {
- // DCHECK(!dst.is(src));
- JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
- SmiUntag(dst, src);
-}
-
void MacroAssembler::JumpIfSmi(Register value,
Label* smi_label,
Register scratch,
@@ -6580,6 +6223,179 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
scratch2, failure);
}
+void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (kArchVariant >= kMips64r6) {
+ max_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_right, nullptr, lt, src1, src2);
+ BranchF32(&return_left, nullptr, lt, src2, src1);
+
+ // Operands are equal, but check for +/-0.
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF32(nullptr, out_of_line, eq, src1, src2);
+
+ if (kArchVariant >= kMips64r6) {
+ min_s(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF32(&return_left, nullptr, lt, src1, src2);
+ BranchF32(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ mfc1(t8, src1);
+ dsll32(t8, t8, 0);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_s(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_s(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_s(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (kArchVariant >= kMips64r6) {
+ max_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_right, nullptr, lt, src1, src2);
+ BranchF64(&return_left, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ dmfc1(t8, src1);
+ Branch(&return_left, eq, t8, Operand(zero_reg));
+ Branch(&return_right);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_d(dst, src1, src2);
+}
+
+void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1.is(src2)) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ BranchF64(nullptr, out_of_line, eq, src1, src2);
+
+ if (kArchVariant >= kMips64r6) {
+ min_d(dst, src1, src2);
+ } else {
+ Label return_left, return_right, done;
+
+ BranchF64(&return_left, nullptr, lt, src1, src2);
+ BranchF64(&return_right, nullptr, lt, src2, src1);
+
+ // Left equals right => check for -0.
+ dmfc1(t8, src1);
+ Branch(&return_right, eq, t8, Operand(zero_reg));
+ Branch(&return_left);
+
+ bind(&return_right);
+ if (!src2.is(dst)) {
+ Move_d(dst, src2);
+ }
+ Branch(&done);
+
+ bind(&return_left);
+ if (!src1.is(dst)) {
+ Move_d(dst, src1);
+ }
+
+ bind(&done);
+ }
+}
+
+void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ add_d(dst, src1, src2);
+}
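
The trivial-looking OutOfLine bodies are deliberate: the inline sequence branches out of line only when the operands compare unordered, i.e. at least one is NaN, and for that case min/max must simply produce a quiet NaN, which an addition does. A sketch of the contract:

    // Only reached when a or b is NaN; a + b then yields a quiet NaN, which
    // is the required min/max result for unordered operands.
    float Float32MaxOutOfLineSketch(float a, float b) { return a + b; }

    // Example: Float32Max(3.0f, NaN) takes the out-of-line path, returns NaN.
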
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
@@ -6595,18 +6411,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, Operand(kFlatOneByteStringMask));
- Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
-}
-
static const int kRegisterPassedArguments = 8;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -7042,40 +6846,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // current walks the prototype chain via the map of each object.
- Move(current, object);
- ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
- ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&end, eq, current, Operand(factory->null_value()));
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- Branch(found, lo, scratch1, Operand(JS_OBJECT_TYPE));
- lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
- ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
- Branch(&loop_again, ne, current, Operand(factory->null_value()));
-
- bind(&end);
-}
-
-
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
Register reg9, Register reg10) {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 5a1cf27c08..2b5115703e 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -236,6 +236,13 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
BranchDelaySlot bdslot = PROTECT);
+// Number of instructions needed to calculate the switch table entry address.
+#ifdef _MIPS_ARCH_MIPS64R6
+ static const int kSwitchTablePrologueSize = 6;
+#else
+ static const int kSwitchTablePrologueSize = 11;
+#endif
+
// GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
// functor/function with 'Label *func(size_t index)' declaration.
template <typename Func>
@@ -337,17 +344,6 @@ class MacroAssembler: public Assembler {
void Movt(Register rd, Register rs, uint16_t cc = 0);
void Movf(Register rd, Register rs, uint16_t cc = 0);
- // Min, Max macros.
- // On pre-r6 these functions may modify at and t8 registers.
- void MinNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_d(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MinNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
- void MaxNaNCheck_s(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* nan = nullptr);
-
void Clz(Register rd, Register rs);
// Jump unconditionally to given label.
@@ -592,32 +588,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_new,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are also clobbered
// when control continues at the gc_required label.
@@ -944,10 +914,13 @@ class MacroAssembler: public Assembler {
void SubNanPreservePayloadAndSign_d(FPURegister fd, FPURegister fs,
FPURegister ft);
- void Madd_d(FPURegister fd,
- FPURegister fr,
- FPURegister fs,
- FPURegister ft,
+ void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
+ FPURegister scratch);
+ void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
FPURegister scratch);
// Wrapper functions for the different cmp/branch types.
@@ -1095,17 +1068,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -1138,9 +1100,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -1217,29 +1180,6 @@ class MacroAssembler: public Assembler {
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Register scratch,
- Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg,
- Register key_reg,
- Register elements_reg,
- Register scratch1,
- Register scratch2,
- Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
// "branch_to" if the result of the comparison is "cond". If multiple map
@@ -1445,6 +1385,29 @@ class MacroAssembler: public Assembler {
Ret(ge, overflow_check, Operand(zero_reg), bd);
}
+ // Perform a floating-point min or max operation with the
+ // (IEEE-754-compatible) semantics of MIPS Release 6 MIN.fmt/MAX.fmt.
+ // Some cases, typically NaNs or +/-0.0, are expected to be rare and are
+ // handled in out-of-line code. The specific behaviour depends on supported
+ // instructions.
+ //
+ // If src1 and src2 are the same register, the value is simply moved to
+ // dst; otherwise the result is permitted to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
// -------------------------------------------------------------------------
// Runtime calls.
@@ -1706,10 +1669,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Source and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
  // Jump if the register contains a smi.
void JumpIfSmi(Register value,
Label* smi_label,
@@ -1779,11 +1738,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@@ -1871,20 +1825,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- Branch(memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
@@ -2014,7 +1954,8 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
// Ensure that dd-ed labels following this instruction use 8 bytes aligned
// addresses.
if (kArchVariant >= kMips64r6) {
- BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 6);
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
+ kSwitchTablePrologueSize);
    // Opposite of Align(8), as we have an odd number of instructions in this case.
if ((pc_offset() & 7) == 0) {
nop();
@@ -2024,7 +1965,8 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
ld(at, MemOperand(at));
} else {
Label here;
- BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 + 11);
+ BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
+ kSwitchTablePrologueSize);
Align(8);
push(ra);
bal(&here);
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 4a8e0076d9..591ddaf3a1 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -2475,11 +2475,11 @@ void Simulator::DecodeTypeRegisterSRsType() {
break;
case MADDF_S:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), fd + (fs * ft));
+ set_fpu_register_float(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_S:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg(), fd - (fs * ft));
+ set_fpu_register_float(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_S:
set_fpu_register_float(
@@ -2901,11 +2901,11 @@ void Simulator::DecodeTypeRegisterDRsType() {
break;
case MADDF_D:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), fd + (fs * ft));
+ set_fpu_register_double(fd_reg(), std::fma(fs, ft, fd));
break;
case MSUBF_D:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg(), fd - (fs * ft));
+ set_fpu_register_double(fd_reg(), std::fma(-fs, ft, fd));
break;
case MUL_D:
set_fpu_register_double(
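The MADDF/MSUBF changes above replace the two-rounding expression fd + (fs * ft) with a single-rounding fused multiply-add, matching the hardware semantics. A self-contained sketch of the difference; the operands are chosen only to make the extra rounding step visible, and the hex float literals require C++17:

#include <cmath>
#include <cstdio>

int main() {
  double fs = 1.0 + 0x1p-30;
  double ft = fs;
  double fd = -(1.0 + 0x1p-29);
  double fused = std::fma(fs, ft, fd);  // product kept exact, rounded once
  double split = fd + (fs * ft);        // product rounded before the add
  std::printf("fused=%a split=%a\n", fused, split);
  // fused is 0x1p-60: the low bits of fs*ft survive; split is 0.
  return 0;
}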
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index f7a1a71514..bffc8bdb3e 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -360,7 +360,10 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
kSourcePositionTableOffset);
STATIC_ASSERT(kSourcePositionTableOffset + kPointerSize ==
kTypeFeedbackInfoOffset);
- STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize == kNextCodeLinkOffset);
+ STATIC_ASSERT(kTypeFeedbackInfoOffset + kPointerSize ==
+ kProtectedInstructionOffset);
+ STATIC_ASSERT(kProtectedInstructionOffset + kPointerSize ==
+ kNextCodeLinkOffset);
static bool IsValidSlot(HeapObject* obj, int offset) {
// Slots in code can't be invalid because we never trim code objects.
@@ -462,6 +465,7 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
case JS_ARGUMENTS_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
case JS_PROMISE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
@@ -469,7 +473,6 @@ ReturnType BodyDescriptorApply(InstanceType type, T1 p1, T2 p2, T3 p3) {
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
case JS_SET_TYPE:
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 2580bfb397..986970444f 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -44,6 +44,8 @@ void Smi::SmiVerify() {
void HeapObject::HeapObjectVerify() {
+ VerifyHeapPointer(map());
+ CHECK(map()->IsMap());
InstanceType instance_type = map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
@@ -104,7 +106,6 @@ void HeapObject::HeapObjectVerify() {
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
- case JS_PROMISE_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
case JS_GENERATOR_OBJECT_TYPE:
@@ -143,9 +144,6 @@ void HeapObject::HeapObjectVerify() {
case JS_MODULE_NAMESPACE_TYPE:
JSModuleNamespace::cast(this)->JSModuleNamespaceVerify();
break;
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
- JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorVerify();
- break;
case JS_SET_TYPE:
JSSet::cast(this)->JSSetVerify();
break;
@@ -205,6 +203,12 @@ void HeapObject::HeapObjectVerify() {
case JS_WEAK_SET_TYPE:
JSWeakSet::cast(this)->JSWeakSetVerify();
break;
+ case JS_PROMISE_CAPABILITY_TYPE:
+ JSPromiseCapability::cast(this)->JSPromiseCapabilityVerify();
+ break;
+ case JS_PROMISE_TYPE:
+ JSPromise::cast(this)->JSPromiseVerify();
+ break;
case JS_REGEXP_TYPE:
JSRegExp::cast(this)->JSRegExpVerify();
break;
@@ -337,7 +341,9 @@ void JSObject::JSObjectVerify() {
DescriptorArray* descriptors = map()->instance_descriptors();
Isolate* isolate = GetIsolate();
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- if (descriptors->GetDetails(i).type() == DATA) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
Representation r = descriptors->GetDetails(i).representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(index)) {
@@ -471,7 +477,7 @@ void JSGeneratorObject::JSGeneratorObjectVerify() {
VerifyObjectField(kFunctionOffset);
VerifyObjectField(kContextOffset);
VerifyObjectField(kReceiverOffset);
- VerifyObjectField(kOperandStackOffset);
+ VerifyObjectField(kRegisterFileOffset);
VerifyObjectField(kContinuationOffset);
}
@@ -604,21 +610,29 @@ void JSFunction::JSFunctionVerify() {
void SharedFunctionInfo::SharedFunctionInfoVerify() {
CHECK(IsSharedFunctionInfo());
- VerifyObjectField(kNameOffset);
+
VerifyObjectField(kCodeOffset);
- VerifyObjectField(kOptimizedCodeMapOffset);
+ VerifyObjectField(kDebugInfoOffset);
VerifyObjectField(kFeedbackMetadataOffset);
- VerifyObjectField(kScopeInfoOffset);
- VerifyObjectField(kOuterScopeInfoOffset);
+ VerifyObjectField(kFunctionDataOffset);
+ VerifyObjectField(kFunctionIdentifierOffset);
VerifyObjectField(kInstanceClassNameOffset);
+ VerifyObjectField(kNameOffset);
+ VerifyObjectField(kOptimizedCodeMapOffset);
+ VerifyObjectField(kOuterScopeInfoOffset);
+ VerifyObjectField(kScopeInfoOffset);
+ VerifyObjectField(kScriptOffset);
+
CHECK(function_data()->IsUndefined(GetIsolate()) || IsApiFunction() ||
HasBytecodeArray() || HasAsmWasmData());
- VerifyObjectField(kFunctionDataOffset);
- VerifyObjectField(kScriptOffset);
- VerifyObjectField(kDebugInfoOffset);
+
CHECK(function_identifier()->IsUndefined(GetIsolate()) ||
HasBuiltinFunctionId() || HasInferredName());
- VerifyObjectField(kFunctionIdentifierOffset);
+
+ if (scope_info()->length() > 0) {
+ CHECK(kind() == scope_info()->function_kind());
+ CHECK_EQ(kind() == kModule, scope_info()->scope_type() == MODULE_SCOPE);
+ }
}
@@ -876,6 +890,35 @@ void JSWeakSet::JSWeakSetVerify() {
CHECK(table()->IsHashTable() || table()->IsUndefined(GetIsolate()));
}
+void JSPromiseCapability::JSPromiseCapabilityVerify() {
+ CHECK(IsJSPromiseCapability());
+ JSObjectVerify();
+ VerifyPointer(promise());
+ VerifyPointer(resolve());
+ VerifyPointer(reject());
+}
+
+void JSPromise::JSPromiseVerify() {
+ CHECK(IsJSPromise());
+ JSObjectVerify();
+ Isolate* isolate = GetIsolate();
+ VerifySmiField(kStatusOffset);
+ CHECK(result()->IsUndefined(isolate) || result()->IsObject());
+ CHECK(deferred_promise()->IsUndefined(isolate) ||
+ deferred_promise()->IsJSReceiver() ||
+ deferred_promise()->IsFixedArray());
+ CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
+ deferred_on_resolve()->IsCallable() ||
+ deferred_on_resolve()->IsFixedArray());
+ CHECK(deferred_on_reject()->IsUndefined(isolate) ||
+ deferred_on_reject()->IsCallable() ||
+ deferred_on_reject()->IsFixedArray());
+ CHECK(fulfill_reactions()->IsUndefined(isolate) ||
+ fulfill_reactions()->IsCallable() ||
+ fulfill_reactions()->IsFixedArray());
+ CHECK(reject_reactions()->IsUndefined(isolate) ||
+ reject_reactions()->IsCallable() || reject_reactions()->IsFixedArray());
+}
void JSRegExp::JSRegExpVerify() {
JSObjectVerify();
@@ -988,14 +1031,12 @@ void Box::BoxVerify() {
}
void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoVerify() {
- Isolate* isolate = GetIsolate();
CHECK(IsPromiseResolveThenableJobInfo());
CHECK(thenable()->IsJSReceiver());
CHECK(then()->IsJSReceiver());
CHECK(resolve()->IsJSFunction());
CHECK(reject()->IsJSFunction());
- CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
- CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+ VerifySmiField(kDebugIdOffset);
CHECK(context()->IsContext());
}
@@ -1003,10 +1044,17 @@ void PromiseReactionJobInfo::PromiseReactionJobInfoVerify() {
Isolate* isolate = GetIsolate();
CHECK(IsPromiseReactionJobInfo());
CHECK(value()->IsObject());
- CHECK(tasks()->IsJSArray() || tasks()->IsCallable());
- CHECK(deferred()->IsJSObject() || deferred()->IsUndefined(isolate));
- CHECK(debug_id()->IsNumber() || debug_id()->IsUndefined(isolate));
- CHECK(debug_name()->IsString() || debug_name()->IsUndefined(isolate));
+ CHECK(tasks()->IsFixedArray() || tasks()->IsCallable());
+ CHECK(deferred_promise()->IsUndefined(isolate) ||
+ deferred_promise()->IsJSReceiver() ||
+ deferred_promise()->IsFixedArray());
+ CHECK(deferred_on_resolve()->IsUndefined(isolate) ||
+ deferred_on_resolve()->IsCallable() ||
+ deferred_on_resolve()->IsFixedArray());
+ CHECK(deferred_on_reject()->IsUndefined(isolate) ||
+ deferred_on_reject()->IsCallable() ||
+ deferred_on_reject()->IsFixedArray());
+ VerifySmiField(kDebugIdOffset);
CHECK(context()->IsContext());
}
@@ -1015,16 +1063,6 @@ void JSModuleNamespace::JSModuleNamespaceVerify() {
VerifyPointer(module());
}
-void JSFixedArrayIterator::JSFixedArrayIteratorVerify() {
- CHECK(IsJSFixedArrayIterator());
-
- VerifyPointer(array());
- VerifyPointer(initial_next());
- VerifySmiField(kIndexOffset);
-
- CHECK_LE(index(), array()->length());
-}
-
void ModuleInfoEntry::ModuleInfoEntryVerify() {
Isolate* isolate = GetIsolate();
CHECK(IsModuleInfoEntry());
@@ -1078,6 +1116,12 @@ void PrototypeInfo::PrototypeInfoVerify() {
CHECK(validity_cell()->IsCell() || validity_cell()->IsSmi());
}
+void Tuple2::Tuple2Verify() {
+ CHECK(IsTuple2());
+ VerifyObjectField(kValue1Offset);
+ VerifyObjectField(kValue2Offset);
+}
+
void Tuple3::Tuple3Verify() {
CHECK(IsTuple3());
VerifyObjectField(kValue1Offset);
@@ -1091,6 +1135,11 @@ void ContextExtension::ContextExtensionVerify() {
VerifyObjectField(kExtensionOffset);
}
+void ConstantElementsPair::ConstantElementsPairVerify() {
+ CHECK(IsConstantElementsPair());
+ VerifySmiField(kElementsKindOffset);
+ VerifyObjectField(kConstantValuesOffset);
+}
void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1a8274cbf1..21278929c4 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -30,6 +30,8 @@
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
+#include "src/objects/module-info.h"
+#include "src/objects/scope-info.h"
#include "src/property.h"
#include "src/prototype.h"
#include "src/transitions-inl.h"
@@ -59,36 +61,23 @@ int PropertyDetails::field_width_in_words() const {
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
-#define TYPE_CHECKER(type, instancetype) \
- bool HeapObject::Is##type() const { \
- return map()->instance_type() == instancetype; \
- }
-
-#define CAST_ACCESSOR(type) \
- type* type::cast(Object* object) { \
- SLOW_DCHECK(object->Is##type()); \
- return reinterpret_cast<type*>(object); \
- } \
- const type* type::cast(const Object* object) { \
- SLOW_DCHECK(object->Is##type()); \
- return reinterpret_cast<const type*>(object); \
- }
-
-
#define INT_ACCESSORS(holder, name, offset) \
int holder::name() const { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
-#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
- type* holder::name() const { \
- DCHECK(condition); \
- return type::cast(READ_FIELD(this, offset)); \
- } \
- void holder::set_##name(type* value, WriteBarrierMode mode) { \
- DCHECK(condition); \
- WRITE_FIELD(this, offset, value); \
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
+#define ACCESSORS_CHECKED2(holder, name, type, offset, get_condition, \
+ set_condition) \
+ type* holder::name() const { \
+ DCHECK(get_condition); \
+ return type::cast(READ_FIELD(this, offset)); \
+ } \
+ void holder::set_##name(type* value, WriteBarrierMode mode) { \
+ DCHECK(set_condition); \
+ WRITE_FIELD(this, offset, value); \
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
+#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
+ ACCESSORS_CHECKED2(holder, name, type, offset, condition, condition)
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
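A minimal, self-contained illustration of the ACCESSORS_CHECKED2 pattern defined above, with an invented Widget class standing in for a heap object: the getter and setter may now assert different conditions, where ACCESSORS_CHECKED forces them to share one. V8's generated accessors read tagged fields and emit write barriers; plain members keep this sketch runnable.

#include <cassert>

class Widget {
 public:
  int payload() const {
    assert(initialized_);  // get_condition
    return payload_;
  }
  void set_payload(int value) {
    assert(!frozen_);  // set_condition, independent of get_condition
    payload_ = value;
  }

  bool initialized_ = true;
  bool frozen_ = false;

 private:
  int payload_ = 0;
};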
@@ -140,6 +129,62 @@ int PropertyDetails::field_width_in_words() const {
set_##field(BooleanBit::set(field(), offset, value)); \
}
+#define TYPE_CHECKER(type, instancetype) \
+ bool HeapObject::Is##type() const { \
+ return map()->instance_type() == instancetype; \
+ }
+
+TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
+TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
+TYPE_CHECKER(Cell, CELL_TYPE)
+TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(Foreign, FOREIGN_TYPE)
+TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
+TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
+TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
+TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
+TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
+TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
+TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
+TYPE_CHECKER(JSDate, JS_DATE_TYPE)
+TYPE_CHECKER(JSError, JS_ERROR_TYPE)
+TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
+TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
+TYPE_CHECKER(JSMap, JS_MAP_TYPE)
+TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
+TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
+TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
+TYPE_CHECKER(JSPromiseCapability, JS_PROMISE_CAPABILITY_TYPE)
+TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
+TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
+TYPE_CHECKER(JSSet, JS_SET_TYPE)
+TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
+TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
+TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
+TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
+TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
+TYPE_CHECKER(Map, MAP_TYPE)
+TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
+TYPE_CHECKER(Oddball, ODDBALL_TYPE)
+TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
+TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
+TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
+TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
+
+#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
+ TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
+TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
+#undef TYPED_ARRAY_TYPE_CHECKER
+
+#undef TYPE_CHECKER
+
bool HeapObject::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
@@ -150,18 +195,11 @@ bool HeapObject::IsFixedArray() const {
instance_type == TRANSITION_ARRAY_TYPE;
}
-
// External objects are not extensible, so the map check is enough.
bool HeapObject::IsExternal() const {
return map() == GetHeap()->external_map();
}
-
-TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
-TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
-TYPE_CHECKER(Symbol, SYMBOL_TYPE)
-TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
-
#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
SIMD128_TYPES(SIMD128_TYPE_CHECKER)
@@ -184,6 +222,16 @@ HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
+bool Object::IsNullOrUndefined(Isolate* isolate) const {
+ Heap* heap = isolate->heap();
+ return this == heap->null_value() || this == heap->undefined_value();
+}
+
+bool HeapObject::IsNullOrUndefined(Isolate* isolate) const {
+ Heap* heap = isolate->heap();
+ return this == heap->null_value() || this == heap->undefined_value();
+}
+
bool HeapObject::IsString() const {
return map()->instance_type() < FIRST_NONSTRING_TYPE;
}
@@ -266,12 +314,409 @@ bool HeapObject::IsExternalTwoByteString() const {
String::cast(this)->IsTwoByteRepresentation();
}
+bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
+
+bool HeapObject::IsFiller() const {
+ InstanceType instance_type = map()->instance_type();
+ return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
+}
+
+bool HeapObject::IsFixedTypedArrayBase() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
+ instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
+}
+
+bool HeapObject::IsJSReceiver() const {
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
+}
+
+bool HeapObject::IsJSObject() const {
+ STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
+ return map()->IsJSObjectMap();
+}
+
+bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
+
+bool HeapObject::IsJSArrayIterator() const {
+ InstanceType instance_type = map()->instance_type();
+ return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
+ instance_type <= LAST_ARRAY_ITERATOR_TYPE);
+}
+
+bool HeapObject::IsJSWeakCollection() const {
+ return IsJSWeakMap() || IsJSWeakSet();
+}
+
+bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
+
+bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
+
+bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
+
+bool HeapObject::IsArrayList() const { return IsFixedArray(); }
+
+bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
+
+bool Object::IsLayoutDescriptor() const {
+ return IsSmi() || IsFixedTypedArrayBase();
+}
+
+bool HeapObject::IsTypeFeedbackVector() const {
+ return map() == GetHeap()->type_feedback_vector_map();
+}
+
+bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
+
+bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
+
+bool HeapObject::IsDeoptimizationInputData() const {
+ // Must be a fixed array.
+ if (!IsFixedArray()) return false;
+
+ // There's no sure way to detect the difference between a fixed array and
+ // a deoptimization data array. Since this is only used for asserts, we
+ // check that the length is either zero or the fixed prefix size plus a
+ // multiple of the entry size.
+ int length = FixedArray::cast(this)->length();
+ if (length == 0) return true;
+
+ length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
+ return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
+}
+
+bool HeapObject::IsDeoptimizationOutputData() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a deoptimization data array. Since this is only used for asserts, we at
+ // least check that the length is plausible.
+ if (FixedArray::cast(this)->length() % 2 != 0) return false;
+ return true;
+}
+
+bool HeapObject::IsHandlerTable() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a handler table array.
+ return true;
+}
+
+bool HeapObject::IsTemplateList() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a template list.
+ if (FixedArray::cast(this)->length() < 1) return false;
+ return true;
+}
+
+bool HeapObject::IsDependentCode() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a dependent code array.
+ return true;
+}
+
+bool HeapObject::IsContext() const {
+ Map* map = this->map();
+ Heap* heap = GetHeap();
+ return (
+ map == heap->function_context_map() || map == heap->catch_context_map() ||
+ map == heap->with_context_map() || map == heap->native_context_map() ||
+ map == heap->block_context_map() || map == heap->module_context_map() ||
+ map == heap->eval_context_map() || map == heap->script_context_map() ||
+ map == heap->debug_evaluate_context_map());
+}
+
+bool HeapObject::IsNativeContext() const {
+ return map() == GetHeap()->native_context_map();
+}
+
+bool HeapObject::IsScriptContextTable() const {
+ return map() == GetHeap()->script_context_table_map();
+}
+
+bool HeapObject::IsScopeInfo() const {
+ return map() == GetHeap()->scope_info_map();
+}
+
+bool HeapObject::IsModuleInfo() const {
+ return map() == GetHeap()->module_info_map();
+}
+
+template <>
+inline bool Is<JSFunction>(Object* obj) {
+ return obj->IsJSFunction();
+}
+
+bool HeapObject::IsAbstractCode() const {
+ return IsBytecodeArray() || IsCode();
+}
+
+bool HeapObject::IsStringWrapper() const {
+ return IsJSValue() && JSValue::cast(this)->value()->IsString();
+}
+
+bool HeapObject::IsBoolean() const {
+ return IsOddball() &&
+ ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
+}
+
+bool HeapObject::IsJSArrayBufferView() const {
+ return IsJSDataView() || IsJSTypedArray();
+}
+
+template <>
+inline bool Is<JSArray>(Object* obj) {
+ return obj->IsJSArray();
+}
+
+bool HeapObject::IsHashTable() const {
+ return map() == GetHeap()->hash_table_map();
+}
+
+bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
+
+bool HeapObject::IsDictionary() const {
+ return IsHashTable() && this != GetHeap()->string_table();
+}
+
+bool Object::IsNameDictionary() const { return IsDictionary(); }
+
+bool Object::IsGlobalDictionary() const { return IsDictionary(); }
+
+bool Object::IsSeededNumberDictionary() const { return IsDictionary(); }
+
+bool HeapObject::IsUnseededNumberDictionary() const {
+ return map() == GetHeap()->unseeded_number_dictionary_map();
+}
+
+bool HeapObject::IsStringTable() const { return IsHashTable(); }
+
+bool HeapObject::IsStringSet() const { return IsHashTable(); }
+
+bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
+
+bool HeapObject::IsNormalizedMapCache() const {
+ return NormalizedMapCache::IsNormalizedMapCache(this);
+}
+
+int NormalizedMapCache::GetIndex(Handle<Map> map) {
+ return map->Hash() % NormalizedMapCache::kEntries;
+}
+
+bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
+ if (!obj->IsFixedArray()) return false;
+ if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
+ return false;
+ }
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
+ ->NormalizedMapCacheVerify();
+ }
+#endif
+ return true;
+}
+
+bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
+
+bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
+
+bool HeapObject::IsMapCache() const { return IsHashTable(); }
+
+bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
+
+bool HeapObject::IsOrderedHashTable() const {
+ return map() == GetHeap()->ordered_hash_table_map();
+}
+
+bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }
+
+bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }
+
+bool Object::IsPrimitive() const {
+ return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
+}
+
+bool HeapObject::IsJSGlobalProxy() const {
+ bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
+ DCHECK(!result || map()->is_access_check_needed());
+ return result;
+}
+
+bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
+
+bool HeapObject::IsAccessCheckNeeded() const {
+ if (IsJSGlobalProxy()) {
+ const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+ JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
+ return proxy->IsDetachedFrom(global);
+ }
+ return map()->is_access_check_needed();
+}
+
+bool HeapObject::IsStruct() const {
+ switch (map()->instance_type()) {
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE: \
+ return true;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ default:
+ return false;
+ }
+}
+
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
+ bool Object::Is##Name() const { \
+ return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
+ } \
+ bool HeapObject::Is##Name() const { \
+ return map()->instance_type() == NAME##_TYPE; \
+ }
+STRUCT_LIST(MAKE_STRUCT_PREDICATE)
+#undef MAKE_STRUCT_PREDICATE
+
+double Object::Number() const {
+ DCHECK(IsNumber());
+ return IsSmi()
+ ? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
+ : reinterpret_cast<const HeapNumber*>(this)->value();
+}
+
+bool Object::IsNaN() const {
+ return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
+}
+
+bool Object::IsMinusZero() const {
+ return this->IsHeapNumber() &&
+ i::IsMinusZero(HeapNumber::cast(this)->value());
+}
+
+// ------------------------------------
+// Cast operations
+
+#define CAST_ACCESSOR(type) \
+ type* type::cast(Object* object) { \
+ SLOW_DCHECK(object->Is##type()); \
+ return reinterpret_cast<type*>(object); \
+ } \
+ const type* type::cast(const Object* object) { \
+ SLOW_DCHECK(object->Is##type()); \
+ return reinterpret_cast<const type*>(object); \
+ }
+
+CAST_ACCESSOR(AbstractCode)
+CAST_ACCESSOR(ArrayList)
+CAST_ACCESSOR(Bool16x8)
+CAST_ACCESSOR(Bool32x4)
+CAST_ACCESSOR(Bool8x16)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(BytecodeArray)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(DeoptimizationInputData)
+CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCode)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(ExternalOneByteString)
+CAST_ACCESSOR(ExternalString)
+CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(Float32x4)
+CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FrameArray)
+CAST_ACCESSOR(GlobalDictionary)
+CAST_ACCESSOR(HandlerTable)
+CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(Int16x8)
+CAST_ACCESSOR(Int32x4)
+CAST_ACCESSOR(Int8x16)
+CAST_ACCESSOR(JSArray)
+CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSArrayBufferView)
+CAST_ACCESSOR(JSBoundFunction)
+CAST_ACCESSOR(JSDataView)
+CAST_ACCESSOR(JSDate)
+CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
+CAST_ACCESSOR(JSMap)
+CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModuleNamespace)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSReceiver)
+CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(JSPromiseCapability)
+CAST_ACCESSOR(JSPromise)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSStringIterator)
+CAST_ACCESSOR(JSArrayIterator)
+CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSValue)
+CAST_ACCESSOR(JSWeakCollection)
+CAST_ACCESSOR(JSWeakMap)
+CAST_ACCESSOR(JSWeakSet)
+CAST_ACCESSOR(LayoutDescriptor)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(ModuleInfo)
+CAST_ACCESSOR(Name)
+CAST_ACCESSOR(NameDictionary)
+CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(Object)
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(ObjectHashSet)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(OrderedHashMap)
+CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PropertyCell)
+CAST_ACCESSOR(TemplateList)
+CAST_ACCESSOR(RegExpMatchInfo)
+CAST_ACCESSOR(ScopeInfo)
+CAST_ACCESSOR(SeededNumberDictionary)
+CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(Simd128Value)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringSet)
+CAST_ACCESSOR(StringTable)
+CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(TemplateInfo)
+CAST_ACCESSOR(Uint16x8)
+CAST_ACCESSOR(Uint32x4)
+CAST_ACCESSOR(Uint8x16)
+CAST_ACCESSOR(UnseededNumberDictionary)
+CAST_ACCESSOR(WeakCell)
+CAST_ACCESSOR(WeakFixedArray)
+CAST_ACCESSOR(WeakHashTable)
+
+#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
+STRUCT_LIST(MAKE_STRUCT_CAST)
+#undef MAKE_STRUCT_CAST
+
+#undef CAST_ACCESSOR
+
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
-
bool Object::KeyEquals(Object* second) {
Object* first = this;
if (second->IsNumber()) {
@@ -289,7 +734,6 @@ bool Object::KeyEquals(Object* second) {
return Name::cast(first)->Equals(Name::cast(second));
}
-
bool Object::FilterKey(PropertyFilter filter) {
if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
@@ -300,17 +744,12 @@ bool Object::FilterKey(PropertyFilter filter) {
return false;
}
-
-Handle<Object> Object::NewStorageFor(Isolate* isolate,
- Handle<Object> object,
+Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
Representation representation) {
- if (representation.IsSmi() && object->IsUninitialized(isolate)) {
- return handle(Smi::kZero, isolate);
- }
if (!representation.IsDouble()) return object;
double value;
if (object->IsUninitialized(isolate)) {
- value = 0;
+ value = bit_cast<double>(kHoleNanInt64);
} else if (object->IsMutableHeapNumber()) {
value = HeapNumber::cast(*object)->value();
} else {
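The NewStorageFor change above stops initializing an uninitialized double field to 0 and instead stores a dedicated "hole" NaN bit pattern, so the runtime can tell "never written" apart from a genuine zero. A sketch of the trick; the bit pattern below is an assumption for illustration, the real kHoleNanInt64 value is V8-internal:

#include <cstdint>
#include <cstring>

int main() {
  const uint64_t kHoleNanBits = 0xFFF7FFFFFFF7FFFFull;  // assumed pattern
  double hole;
  std::memcpy(&hole, &kHoleNanBits, sizeof hole);  // bit_cast equivalent
  // Like any NaN, hole != hole; unlike an arithmetic NaN, its exact bit
  // pattern marks the field as uninitialized rather than zero.
  uint64_t bits;
  std::memcpy(&bits, &hole, sizeof bits);
  return bits == kHoleNanBits ? 0 : 1;
}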
@@ -319,9 +758,7 @@ Handle<Object> Object::NewStorageFor(Isolate* isolate,
return isolate->factory()->NewHeapNumber(value, MUTABLE);
}
-
-Handle<Object> Object::WrapForRead(Isolate* isolate,
- Handle<Object> object,
+Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
Representation representation) {
DCHECK(!object->IsUninitialized(isolate));
if (!representation.IsDouble()) {
@@ -331,48 +768,39 @@ Handle<Object> Object::WrapForRead(Isolate* isolate,
return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
}
-
StringShape::StringShape(const String* str)
- : type_(str->map()->instance_type()) {
+ : type_(str->map()->instance_type()) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
-
-StringShape::StringShape(Map* map)
- : type_(map->instance_type()) {
+StringShape::StringShape(Map* map) : type_(map->instance_type()) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
-
-StringShape::StringShape(InstanceType t)
- : type_(static_cast<uint32_t>(t)) {
+StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
-
bool StringShape::IsInternalized() {
DCHECK(valid());
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
- (kStringTag | kInternalizedTag);
+ (kStringTag | kInternalizedTag);
}
-
bool String::IsOneByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
-
bool String::IsTwoByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
-
bool String::IsOneByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
@@ -388,7 +816,6 @@ bool String::IsOneByteRepresentationUnderneath() {
}
}
-
bool String::IsTwoByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
@@ -404,94 +831,75 @@ bool String::IsTwoByteRepresentationUnderneath() {
}
}
-
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
IsOneByteRepresentation();
}
-
bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
-
bool StringShape::IsSliced() {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
-
bool StringShape::IsIndirect() {
return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}
-
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
-
bool StringShape::IsSequential() {
return (type_ & kStringRepresentationMask) == kSeqStringTag;
}
-
StringRepresentationTag StringShape::representation_tag() {
uint32_t tag = (type_ & kStringRepresentationMask);
return static_cast<StringRepresentationTag>(tag);
}
-
-uint32_t StringShape::encoding_tag() {
- return type_ & kStringEncodingMask;
-}
-
+uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
uint32_t StringShape::full_representation_tag() {
return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}
-
STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
- Internals::kFullStringRepresentationMask);
+ Internals::kFullStringRepresentationMask);
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
- Internals::kStringEncodingMask);
-
+ Internals::kStringEncodingMask);
bool StringShape::IsSequentialOneByte() {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
-
bool StringShape::IsSequentialTwoByte() {
return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}
-
bool StringShape::IsExternalOneByte() {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
-
STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
Internals::kExternalOneByteRepresentationTag);
STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
-
bool StringShape::IsExternalTwoByte() {
return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}
-
STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
- Internals::kExternalTwoByteRepresentationTag);
+ Internals::kExternalTwoByteRepresentationTag);
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
-
uc32 FlatStringReader::Get(int index) {
if (is_one_byte_) {
return Get<uint8_t>(index);
@@ -500,7 +908,6 @@ uc32 FlatStringReader::Get(int index) {
}
}
-
template <typename Char>
Char FlatStringReader::Get(int index) {
DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
@@ -512,18 +919,15 @@ Char FlatStringReader::Get(int index) {
}
}
-
Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
return key->AsHandle(isolate);
}
-
Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
}
-
Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
@@ -533,19 +937,17 @@ template <typename Char>
class SequentialStringKey : public HashTableKey {
public:
explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
+ : string_(string), hash_field_(0), seed_(seed) {}
uint32_t Hash() override {
- hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
- string_.length(),
- seed_);
+ hash_field_ = StringHasher::HashSequentialString<Char>(
+ string_.start(), string_.length(), seed_);
uint32_t result = hash_field_ >> String::kHashShift;
DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
-
uint32_t HashForObject(Object* other) override {
return String::cast(other)->Hash();
}
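SequentialStringKey above follows the standard HashTableKey shape: hash lazily from the raw characters, never report a hash of 0 (the DCHECK), and compare against an existing table entry only on demand. A self-contained toy version, with FNV-1a standing in for StringHasher::HashSequentialString and an invented class name:

#include <cstdint>
#include <string>

class ToyStringKey {
 public:
  ToyStringKey(std::string s, uint32_t seed)
      : string_(std::move(s)), hash_(0), seed_(seed) {}

  uint32_t Hash() {
    if (hash_ == 0) {
      uint32_t h = 2166136261u ^ seed_;  // FNV-1a offset basis, seeded
      for (unsigned char c : string_) h = (h ^ c) * 16777619u;
      hash_ = (h == 0) ? 1 : h;  // never hand out 0, mirroring the DCHECK
    }
    return hash_;
  }

  bool IsMatch(const std::string& other) const { return string_ == other; }

 private:
  std::string string_;
  uint32_t hash_;
  uint32_t seed_;
};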
@@ -555,11 +957,10 @@ class SequentialStringKey : public HashTableKey {
uint32_t seed_;
};
-
class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
- : SequentialStringKey<uint8_t>(str, seed) { }
+ : SequentialStringKey<uint8_t>(str, seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsOneByteEqualTo(string_);
@@ -568,7 +969,6 @@ class OneByteStringKey : public SequentialStringKey<uint8_t> {
Handle<Object> AsHandle(Isolate* isolate) override;
};
-
class SeqOneByteSubStringKey : public HashTableKey {
public:
SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
@@ -601,11 +1001,10 @@ class SeqOneByteSubStringKey : public HashTableKey {
uint32_t hash_field_;
};
-
class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
- : SequentialStringKey<uc16>(str, seed) { }
+ : SequentialStringKey<uc16>(str, seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsTwoByteEqualTo(string_);
@@ -614,12 +1013,11 @@ class TwoByteStringKey : public SequentialStringKey<uc16> {
Handle<Object> AsHandle(Isolate* isolate) override;
};
-
 // Utf8StringKey carries a vector of chars as its key.
class Utf8StringKey : public HashTableKey {
public:
explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
- : string_(string), hash_field_(0), seed_(seed) { }
+ : string_(string), hash_field_(0), seed_(seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsUtf8EqualTo(string_);
@@ -639,8 +1037,8 @@ class Utf8StringKey : public HashTableKey {
Handle<Object> AsHandle(Isolate* isolate) override {
if (hash_field_ == 0) Hash();
- return isolate->factory()->NewInternalizedStringFromUtf8(
- string_, chars_, hash_field_);
+ return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
+ hash_field_);
}
Vector<const char> string_;
@@ -649,361 +1047,6 @@ class Utf8StringKey : public HashTableKey {
uint32_t seed_;
};
-
-bool Object::IsNumber() const {
- return IsSmi() || IsHeapNumber();
-}
-
-
-TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
-TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
-TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
-
-bool HeapObject::IsFiller() const {
- InstanceType instance_type = map()->instance_type();
- return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
-}
-
-
-
-#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
- TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
-
-TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
-#undef TYPED_ARRAY_TYPE_CHECKER
-
-bool HeapObject::IsFixedTypedArrayBase() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
- instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
-}
-
-bool HeapObject::IsJSReceiver() const {
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
-}
-
-bool HeapObject::IsJSObject() const {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return map()->IsJSObjectMap();
-}
-
-bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
-
-bool HeapObject::IsJSArrayIterator() const {
- InstanceType instance_type = map()->instance_type();
- return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
- instance_type <= LAST_ARRAY_ITERATOR_TYPE);
-}
-
-TYPE_CHECKER(JSSet, JS_SET_TYPE)
-TYPE_CHECKER(JSMap, JS_MAP_TYPE)
-TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
-TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
-TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
-TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
-TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
-TYPE_CHECKER(Map, MAP_TYPE)
-TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
-TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
-TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
-TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
-TYPE_CHECKER(JSFixedArrayIterator, JS_FIXED_ARRAY_ITERATOR_TYPE)
-
-bool HeapObject::IsJSWeakCollection() const {
- return IsJSWeakMap() || IsJSWeakSet();
-}
-
-bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
-
-bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
-
-bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
-
-bool HeapObject::IsArrayList() const { return IsFixedArray(); }
-
-bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
-
-bool Object::IsLayoutDescriptor() const {
- return IsSmi() || IsFixedTypedArrayBase();
-}
-
-bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }
-
-bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
-
-bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
-
-bool HeapObject::IsDeoptimizationInputData() const {
- // Must be a fixed array.
- if (!IsFixedArray()) return false;
-
- // There's no sure way to detect the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can
- // check that the length is zero or else the fixed size plus a multiple of
- // the entry size.
- int length = FixedArray::cast(this)->length();
- if (length == 0) return true;
-
- length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
- return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
-}
-
-bool HeapObject::IsDeoptimizationOutputData() const {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a deoptimization data array. Since this is used for asserts we can check
- // that the length is plausible though.
- if (FixedArray::cast(this)->length() % 2 != 0) return false;
- return true;
-}
-
-bool HeapObject::IsHandlerTable() const {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a handler table array.
- return true;
-}
-
-bool HeapObject::IsTemplateList() const {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a template list.
- if (FixedArray::cast(this)->length() < 1) return false;
- return true;
-}
-
-bool HeapObject::IsDependentCode() const {
- if (!IsFixedArray()) return false;
- // There's actually no way to see the difference between a fixed array and
- // a dependent codes array.
- return true;
-}
-
-bool HeapObject::IsContext() const {
- Map* map = this->map();
- Heap* heap = GetHeap();
- return (
- map == heap->function_context_map() || map == heap->catch_context_map() ||
- map == heap->with_context_map() || map == heap->native_context_map() ||
- map == heap->block_context_map() || map == heap->module_context_map() ||
- map == heap->script_context_map() ||
- map == heap->debug_evaluate_context_map());
-}
-
-bool HeapObject::IsNativeContext() const {
- return map() == GetHeap()->native_context_map();
-}
-
-bool HeapObject::IsScriptContextTable() const {
- return map() == GetHeap()->script_context_table_map();
-}
-
-bool HeapObject::IsScopeInfo() const {
- return map() == GetHeap()->scope_info_map();
-}
-
-bool HeapObject::IsModuleInfo() const {
- return map() == GetHeap()->module_info_map();
-}
-
-TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
-TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
-
-
-template <> inline bool Is<JSFunction>(Object* obj) {
- return obj->IsJSFunction();
-}
-
-
-TYPE_CHECKER(Code, CODE_TYPE)
-TYPE_CHECKER(Oddball, ODDBALL_TYPE)
-TYPE_CHECKER(Cell, CELL_TYPE)
-TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
-TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
-TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
-TYPE_CHECKER(JSDate, JS_DATE_TYPE)
-TYPE_CHECKER(JSError, JS_ERROR_TYPE)
-TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
-TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
-TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
-TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
-
-bool HeapObject::IsAbstractCode() const {
- return IsBytecodeArray() || IsCode();
-}
-
-bool HeapObject::IsStringWrapper() const {
- return IsJSValue() && JSValue::cast(this)->value()->IsString();
-}
-
-
-TYPE_CHECKER(Foreign, FOREIGN_TYPE)
-
-bool HeapObject::IsBoolean() const {
- return IsOddball() &&
- ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
-}
-
-
-TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
-TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
-TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
-TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
-
-bool HeapObject::IsJSArrayBufferView() const {
- return IsJSDataView() || IsJSTypedArray();
-}
-
-
-TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
-
-
-template <> inline bool Is<JSArray>(Object* obj) {
- return obj->IsJSArray();
-}
-
-bool HeapObject::IsHashTable() const {
- return map() == GetHeap()->hash_table_map();
-}
-
-bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
-
-bool HeapObject::IsDictionary() const {
- return IsHashTable() && this != GetHeap()->string_table();
-}
-
-
-bool Object::IsNameDictionary() const {
- return IsDictionary();
-}
-
-
-bool Object::IsGlobalDictionary() const { return IsDictionary(); }
-
-
-bool Object::IsSeededNumberDictionary() const {
- return IsDictionary();
-}
-
-bool HeapObject::IsUnseededNumberDictionary() const {
- return map() == GetHeap()->unseeded_number_dictionary_map();
-}
-
-bool HeapObject::IsStringTable() const { return IsHashTable(); }
-
-bool HeapObject::IsStringSet() const { return IsHashTable(); }
-
-bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
-
-bool HeapObject::IsNormalizedMapCache() const {
- return NormalizedMapCache::IsNormalizedMapCache(this);
-}
-
-
-int NormalizedMapCache::GetIndex(Handle<Map> map) {
- return map->Hash() % NormalizedMapCache::kEntries;
-}
-
-bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
- if (!obj->IsFixedArray()) return false;
- if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
- return false;
- }
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
- ->NormalizedMapCacheVerify();
- }
-#endif
- return true;
-}
-
-bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
-
-bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
-
-bool HeapObject::IsMapCache() const { return IsHashTable(); }
-
-bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
-
-bool HeapObject::IsOrderedHashTable() const {
- return map() == GetHeap()->ordered_hash_table_map();
-}
-
-
-bool Object::IsOrderedHashSet() const {
- return IsOrderedHashTable();
-}
-
-
-bool Object::IsOrderedHashMap() const {
- return IsOrderedHashTable();
-}
-
-
-bool Object::IsPrimitive() const {
- return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
-}
-
-bool HeapObject::IsJSGlobalProxy() const {
- bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
- DCHECK(!result || map()->is_access_check_needed());
- return result;
-}
-
-
-TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
-
-bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
-
-bool HeapObject::IsAccessCheckNeeded() const {
- if (IsJSGlobalProxy()) {
- const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
- JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
- return proxy->IsDetachedFrom(global);
- }
- return map()->is_access_check_needed();
-}
-
-bool HeapObject::IsStruct() const {
- switch (map()->instance_type()) {
-#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return false;
- }
-}
-
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
- bool Object::Is##Name() const { \
- return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
- } \
- bool HeapObject::Is##Name() const { \
- return map()->instance_type() == NAME##_TYPE; \
- }
-STRUCT_LIST(MAKE_STRUCT_PREDICATE)
-#undef MAKE_STRUCT_PREDICATE
-
-double Object::Number() const {
- DCHECK(IsNumber());
- return IsSmi()
- ? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
- : reinterpret_cast<const HeapNumber*>(this)->value();
-}
-
-
-bool Object::IsNaN() const {
- return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
-}
-
-
-bool Object::IsMinusZero() const {
- return this->IsHeapNumber() &&
- i::IsMinusZero(HeapNumber::cast(this)->value());
-}
-
-
Representation Object::OptimalRepresentation() {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
@@ -1051,12 +1094,7 @@ bool Object::ToUint32(uint32_t* value) {
}
if (IsHeapNumber()) {
double num = HeapNumber::cast(this)->value();
- if (num < 0) return false;
- uint32_t uint_value = FastD2UI(num);
- if (FastUI2D(uint_value) == num) {
- *value = uint_value;
- return true;
- }
+ return DoubleToUint32IfEqualToSelf(num, value);
}
return false;
}
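The hunk above folds the open-coded round trip into a DoubleToUint32IfEqualToSelf helper. A minimal sketch of the check, reconstructed from the removed lines (the real helper's implementation may differ):

#include <cstdint>

bool DoubleToUint32IfEqualToSelfSketch(double num, uint32_t* value) {
  // Reject NaN, negatives and anything >= 2^32 before the cast (the cast
  // would otherwise be undefined behaviour for such inputs).
  if (!(num >= 0.0 && num < 4294967296.0)) return false;
  uint32_t candidate = static_cast<uint32_t>(num);  // like FastD2UI
  if (static_cast<double>(candidate) != num) return false;  // fractional part
  *value = candidate;
  return true;
}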
@@ -1076,12 +1114,64 @@ MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
}
// static
+MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
+ Handle<Object> value) {
+ if (value->IsSmi() || HeapObject::cast(*value)->IsName()) return value;
+ return ConvertToPropertyKey(isolate, value);
+}
+
+// static
MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
ToPrimitiveHint hint) {
if (input->IsPrimitive()) return input;
return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
}
+// static
+MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+ if (input->IsNumber()) return input;
+ return ConvertToNumber(HeapObject::cast(*input)->GetIsolate(), input);
+}
+
+// static
+MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return input;
+ return ConvertToInteger(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return input;
+ return ConvertToInt32(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
+ return ConvertToUint32(isolate, input);
+}
+
+// static
+MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
+ if (input->IsString()) return Handle<String>::cast(input);
+ return ConvertToString(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+ if (input->IsSmi()) {
+ int value = std::max(Smi::cast(*input)->value(), 0);
+ return handle(Smi::FromInt(value), isolate);
+ }
+ return ConvertToLength(isolate, input);
+}
+
+// static
+MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
+ MessageTemplate::Template error_index) {
+ if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
+ return ConvertToIndex(isolate, input, error_index);
+}
bool Object::HasSpecificClassOf(String* name) {
return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
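Object::ToLength above adds a Smi fast path, clamping a small integer at 0 with no allocation, in front of the out-of-line ConvertToLength. For reference, the full ECMAScript ToLength conversion it implements, sketched over plain doubles:

#include <algorithm>
#include <cmath>

// ToLength(x): truncate toward zero, then clamp into [0, 2^53 - 1].
double ToLengthSketch(double input) {
  if (std::isnan(input)) return 0.0;   // NaN converts to +0
  double integer = std::trunc(input);  // ToInteger
  return std::min(std::max(integer, 0.0), 9007199254740991.0);  // 2^53 - 1
}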
@@ -2093,8 +2183,10 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
+ case JS_PROMISE_CAPABILITY_TYPE:
+ return JSPromiseCapability::kSize;
case JS_PROMISE_TYPE:
- return JSObject::kHeaderSize;
+ return JSPromise::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -2107,14 +2199,21 @@ int JSObject::GetHeaderSize(InstanceType type) {
return JSObject::kHeaderSize;
case JS_STRING_ITERATOR_TYPE:
return JSStringIterator::kSize;
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
- return JSFixedArrayIterator::kHeaderSize;
+ case JS_MODULE_NAMESPACE_TYPE:
+ return JSModuleNamespace::kHeaderSize;
default:
+ if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
+ type <= LAST_ARRAY_ITERATOR_TYPE) {
+ return JSArrayIterator::kSize;
+ }
UNREACHABLE();
return 0;
}
}
+inline bool IsSpecialReceiverInstanceType(InstanceType instance_type) {
+ return instance_type <= LAST_SPECIAL_RECEIVER_TYPE;
+}
int JSObject::GetInternalFieldCount(Map* map) {
int instance_size = map->instance_size();
@@ -2223,7 +2322,8 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
void JSObject::WriteToField(int descriptor, PropertyDetails details,
Object* value) {
- DCHECK(details.type() == DATA);
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(kData, details.kind());
DisallowHeapAllocation no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
@@ -2337,7 +2437,7 @@ void Object::VerifyApiCallResultType() {
Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length());
- return READ_FIELD(this, kHeaderSize + index * kPointerSize);
+ return NOBARRIER_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
@@ -2366,7 +2466,7 @@ void FixedArray::set(int index, Smi* value) {
DCHECK(index >= 0 && index < this->length());
DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
+ NOBARRIER_WRITE_FIELD(this, offset, value);
}
@@ -2376,7 +2476,7 @@ void FixedArray::set(int index, Object* value) {
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
- WRITE_FIELD(this, offset, value);
+ NOBARRIER_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
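The hunks above make FixedArray::get/set use NOBARRIER_* field accesses, keeping the generational write barrier as a separate explicit step. A toy model of what that barrier is for, with invented types: an old-to-new pointer store must be recorded so the scavenger can find it.

#include <unordered_set>

struct ToyHeap {
  std::unordered_set<void**> remembered_set;
  // Stand-in predicate; a real heap inspects the page the value lives on.
  bool InNewSpace(void* value) const { return value != nullptr; }
};

// Raw store plus explicit barrier, mirroring FixedArray::set above.
void ToySet(ToyHeap* heap, void** slot, void* value) {
  *slot = value;                        // NOBARRIER_WRITE_FIELD equivalent
  if (heap->InNewSpace(value)) {        // WRITE_BARRIER equivalent
    heap->remembered_set.insert(slot);  // record the old-to-new edge
  }
}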
@@ -2420,6 +2520,9 @@ void FixedDoubleArray::set(int index, double value) {
DCHECK(!is_the_hole(index));
}
+void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
+ set_the_hole(index);
+}
void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
@@ -2597,8 +2700,9 @@ AllocationAlignment HeapObject::RequiredAlignment() {
void FixedArray::set(int index,
Object* value,
WriteBarrierMode mode) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < this->length());
+ DCHECK_NE(map(), GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
@@ -2608,45 +2712,38 @@ void FixedArray::set(int index,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < array->length());
+ DCHECK_NE(array->map(), array->GetHeap()->fixed_cow_array_map());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, array->length());
DCHECK(!array->GetHeap()->InNewSpace(value));
NOBARRIER_WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
}
-
void FixedArray::set_undefined(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < this->length());
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
- WRITE_FIELD(this,
- kHeaderSize + index * kPointerSize,
- GetHeap()->undefined_value());
+ set_undefined(GetIsolate(), index);
}
-
-void FixedArray::set_null(int index) {
- DCHECK(index >= 0 && index < this->length());
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value()));
- WRITE_FIELD(this,
- kHeaderSize + index * kPointerSize,
- GetHeap()->null_value());
+void FixedArray::set_undefined(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index,
+ isolate->heap()->undefined_value());
}
+void FixedArray::set_null(int index) { set_null(GetIsolate(), index); }
-void FixedArray::set_the_hole(int index) {
- DCHECK(map() != GetHeap()->fixed_cow_array_map());
- DCHECK(index >= 0 && index < this->length());
- DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
- WRITE_FIELD(this,
- kHeaderSize + index * kPointerSize,
- GetHeap()->the_hole_value());
+void FixedArray::set_null(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->null_value());
}
+void FixedArray::set_the_hole(int index) { set_the_hole(GetIsolate(), index); }
+
+void FixedArray::set_the_hole(Isolate* isolate, int index) {
+ FixedArray::NoWriteBarrierSet(this, index, isolate->heap()->the_hole_value());
+}
void FixedArray::FillWithHoles(int from, int to) {
+ Isolate* isolate = GetIsolate();
for (int i = from; i < to; i++) {
- set_the_hole(i);
+ set_the_hole(isolate, i);
}
}
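The setters gain Isolate-taking overloads so that a hot loop like FillWithHoles can resolve the isolate once instead of calling GetIsolate() per element, and they all funnel through NoWriteBarrierSet, which is safe because undefined, null and the hole are immortal roots that never live in new space. A generic sketch of the hoisting pattern, under invented Context/SetHole names:

    #include <vector>

    struct Context { int the_hole = -1; };  // stand-in for the Isolate's roots

    Context* GetContext() { static Context c; return &c; }  // imagine this is slow

    // The loop body takes the context explicitly, mirroring the new
    // set_the_hole(Isolate*, int) overload, so the lookup is not repeated.
    void SetHole(Context* ctx, std::vector<int>& a, int i) { a[i] = ctx->the_hole; }

    void FillWithHoles(std::vector<int>& a, int from, int to) {
      Context* ctx = GetContext();  // hoisted: one lookup for the whole loop
      for (int i = from; i < to; i++) SetHole(ctx, a, i);
    }

    int main() {
      std::vector<int> a(8, 0);
      FillWithHoles(a, 2, 6);
      return a[2] == -1 ? 0 : 1;
    }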
@@ -2940,26 +3037,6 @@ FixedArrayBase* Map::GetInitialElements() {
return result;
}
-// static
-Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type,
- StoreMode store_mode) {
- return Reconfigure(map, map->elements_kind(), modify_index, new_kind,
- new_attributes, new_representation, new_field_type,
- store_mode);
-}
-
-// static
-Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
- ElementsKind new_elements_kind) {
- return Reconfigure(map, new_elements_kind, -1, kData, NONE,
- Representation::None(), FieldType::None(map->GetIsolate()),
- ALLOW_IN_DESCRIPTOR);
-}
-
Object** DescriptorArray::GetKeySlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
@@ -2998,15 +3075,6 @@ void DescriptorArray::SetSortedKey(int descriptor_index, int pointer) {
}
-void DescriptorArray::SetRepresentation(int descriptor_index,
- Representation representation) {
- DCHECK(!representation.IsNone());
- PropertyDetails details = GetDetails(descriptor_index);
- set(ToDetailsIndex(descriptor_index),
- details.CopyWithRepresentation(representation).AsSmi());
-}
-
-
Object** DescriptorArray::GetValueSlot(int descriptor_number) {
DCHECK(descriptor_number < number_of_descriptors());
return RawFieldOfElementAt(ToValueIndex(descriptor_number));
@@ -3036,57 +3104,36 @@ PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
}
-PropertyType DescriptorArray::GetType(int descriptor_number) {
- return GetDetails(descriptor_number).type();
-}
-
-
int DescriptorArray::GetFieldIndex(int descriptor_number) {
DCHECK(GetDetails(descriptor_number).location() == kField);
return GetDetails(descriptor_number).field_index();
}
-Object* DescriptorArray::GetConstant(int descriptor_number) {
- return GetValue(descriptor_number);
-}
-
-
-Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
- DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
- return GetValue(descriptor_number);
-}
-
-
-AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
- DCHECK(GetType(descriptor_number) == ACCESSOR_CONSTANT);
- Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
- return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
+FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
+ DCHECK(GetDetails(descriptor_number).location() == kField);
+ Object* wrapped_type = GetValue(descriptor_number);
+ return Map::UnwrapFieldType(wrapped_type);
}
-
void DescriptorArray::Get(int descriptor_number, Descriptor* desc) {
desc->Init(handle(GetKey(descriptor_number), GetIsolate()),
handle(GetValue(descriptor_number), GetIsolate()),
GetDetails(descriptor_number));
}
-
-void DescriptorArray::SetDescriptor(int descriptor_number, Descriptor* desc) {
+void DescriptorArray::Set(int descriptor_number, Name* key, Object* value,
+ PropertyDetails details) {
// Range check.
DCHECK(descriptor_number < number_of_descriptors());
- set(ToKeyIndex(descriptor_number), *desc->GetKey());
- set(ToValueIndex(descriptor_number), *desc->GetValue());
- set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
+ set(ToKeyIndex(descriptor_number), key);
+ set(ToValueIndex(descriptor_number), value);
+ set(ToDetailsIndex(descriptor_number), details.AsSmi());
}
-
void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
- // Range check.
- DCHECK(descriptor_number < number_of_descriptors());
-
- set(ToKeyIndex(descriptor_number), *desc->GetKey());
- set(ToValueIndex(descriptor_number), *desc->GetValue());
- set(ToDetailsIndex(descriptor_number), desc->GetDetails().AsSmi());
+ Name* key = *desc->GetKey();
+ Object* value = *desc->GetValue();
+ Set(descriptor_number, key, value, desc->GetDetails());
}
@@ -3117,14 +3164,6 @@ void DescriptorArray::SwapSortedKeys(int first, int second) {
}
-PropertyType DescriptorArray::Entry::type() { return descs_->GetType(index_); }
-
-
-Object* DescriptorArray::Entry::GetCallbackObject() {
- return descs_->GetValue(index_);
-}
-
-
int HashTableBase::NumberOfElements() {
return Smi::cast(get(kNumberOfElementsIndex))->value();
}
@@ -3272,107 +3311,6 @@ void SeededNumberDictionary::set_requires_slow_elements() {
}
-// ------------------------------------
-// Cast operations
-
-CAST_ACCESSOR(AbstractCode)
-CAST_ACCESSOR(ArrayList)
-CAST_ACCESSOR(Bool16x8)
-CAST_ACCESSOR(Bool32x4)
-CAST_ACCESSOR(Bool8x16)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(BytecodeArray)
-CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(Code)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(ConsString)
-CAST_ACCESSOR(DeoptimizationInputData)
-CAST_ACCESSOR(DeoptimizationOutputData)
-CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(DescriptorArray)
-CAST_ACCESSOR(ExternalOneByteString)
-CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedArrayBase)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
-CAST_ACCESSOR(Float32x4)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(FrameArray)
-CAST_ACCESSOR(GlobalDictionary)
-CAST_ACCESSOR(HandlerTable)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(Int16x8)
-CAST_ACCESSOR(Int32x4)
-CAST_ACCESSOR(Int8x16)
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayBuffer)
-CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSBoundFunction)
-CAST_ACCESSOR(JSDataView)
-CAST_ACCESSOR(JSDate)
-CAST_ACCESSOR(JSFunction)
-CAST_ACCESSOR(JSGeneratorObject)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSMapIterator)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSModuleNamespace)
-CAST_ACCESSOR(JSFixedArrayIterator)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(JSProxy)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSSet)
-CAST_ACCESSOR(JSSetIterator)
-CAST_ACCESSOR(JSStringIterator)
-CAST_ACCESSOR(JSArrayIterator)
-CAST_ACCESSOR(JSTypedArray)
-CAST_ACCESSOR(JSValue)
-CAST_ACCESSOR(JSWeakCollection)
-CAST_ACCESSOR(JSWeakMap)
-CAST_ACCESSOR(JSWeakSet)
-CAST_ACCESSOR(LayoutDescriptor)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(ModuleInfo)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(NameDictionary)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(Object)
-CAST_ACCESSOR(ObjectHashTable)
-CAST_ACCESSOR(ObjectHashSet)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(OrderedHashMap)
-CAST_ACCESSOR(OrderedHashSet)
-CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(TemplateList)
-CAST_ACCESSOR(RegExpMatchInfo)
-CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(SeededNumberDictionary)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Simd128Value)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(StringSet)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(Symbol)
-CAST_ACCESSOR(TemplateInfo)
-CAST_ACCESSOR(Uint16x8)
-CAST_ACCESSOR(Uint32x4)
-CAST_ACCESSOR(Uint8x16)
-CAST_ACCESSOR(UnseededNumberDictionary)
-CAST_ACCESSOR(WeakCell)
-CAST_ACCESSOR(WeakFixedArray)
-CAST_ACCESSOR(WeakHashTable)
-
template <class T>
PodArray<T>* PodArray<T>::cast(Object* object) {
SLOW_DCHECK(object->IsByteArray());
@@ -3603,11 +3541,6 @@ int HandlerTable::NumberOfRangeEntries() const {
return length() / kRangeEntrySize;
}
-#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
- STRUCT_LIST(MAKE_STRUCT_CAST)
-#undef MAKE_STRUCT_CAST
-
-
template <typename Derived, typename Shape, typename Key>
HashTable<Derived, Shape, Key>*
HashTable<Derived, Shape, Key>::cast(Object* obj) {
@@ -3704,7 +3637,7 @@ ACCESSORS(Symbol, name, Object, kNameOffset)
SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
BOOL_ACCESSORS(Symbol, flags, is_well_known_symbol, kWellKnownSymbolBit)
-
+BOOL_ACCESSORS(Symbol, flags, is_public, kPublicBit)
bool String::Equals(String* other) {
if (other == this) return true;
@@ -3854,6 +3787,12 @@ inline Vector<const uc16> String::GetCharVector() {
return flat.ToUC16Vector();
}
+uint32_t String::ToValidIndex(Object* number) {
+ uint32_t index = PositiveNumberToUint32(number);
+ uint32_t length_value = static_cast<uint32_t>(length());
+ if (index > length_value) return length_value;
+ return index;
+}
uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
DCHECK(index >= 0 && index < length());
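String::ToValidIndex is a saturating clamp: the converted number is capped at the string length, so any out-of-range position becomes length(). Assuming PositiveNumberToUint32 has already produced the raw index, the body is equivalent to this one-liner:

    #include <algorithm>
    #include <cstdint>

    // Equivalent of ToValidIndex once the input has been converted:
    // clamp the index into [0, length].
    uint32_t ToValidIndex(uint32_t index, uint32_t length) {
      return std::min(index, length);
    }

    int main() { return ToValidIndex(7, 3) == 3 ? 0 : 1; }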
@@ -4221,13 +4160,23 @@ void BytecodeArray::set_osr_loop_nesting_level(int depth) {
WRITE_INT8_FIELD(this, kOSRNestingLevelOffset, depth);
}
+BytecodeArray::Age BytecodeArray::bytecode_age() const {
+ return static_cast<Age>(READ_INT8_FIELD(this, kBytecodeAgeOffset));
+}
+
+void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
+ DCHECK_GE(age, kFirstBytecodeAge);
+ DCHECK_LE(age, kLastBytecodeAge);
+ STATIC_ASSERT(kLastBytecodeAge <= kMaxInt8);
+ WRITE_INT8_FIELD(this, kBytecodeAgeOffset, static_cast<int8_t>(age));
+}
+
int BytecodeArray::parameter_count() const {
// The parameter count is stored as the on-stack size of the parameters so
// that it can be used directly by generated code.
return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
}
-
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(BytecodeArray, source_position_table, ByteArray,
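The bytecode age lives in a single signed byte of the BytecodeArray header: the getter widens on read, the setter range-DCHECKs, and the STATIC_ASSERT guarantees at compile time that the whole Age enum fits in an int8. A self-contained sketch of that narrow-field pattern (the enumerators are placeholders):

    #include <cassert>
    #include <cstdint>

    enum Age : int8_t { kFirstAge = 0, kOldAge = 1, kLastAge = kOldAge };
    static_assert(kLastAge <= INT8_MAX, "Age must fit in one byte");

    // Narrow typed field: stored as int8_t, range-checked on write,
    // widened on read, like the bytecode_age accessors above.
    struct Header {
      int8_t age_byte = 0;
      Age age() const { return static_cast<Age>(age_byte); }
      void set_age(Age a) {
        assert(a >= kFirstAge && a <= kLastAge);
        age_byte = static_cast<int8_t>(a);
      }
    };

    int main() {
      Header h;
      h.set_age(kOldAge);
      return h.age() == kOldAge ? 0 : 1;
    }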
@@ -4901,7 +4850,9 @@ bool Map::CanBeDeprecated() {
if (details.representation().IsSmi()) return true;
if (details.representation().IsDouble()) return true;
if (details.representation().IsHeapObject()) return true;
- if (details.type() == DATA_CONSTANT) return true;
+ if (details.kind() == kData && details.location() == kDescriptor) {
+ return true;
+ }
}
return false;
}
@@ -4950,6 +4901,12 @@ bool Map::IsJSGlobalObjectMap() {
bool Map::IsJSTypedArrayMap() { return instance_type() == JS_TYPED_ARRAY_TYPE; }
bool Map::IsJSDataViewMap() { return instance_type() == JS_DATA_VIEW_TYPE; }
+bool Map::IsSpecialReceiverMap() {
+ bool result = IsSpecialReceiverInstanceType(instance_type());
+ DCHECK_IMPLIES(!result,
+ !has_named_interceptor() && !is_access_check_needed());
+ return result;
+}
bool Map::CanOmitMapChecks() {
return is_stable() && FLAG_omit_map_checks_for_leaf_maps;
@@ -5125,6 +5082,32 @@ inline void Code::set_is_construct_stub(bool value) {
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
}
+inline bool Code::is_promise_rejection() {
+ DCHECK(kind() == BUILTIN);
+ return IsPromiseRejectionField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+inline void Code::set_is_promise_rejection(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = IsPromiseRejectionField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+inline bool Code::is_exception_caught() {
+ DCHECK(kind() == BUILTIN);
+ return IsExceptionCaughtField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+inline void Code::set_is_exception_caught(bool value) {
+ DCHECK(kind() == BUILTIN);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = IsExceptionCaughtField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
bool Code::has_deoptimization_support() {
DCHECK_EQ(FUNCTION, kind());
unsigned flags = READ_UINT32_FIELD(this, kFullCodeFlags);
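The new promise-rejection and exception-caught bits follow the BitField pattern used throughout these accessors: decode extracts one bit from the packed kind-specific flags word, update returns the word with just that bit replaced, and the accessor performs a read-modify-write of the whole 32-bit field. A self-contained model of the pattern; the simplified template and the bit position are illustrative, not V8's BitField:

    #include <cassert>
    #include <cstdint>

    // Simplified one-bit field in the style of the accessors above.
    template <int kShift>
    struct BoolBit {
      static bool decode(uint32_t word) { return (word >> kShift) & 1u; }
      static uint32_t update(uint32_t word, bool value) {
        return (word & ~(1u << kShift)) | ((value ? 1u : 0u) << kShift);
      }
    };

    using IsPromiseRejectionField = BoolBit<3>;  // bit index is made up

    int main() {
      uint32_t flags = 0;
      flags = IsPromiseRejectionField::update(flags, true);   // set
      assert(IsPromiseRejectionField::decode(flags));
      flags = IsPromiseRejectionField::update(flags, false);  // clear
      assert(!IsPromiseRejectionField::decode(flags));
      return 0;
    }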
@@ -5448,16 +5431,6 @@ void AbstractCode::set_source_position_table(ByteArray* source_position_table) {
}
}
-int AbstractCode::LookupRangeInHandlerTable(
- int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
- if (IsCode()) {
- return GetCode()->LookupRangeInHandlerTable(code_offset, data, prediction);
- } else {
- return GetBytecodeArray()->LookupRangeInHandlerTable(code_offset, data,
- prediction);
- }
-}
-
int AbstractCode::SizeIncludingMetadata() {
if (IsCode()) {
return GetCode()->SizeIncludingMetadata();
@@ -5609,7 +5582,7 @@ void Map::AppendDescriptor(Descriptor* desc) {
// it should never try to (otherwise, layout descriptor must be updated too).
#ifdef DEBUG
PropertyDetails details = desc->GetDetails();
- CHECK(details.type() != DATA || !details.representation().IsDouble());
+ CHECK(details.location() != kField || !details.representation().IsDouble());
#endif
}
@@ -5717,15 +5690,18 @@ ACCESSORS(PromiseResolveThenableJobInfo, thenable, JSReceiver, kThenableOffset)
ACCESSORS(PromiseResolveThenableJobInfo, then, JSReceiver, kThenOffset)
ACCESSORS(PromiseResolveThenableJobInfo, resolve, JSFunction, kResolveOffset)
ACCESSORS(PromiseResolveThenableJobInfo, reject, JSFunction, kRejectOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, debug_id, Object, kDebugIdOffset)
-ACCESSORS(PromiseResolveThenableJobInfo, debug_name, Object, kDebugNameOffset)
+SMI_ACCESSORS(PromiseResolveThenableJobInfo, debug_id, kDebugIdOffset)
ACCESSORS(PromiseResolveThenableJobInfo, context, Context, kContextOffset);
ACCESSORS(PromiseReactionJobInfo, value, Object, kValueOffset);
ACCESSORS(PromiseReactionJobInfo, tasks, Object, kTasksOffset);
-ACCESSORS(PromiseReactionJobInfo, deferred, Object, kDeferredOffset);
-ACCESSORS(PromiseReactionJobInfo, debug_id, Object, kDebugIdOffset);
-ACCESSORS(PromiseReactionJobInfo, debug_name, Object, kDebugNameOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_promise, Object,
+ kDeferredPromiseOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_on_resolve, Object,
+ kDeferredOnResolveOffset);
+ACCESSORS(PromiseReactionJobInfo, deferred_on_reject, Object,
+ kDeferredOnRejectOffset);
+SMI_ACCESSORS(PromiseReactionJobInfo, debug_id, kDebugIdOffset);
ACCESSORS(PromiseReactionJobInfo, context, Context, kContextOffset);
Map* PrototypeInfo::ObjectCreateMap() {
@@ -5777,18 +5753,18 @@ ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
SMI_ACCESSORS(PrototypeInfo, bit_field, kBitFieldOffset)
BOOL_ACCESSORS(PrototypeInfo, bit_field, should_be_fast_map, kShouldBeFastBit)
-ACCESSORS(Tuple3, value1, Object, kValue1Offset)
-ACCESSORS(Tuple3, value2, Object, kValue2Offset)
+ACCESSORS(Tuple2, value1, Object, kValue1Offset)
+ACCESSORS(Tuple2, value2, Object, kValue2Offset)
ACCESSORS(Tuple3, value3, Object, kValue3Offset)
ACCESSORS(ContextExtension, scope_info, ScopeInfo, kScopeInfoOffset)
ACCESSORS(ContextExtension, extension, Object, kExtensionOffset)
-ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
+SMI_ACCESSORS(ConstantElementsPair, elements_kind, kElementsKindOffset)
+ACCESSORS(ConstantElementsPair, constant_values, FixedArrayBase,
+ kConstantValuesOffset)
-ACCESSORS(JSFixedArrayIterator, array, FixedArray, kArrayOffset)
-SMI_ACCESSORS(JSFixedArrayIterator, index, kIndexOffset)
-ACCESSORS(JSFixedArrayIterator, initial_next, JSFunction, kNextOffset)
+ACCESSORS(JSModuleNamespace, module, Module, kModuleOffset)
ACCESSORS(Module, code, Object, kCodeOffset)
ACCESSORS(Module, exports, ObjectHashTable, kExportsOffset)
@@ -5853,6 +5829,9 @@ ACCESSORS(TemplateInfo, property_accessors, Object, kPropertyAccessorsOffset)
ACCESSORS(FunctionTemplateInfo, call_code, Object, kCallCodeOffset)
ACCESSORS(FunctionTemplateInfo, prototype_template, Object,
kPrototypeTemplateOffset)
+ACCESSORS(FunctionTemplateInfo, prototype_provider_template, Object,
+ kPrototypeProviderTemplateOffset)
+
ACCESSORS(FunctionTemplateInfo, parent_template, Object, kParentTemplateOffset)
ACCESSORS(FunctionTemplateInfo, named_property_handler, Object,
kNamedPropertyHandlerOffset)
@@ -5933,7 +5912,7 @@ ACCESSORS_CHECKED(Script, eval_from_shared, Object, kEvalFromSharedOffset,
this->type() != TYPE_WASM)
SMI_ACCESSORS_CHECKED(Script, eval_from_position, kEvalFromPositionOffset,
this->type() != TYPE_WASM)
-ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
+ACCESSORS(Script, shared_function_infos, FixedArray, kSharedFunctionInfosOffset)
SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
@@ -5948,10 +5927,6 @@ void Script::set_compilation_type(CompilationType type) {
set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
type == COMPILATION_TYPE_EVAL));
}
-bool Script::hide_source() { return BooleanBit::get(flags(), kHideSourceBit); }
-void Script::set_hide_source(bool value) {
- set_flags(BooleanBit::set(flags(), kHideSourceBit, value));
-}
Script::CompilationState Script::compilation_state() {
return BooleanBit::get(flags(), kCompilationStateBit) ?
COMPILATION_STATE_COMPILED : COMPILATION_STATE_INITIAL;
@@ -6010,6 +5985,7 @@ ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, TypeFeedbackMetadata,
kFeedbackMetadataOffset)
+SMI_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
#if TRACE_MAPS
SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
@@ -6052,8 +6028,6 @@ BOOL_ACCESSORS(SharedFunctionInfo,
kHasDuplicateParameters)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, deserialized, kDeserialized)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, never_compiled,
- kNeverCompiled)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_declaration,
kIsDeclaration)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, marked_for_tier_up,
@@ -6202,16 +6176,15 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
kNameShouldPrintAsAnonymous)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous_expression,
kIsAnonymousExpression)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_crankshaft,
- kDontCrankshaft)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, must_use_ignition_turbo,
+ kMustUseIgnitionTurbo)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_asm_wasm_broken,
kIsAsmWasmBroken)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, requires_class_field_init,
- kRequiresClassFieldInit)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_class_field_initializer,
- kIsClassFieldInitializer)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, has_no_side_effect,
+ kHasNoSideEffect)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, computed_has_no_side_effect,
+ kComputedHasNoSideEffect)
bool Script::HasValidSource() {
Object* src = this->source();
@@ -6251,6 +6224,10 @@ Code* SharedFunctionInfo::code() const {
void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
+ // If the SharedFunctionInfo has bytecode, we should never mark it for lazy
+ // compilation, since the bytecode is never flushed.

+ DCHECK(value != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy) ||
+ !HasBytecodeArray());
WRITE_FIELD(this, kCodeOffset, value);
CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
}
@@ -6270,8 +6247,6 @@ void SharedFunctionInfo::ReplaceCode(Code* value) {
#endif // DEBUG
set_code(value);
-
- if (is_compiled()) set_never_compiled(false);
}
bool SharedFunctionInfo::IsInterpreted() const {
@@ -6497,17 +6472,15 @@ void SharedFunctionInfo::set_disable_optimization_reason(BailoutReason reason) {
opt_count_and_bailout_reason(), reason));
}
-
-bool SharedFunctionInfo::IsBuiltin() {
+bool SharedFunctionInfo::IsUserJavaScript() {
Object* script_obj = script();
- if (script_obj->IsUndefined(GetIsolate())) return true;
+ if (script_obj->IsUndefined(GetIsolate())) return false;
Script* script = Script::cast(script_obj);
- Script::Type type = static_cast<Script::Type>(script->type());
- return type != Script::TYPE_NORMAL;
+ return static_cast<Script::Type>(script->type()) == Script::TYPE_NORMAL;
}
bool SharedFunctionInfo::IsSubjectToDebugging() {
- return !IsBuiltin() && !HasAsmWasmData();
+ return IsUserJavaScript() && !HasAsmWasmData();
}
bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
@@ -6750,7 +6723,7 @@ ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
ACCESSORS(JSGeneratorObject, input_or_debug_pos, Object, kInputOrDebugPosOffset)
SMI_ACCESSORS(JSGeneratorObject, resume_mode, kResumeModeOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
-ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+ACCESSORS(JSGeneratorObject, register_file, FixedArray, kRegisterFileOffset)
bool JSGeneratorObject::is_suspended() const {
DCHECK_LT(kGeneratorExecuting, 0);
@@ -6766,8 +6739,6 @@ bool JSGeneratorObject::is_executing() const {
return continuation() == kGeneratorExecuting;
}
-TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
-
ACCESSORS(JSValue, value, Object, kValueOffset)
@@ -6800,17 +6771,22 @@ ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
-
+SMI_ACCESSORS(JSMessageObject, error_level, kErrorLevelOffset)
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
-ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
-ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, source_position_table, ByteArray, kSourcePositionTableOffset)
-ACCESSORS(Code, raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
-ACCESSORS(Code, next_code_link, Object, kNextCodeLinkOffset)
+#define CODE_ACCESSORS(name, type, offset) \
+ ACCESSORS_CHECKED2(Code, name, type, offset, true, \
+ !GetHeap()->InNewSpace(value))
+CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
+CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
+CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
+CODE_ACCESSORS(source_position_table, ByteArray, kSourcePositionTableOffset)
+CODE_ACCESSORS(protected_instructions, FixedArray, kProtectedInstructionOffset)
+CODE_ACCESSORS(raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
+CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
+#undef CODE_ACCESSORS
void Code::WipeOutHeader() {
WRITE_FIELD(this, kRelocationInfoOffset, NULL);
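CODE_ACCESSORS stamps out one checked accessor pair per Code pointer field via ACCESSORS_CHECKED2; the extra predicate asserts on every store that the referenced object is not in new space, so a Code object never ends up holding new-space pointers. A much-simplified sketch of that macro shape; Heap, ByteArray and the field set here are stand-ins:

    #include <cassert>

    struct Heap { bool InNewSpace(void* p) { (void)p; return false; } };
    struct ByteArray {};
    struct FixedArray {};

    // One macro invocation per field expands to a storage slot, a getter,
    // and a setter guarded by the !InNewSpace(value) check from the diff.
    #define CODE_ACCESSORS(name, Type)           \
      Type* name##_ = nullptr;                   \
      Type* name() const { return name##_; }     \
      void set_##name(Heap* heap, Type* value) { \
        assert(!heap->InNewSpace(value));        \
        name##_ = value;                         \
      }

    struct Code {
      CODE_ACCESSORS(relocation_info, ByteArray)
      CODE_ACCESSORS(handler_table, FixedArray)
    };
    #undef CODE_ACCESSORS

    int main() {
      Heap heap;
      ByteArray reloc;
      Code code;
      code.set_relocation_info(&heap, &reloc);
      return code.relocation_info() == &reloc ? 0 : 1;
    }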
@@ -6823,6 +6799,7 @@ void Code::WipeOutHeader() {
}
WRITE_FIELD(this, kNextCodeLinkOffset, NULL);
WRITE_FIELD(this, kGCMetadataOffset, NULL);
+ WRITE_FIELD(this, kProtectedInstructionOffset, NULL);
}
@@ -6906,6 +6883,7 @@ int Code::SizeIncludingMetadata() {
size += deoptimization_data()->Size();
size += handler_table()->Size();
if (kind() == FUNCTION) size += source_position_table()->Size();
+ size += protected_instructions()->Size();
return size;
}
@@ -6984,6 +6962,7 @@ bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
void JSArrayBuffer::set_is_external(bool value) {
+ DCHECK(!value || !has_guard_region());
set_bit_field(IsExternal::update(bit_field(), value));
}
@@ -7013,6 +6992,13 @@ void JSArrayBuffer::set_is_shared(bool value) {
set_bit_field(IsShared::update(bit_field(), value));
}
+bool JSArrayBuffer::has_guard_region() {
+ return HasGuardRegion::decode(bit_field());
+}
+
+void JSArrayBuffer::set_has_guard_region(bool value) {
+ set_bit_field(HasGuardRegion::update(bit_field(), value));
+}
Object* JSArrayBufferView::byte_offset() const {
if (WasNeutered()) return Smi::kZero;
@@ -7074,6 +7060,20 @@ void JSTypedArray::set_length(Object* value, WriteBarrierMode mode) {
ACCESSORS(JSTypedArray, raw_length, Object, kLengthOffset)
#endif
+ACCESSORS(JSPromiseCapability, promise, Object, kPromiseOffset)
+ACCESSORS(JSPromiseCapability, resolve, Object, kResolveOffset)
+ACCESSORS(JSPromiseCapability, reject, Object, kRejectOffset)
+
+SMI_ACCESSORS(JSPromise, status, kStatusOffset)
+ACCESSORS(JSPromise, result, Object, kResultOffset)
+ACCESSORS(JSPromise, deferred_promise, Object, kDeferredPromiseOffset)
+ACCESSORS(JSPromise, deferred_on_resolve, Object, kDeferredOnResolveOffset)
+ACCESSORS(JSPromise, deferred_on_reject, Object, kDeferredOnRejectOffset)
+ACCESSORS(JSPromise, fulfill_reactions, Object, kFulfillReactionsOffset)
+ACCESSORS(JSPromise, reject_reactions, Object, kRejectReactionsOffset)
+SMI_ACCESSORS(JSPromise, flags, kFlagsOffset)
+BOOL_ACCESSORS(JSPromise, flags, has_handler, kHasHandlerBit)
+BOOL_ACCESSORS(JSPromise, flags, handled_hint, kHandledHintBit)
ACCESSORS(JSRegExp, data, Object, kDataOffset)
ACCESSORS(JSRegExp, flags, Object, kFlagsOffset)
@@ -8042,29 +8042,6 @@ Handle<Object> WeakHashTableShape<entrysize>::AsHandle(Isolate* isolate,
}
-bool ScopeInfo::IsAsmModule() { return AsmModuleField::decode(Flags()); }
-
-
-bool ScopeInfo::IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
-
-
-bool ScopeInfo::HasSimpleParameters() {
- return HasSimpleParametersField::decode(Flags());
-}
-
-
-#define SCOPE_INFO_FIELD_ACCESSORS(name) \
- void ScopeInfo::Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
- int ScopeInfo::name() { \
- if (length() > 0) { \
- return Smi::cast(get(k##name))->value(); \
- } else { \
- return 0; \
- } \
- }
-FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(SCOPE_INFO_FIELD_ACCESSORS)
-#undef SCOPE_INFO_FIELD_ACCESSORS
-
ACCESSORS(ModuleInfoEntry, export_name, Object, kExportNameOffset)
ACCESSORS(ModuleInfoEntry, local_name, Object, kLocalNameOffset)
ACCESSORS(ModuleInfoEntry, import_name, Object, kImportNameOffset)
@@ -8073,35 +8050,6 @@ SMI_ACCESSORS(ModuleInfoEntry, cell_index, kCellIndexOffset)
SMI_ACCESSORS(ModuleInfoEntry, beg_pos, kBegPosOffset)
SMI_ACCESSORS(ModuleInfoEntry, end_pos, kEndPosOffset)
-FixedArray* ModuleInfo::module_requests() const {
- return FixedArray::cast(get(kModuleRequestsIndex));
-}
-
-FixedArray* ModuleInfo::special_exports() const {
- return FixedArray::cast(get(kSpecialExportsIndex));
-}
-
-FixedArray* ModuleInfo::regular_exports() const {
- return FixedArray::cast(get(kRegularExportsIndex));
-}
-
-FixedArray* ModuleInfo::regular_imports() const {
- return FixedArray::cast(get(kRegularImportsIndex));
-}
-
-FixedArray* ModuleInfo::namespace_imports() const {
- return FixedArray::cast(get(kNamespaceImportsIndex));
-}
-
-#ifdef DEBUG
-bool ModuleInfo::Equals(ModuleInfo* other) const {
- return regular_exports() == other->regular_exports() &&
- regular_imports() == other->regular_imports() &&
- special_exports() == other->special_exports() &&
- namespace_imports() == other->namespace_imports();
-}
-#endif
-
void Map::ClearCodeCache(Heap* heap) {
// No write barrier is needed since empty_fixed_array is not in new space.
// Please note this function is used during marking:
@@ -8129,11 +8077,13 @@ void JSArray::set_length(Smi* length) {
bool JSArray::SetLengthWouldNormalize(Heap* heap, uint32_t new_length) {
+ // This constant is somewhat arbitrary. Any large enough value would work.
+ const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;
// If the new array won't fit in some non-trivial fraction of the max old
// space size, then force it to go dictionary mode.
- uint32_t max_fast_array_size =
+ uint32_t heap_based_upper_bound =
static_cast<uint32_t>((heap->MaxOldGenerationSize() / kDoubleSize) / 4);
- return new_length >= max_fast_array_size;
+ return new_length >= Min(kMaxFastArrayLength, heap_based_upper_bound);
}
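The threshold is now the smaller of a fixed 32M-element cap and a quarter of the old-generation budget measured in doubles. Working the numbers: with a 1 GB max old generation the heap bound is (2^30 / 8) / 4 = 2^25 = 33,554,432, exactly the fixed cap, so the two coincide; at 256 MB the heap bound drops to 2^23 (about 8.4M elements) and dominates, while on larger heaps the fixed cap keeps fast array lengths bounded regardless of memory. The predicate distilled into standalone form:

    #include <algorithm>
    #include <cstdint>

    const uint32_t kMaxFastArrayLength = 32 * 1024 * 1024;  // 2^25 elements
    const uint32_t kDoubleSize = 8;

    // Mirrors JSArray::SetLengthWouldNormalize above.
    bool SetLengthWouldNormalize(uint64_t max_old_gen_size,
                                 uint32_t new_length) {
      uint32_t heap_based_upper_bound =
          static_cast<uint32_t>((max_old_gen_size / kDoubleSize) / 4);
      return new_length >=
             std::min(kMaxFastArrayLength, heap_based_upper_bound);
    }

    int main() {
      // 1 GB: both bounds equal 2^25, so 2^25 normalizes while 2^25 - 1
      // stays a fast array.
      uint64_t one_gb = 1ull << 30;
      return (SetLengthWouldNormalize(one_gb, 1u << 25) &&
              !SetLengthWouldNormalize(one_gb, (1u << 25) - 1))
                 ? 0 : 1;
    }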
@@ -8397,10 +8347,10 @@ ACCESSORS(JSArrayIterator, object_map, Object, kIteratedObjectMapOffset)
ACCESSORS(JSStringIterator, string, String, kStringOffset)
SMI_ACCESSORS(JSStringIterator, index, kNextIndexOffset)
-#undef TYPE_CHECKER
-#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef ACCESSORS
+#undef ACCESSORS_CHECKED
+#undef ACCESSORS_CHECKED2
#undef SMI_ACCESSORS
#undef SYNCHRONIZED_SMI_ACCESSORS
#undef NOBARRIER_SMI_ACCESSORS
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 83e00b9f5f..1f10b9235d 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -7,6 +7,7 @@
#include <iomanip>
#include <memory>
+#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/interpreter/bytecodes.h"
@@ -149,11 +150,14 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_GENERATOR_OBJECT_TYPE:
- case JS_PROMISE_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
+ case JS_PROMISE_CAPABILITY_TYPE:
JSObject::cast(this)->JSObjectPrint(os);
break;
+ case JS_PROMISE_TYPE:
+ JSPromise::cast(this)->JSPromisePrint(os);
+ break;
case JS_ARRAY_TYPE:
JSArray::cast(this)->JSArrayPrint(os);
break;
@@ -232,9 +236,6 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_TYPED_ARRAY_TYPE:
JSTypedArray::cast(this)->JSTypedArrayPrint(os);
break;
- case JS_FIXED_ARRAY_ITERATOR_TYPE:
- JSFixedArrayIterator::cast(this)->JSFixedArrayIteratorPrint(os);
- break;
case JS_DATA_VIEW_TYPE:
JSDataView::cast(this)->JSDataViewPrint(os);
break;
@@ -326,43 +327,39 @@ void FixedTypedArray<Traits>::FixedTypedArrayPrint(
os << "fixed " << Traits::Designator();
}
-
-void JSObject::PrintProperties(std::ostream& os) { // NOLINT
+bool JSObject::PrintProperties(std::ostream& os) { // NOLINT
if (HasFastProperties()) {
DescriptorArray* descs = map()->instance_descriptors();
- for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
- os << "\n ";
+ int i = 0;
+ for (; i < map()->NumberOfOwnDescriptors(); i++) {
+ os << "\n ";
descs->GetKey(i)->NamePrint(os);
os << ": ";
- switch (descs->GetType(i)) {
- case DATA: {
- FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- if (IsUnboxedDoubleField(index)) {
- os << "<unboxed double> " << RawFastDoublePropertyAt(index);
+ PropertyDetails details = descs->GetDetails(i);
+ switch (details.location()) {
+ case kField: {
+ FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+ if (IsUnboxedDoubleField(field_index)) {
+ os << "<unboxed double> " << RawFastDoublePropertyAt(field_index);
} else {
- os << Brief(RawFastPropertyAt(index));
+ os << Brief(RawFastPropertyAt(field_index));
}
- os << " (data field at offset " << index.property_index() << ")";
- break;
- }
- case ACCESSOR: {
- FieldIndex index = FieldIndex::ForDescriptor(map(), i);
- os << " (accessor field at offset " << index.property_index() << ")";
break;
}
- case DATA_CONSTANT:
- os << Brief(descs->GetConstant(i)) << " (data constant)";
- break;
- case ACCESSOR_CONSTANT:
- os << Brief(descs->GetCallbacksObject(i)) << " (accessor constant)";
+ case kDescriptor:
+ os << Brief(descs->GetValue(i));
break;
}
+ os << " ";
+ details.PrintAsFastTo(os, PropertyDetails::kForProperties);
}
+ return i > 0;
} else if (IsJSGlobalObject()) {
global_dictionary()->Print(os);
} else {
property_dictionary()->Print(os);
}
+ return true;
}
namespace {
@@ -381,10 +378,8 @@ bool is_the_hole(double maybe_hole) {
return bit_cast<uint64_t>(maybe_hole) == kHoleNanInt64;
}
-} // namespace
-
template <class T, bool print_the_hole>
-static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
+void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
T* array = T::cast(object);
if (array->length() == 0) return;
int previous_index = 0;
@@ -415,38 +410,42 @@ static void DoPrintElements(std::ostream& os, Object* object) { // NOLINT
}
}
+void PrintFixedArrayElements(std::ostream& os, FixedArray* array) {
+ // Print in array notation for non-sparse arrays.
+ Object* previous_value = array->get(0);
+ Object* value = nullptr;
+ int previous_index = 0;
+ int i;
+ for (i = 1; i <= array->length(); i++) {
+ if (i < array->length()) value = array->get(i);
+ if (previous_value == value && i != array->length()) {
+ continue;
+ }
+ os << "\n";
+ std::stringstream ss;
+ ss << previous_index;
+ if (previous_index != i - 1) {
+ ss << '-' << (i - 1);
+ }
+ os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
+ previous_index = i;
+ previous_value = value;
+ }
+}
+
+} // namespace
-void JSObject::PrintElements(std::ostream& os) { // NOLINT
+bool JSObject::PrintElements(std::ostream& os) { // NOLINT
// Don't call GetElementsKind; its validation code can cause the printer to
// fail when debugging.
- if (elements()->length() == 0) return;
+ if (elements()->length() == 0) return false;
switch (map()->elements_kind()) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_ELEMENTS:
case FAST_STRING_WRAPPER_ELEMENTS: {
- // Print in array notation for non-sparse arrays.
- FixedArray* array = FixedArray::cast(elements());
- Object* previous_value = array->get(0);
- Object* value = nullptr;
- int previous_index = 0;
- int i;
- for (i = 1; i <= array->length(); i++) {
- if (i < array->length()) value = array->get(i);
- if (previous_value == value && i != array->length()) {
- continue;
- }
- os << "\n";
- std::stringstream ss;
- ss << previous_index;
- if (previous_index != i - 1) {
- ss << '-' << (i - 1);
- }
- os << std::setw(12) << ss.str() << ": " << Brief(previous_value);
- previous_index = i;
- previous_value = value;
- }
+ PrintFixedArrayElements(os, FixedArray::cast(elements()));
break;
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
@@ -481,6 +480,7 @@ void JSObject::PrintElements(std::ostream& os) { // NOLINT
case NO_ELEMENTS:
break;
}
+ return true;
}
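The extracted PrintFixedArrayElements run-length-compresses the output: consecutive equal values collapse into a single "first-last: value" line, and the loop deliberately runs one index past the end so the final run is flushed. The same logic, ported to a standalone program over std::vector:

    #include <iomanip>
    #include <iostream>
    #include <sstream>
    #include <vector>

    // Same run-length scheme as PrintFixedArrayElements: equal neighbours
    // fold into one "first-last: value" line; the loop runs one past the
    // end so the final run is emitted.
    void PrintElements(std::ostream& os, const std::vector<int>& a) {
      if (a.empty()) return;
      int n = static_cast<int>(a.size());
      int previous_value = a[0], value = 0, previous_index = 0;
      for (int i = 1; i <= n; i++) {
        if (i < n) value = a[i];
        if (previous_value == value && i != n) continue;
        std::stringstream ss;
        ss << previous_index;
        if (previous_index != i - 1) ss << '-' << (i - 1);
        os << "\n" << std::setw(12) << ss.str() << ": " << previous_value;
        previous_index = i;
        previous_value = value;
      }
    }

    int main() {
      PrintElements(std::cout, {7, 7, 7, 9});  // prints "0-2: 7" then "3: 9"
      std::cout << "\n";
      return 0;
    }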
@@ -511,19 +511,19 @@ static void JSObjectPrintHeader(std::ostream& os, JSObject* obj,
static void JSObjectPrintBody(std::ostream& os, JSObject* obj, // NOLINT
bool print_elements = true) {
- os << "\n - properties = {";
- obj->PrintProperties(os);
- os << "\n }\n";
+ os << "\n - properties = " << Brief(obj->properties()) << " {";
+ if (obj->PrintProperties(os)) os << "\n ";
+ os << "}\n";
if (print_elements && obj->elements()->length() > 0) {
- os << " - elements = {";
- obj->PrintElements(os);
- os << "\n }\n";
+ os << " - elements = " << Brief(obj->elements()) << " {";
+ if (obj->PrintElements(os)) os << "\n ";
+ os << "}\n";
}
int internal_fields = obj->GetInternalFieldCount();
if (internal_fields > 0) {
os << " - internal fields = {";
for (int i = 0; i < internal_fields; i++) {
- os << "\n " << Brief(obj->GetInternalField(i));
+ os << "\n " << obj->GetInternalField(i);
}
os << "\n }\n";
}
@@ -541,6 +541,17 @@ void JSArray::JSArrayPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
+void JSPromise::JSPromisePrint(std::ostream& os) { // NOLINT
+ JSObjectPrintHeader(os, this, "JSPromise");
+ os << "\n - status = " << JSPromise::Status(status());
+ os << "\n - result = " << Brief(result());
+ os << "\n - deferred_promise: " << Brief(deferred_promise());
+ os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
+ os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
+ os << "\n - fulfill_reactions = " << Brief(fulfill_reactions());
+ os << "\n - reject_reactions = " << Brief(reject_reactions());
+ os << "\n - has_handler = " << has_handler();
+}
void JSRegExp::JSRegExpPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSRegExp");
@@ -578,6 +589,7 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
if (is_deprecated()) os << "\n - deprecated_map";
if (is_stable()) os << "\n - stable_map";
+ if (is_migration_target()) os << "\n - migration_target";
if (is_dictionary_map()) os << "\n - dictionary_map";
if (has_hidden_prototype()) os << "\n - has_hidden_prototype";
if (has_named_interceptor()) os << "\n - named_interceptor";
@@ -597,7 +609,8 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
<< "#" << NumberOfOwnDescriptors() << ": "
<< Brief(instance_descriptors());
if (FLAG_unbox_double_fields) {
- os << "\n - layout descriptor: " << Brief(layout_descriptor());
+ os << "\n - layout descriptor: ";
+ layout_descriptor()->ShortPrint(os);
}
int nof_transitions = TransitionArray::NumberOfTransitions(raw_transitions());
if (nof_transitions > 0) {
@@ -631,25 +644,18 @@ void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
void FixedArray::FixedArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedArray");
+ os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
- for (int i = 0; i < length(); i++) {
- os << "\n [" << i << "]: " << Brief(get(i));
- }
+ PrintFixedArrayElements(os, this);
os << "\n";
}
void FixedDoubleArray::FixedDoubleArrayPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FixedDoubleArray");
+ os << "\n - map = " << Brief(map());
os << "\n - length: " << length();
- for (int i = 0; i < length(); i++) {
- os << "\n [" << i << "]: ";
- if (is_the_hole(i)) {
- os << "<the hole>";
- } else {
- os << get_scalar(i);
- }
- }
+ DoPrintElements<FixedDoubleArray, true>(os, this);
os << "\n";
}
@@ -686,16 +692,11 @@ void FeedbackVectorSpecBase<Derived>::FeedbackVectorSpecPrint(
return;
}
- for (int slot = 0, name_index = 0; slot < slot_count;) {
+ for (int slot = 0; slot < slot_count;) {
FeedbackVectorSlotKind kind = This()->GetKind(slot);
int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
DCHECK_LT(0, entry_size);
-
os << "\n Slot #" << slot << " " << kind;
- if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
- os << ", " << Brief(*This()->GetName(name_index++));
- }
-
slot += entry_size;
}
os << "\n";
@@ -719,12 +720,14 @@ void TypeFeedbackMetadata::TypeFeedbackMetadataPrint(
os << "\n - slot_count: " << slot_count();
TypeFeedbackMetadataIterator iter(this);
+ int parameter_index = 0;
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
os << "\n Slot " << slot << " " << kind;
- if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
- os << ", " << Brief(iter.name());
+ if (TypeFeedbackMetadata::SlotRequiresParameter(kind)) {
+ int parameter_value = this->GetParameter(parameter_index++);
+ os << " [" << parameter_value << "]";
}
}
os << "\n";
@@ -746,15 +749,13 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
+ int parameter_index = 0;
TypeFeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackVectorSlot slot = iter.Next();
FeedbackVectorSlotKind kind = iter.kind();
os << "\n Slot " << slot << " " << kind;
- if (TypeFeedbackMetadata::SlotRequiresName(kind)) {
- os << ", " << Brief(iter.name());
- }
os << " ";
switch (kind) {
case FeedbackVectorSlotKind::LOAD_IC: {
@@ -797,6 +798,17 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
os << Code::ICState2String(nexus.StateFromFeedback());
break;
}
+ case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
+ StoreDataPropertyInLiteralICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::CREATE_CLOSURE: {
+ // TODO(mvstanton): Integrate this into the iterator.
+ int parameter_value = metadata()->GetParameter(parameter_index++);
+ os << "[" << parameter_value << "]";
+ break;
+ }
case FeedbackVectorSlotKind::GENERAL:
break;
case FeedbackVectorSlotKind::INVALID:
@@ -1011,15 +1023,6 @@ void JSArrayIterator::JSArrayIteratorPrint(std::ostream& os) { // NOLINT
JSObjectPrintBody(os, this);
}
-void JSFixedArrayIterator::JSFixedArrayIteratorPrint(
- std::ostream& os) { // NOLINT
- JSObjectPrintHeader(os, this, "JSFixedArrayIterator");
- os << "\n - array = " << Brief(array());
- os << "\n - index = " << index();
- os << "\n - initial_next = " << Brief(initial_next());
- JSObjectPrintBody(os, this);
-}
-
void JSDataView::JSDataViewPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSDataView");
os << "\n - buffer =" << Brief(buffer());
@@ -1105,7 +1108,9 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalProxy");
- os << "\n - native context = " << Brief(native_context());
+ if (!GetIsolate()->bootstrapper()->IsActive()) {
+ os << "\n - native context = " << Brief(native_context());
+ }
os << "\n - hash = " << Brief(hash());
JSObjectPrintBody(os, this);
}
@@ -1113,7 +1118,9 @@ void JSGlobalProxy::JSGlobalProxyPrint(std::ostream& os) { // NOLINT
void JSGlobalObject::JSGlobalObjectPrint(std::ostream& os) { // NOLINT
JSObjectPrintHeader(os, this, "JSGlobalObject");
- os << "\n - native context = " << Brief(native_context());
+ if (!GetIsolate()->bootstrapper()->IsActive()) {
+ os << "\n - native context = " << Brief(native_context());
+ }
os << "\n - global proxy = " << Brief(global_proxy());
JSObjectPrintBody(os, this);
}
@@ -1129,7 +1136,8 @@ void Cell::CellPrint(std::ostream& os) { // NOLINT
void PropertyCell::PropertyCellPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "PropertyCell");
os << "\n - value: " << Brief(value());
- os << "\n - details: " << property_details();
+ os << "\n - details: ";
+ property_details().PrintAsSlowTo(os);
PropertyCellType cell_type = property_details().cell_type();
os << "\n - cell_type: ";
if (value()->IsTheHole(GetIsolate())) {
@@ -1227,8 +1235,7 @@ void PromiseResolveThenableJobInfo::PromiseResolveThenableJobInfoPrint(
os << "\n - then: " << Brief(then());
os << "\n - resolve: " << Brief(resolve());
os << "\n - reject: " << Brief(reject());
- os << "\n - debug id: " << Brief(debug_id());
- os << "\n - debug name: " << Brief(debug_name());
+ os << "\n - debug id: " << debug_id();
os << "\n - context: " << Brief(context());
os << "\n";
}
@@ -1238,9 +1245,10 @@ void PromiseReactionJobInfo::PromiseReactionJobInfoPrint(
HeapObject::PrintHeader(os, "PromiseReactionJobInfo");
os << "\n - value: " << Brief(value());
os << "\n - tasks: " << Brief(tasks());
- os << "\n - deferred: " << Brief(deferred());
- os << "\n - debug id: " << Brief(debug_id());
- os << "\n - debug name: " << Brief(debug_name());
+ os << "\n - deferred_promise: " << Brief(deferred_promise());
+ os << "\n - deferred_on_resolve: " << Brief(deferred_on_resolve());
+ os << "\n - deferred_on_reject: " << Brief(deferred_on_reject());
+ os << "\n - debug id: " << debug_id();
os << "\n - reaction context: " << Brief(context());
os << "\n";
}
@@ -1267,9 +1275,9 @@ void Module::ModulePrint(std::ostream& os) { // NOLINT
}
void JSModuleNamespace::JSModuleNamespacePrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "JSModuleNamespace");
+ JSObjectPrintHeader(os, this, "JSModuleNamespace");
os << "\n - module: " << Brief(module());
- os << "\n";
+ JSObjectPrintBody(os, this);
}
void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
@@ -1282,6 +1290,13 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void Tuple2::Tuple2Print(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "Tuple2");
+ os << "\n - value1: " << Brief(value1());
+ os << "\n - value2: " << Brief(value2());
+ os << "\n";
+}
+
void Tuple3::Tuple3Print(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Tuple3");
os << "\n - value1: " << Brief(value1());
@@ -1297,6 +1312,13 @@ void ContextExtension::ContextExtensionPrint(std::ostream& os) { // NOLINT
os << "\n";
}
+void ConstantElementsPair::ConstantElementsPairPrint(
+ std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "ConstantElementsPair");
+ os << "\n - elements_kind: " << static_cast<ElementsKind>(elements_kind());
+ os << "\n - constant_values: " << Brief(constant_values());
+ os << "\n";
+}
void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessorPair");
@@ -1392,7 +1414,7 @@ void AllocationSite::AllocationSitePrint(std::ostream& os) { // NOLINT
} else if (transition_info()->IsJSArray()) {
os << "Array literal " << Brief(transition_info());
} else {
- os << "unknown transition_info" << Brief(transition_info());
+ os << "unknown transition_info " << Brief(transition_info());
}
os << "\n";
}
@@ -1460,16 +1482,24 @@ void LayoutDescriptor::Print() {
os << std::flush;
}
+void LayoutDescriptor::ShortPrint(std::ostream& os) {
+ if (IsSmi()) {
+ os << this; // Print tagged value for easy use with "jld" gdb macro.
+ } else {
+ os << Brief(this);
+ }
+}
void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
os << "Layout descriptor: ";
- if (IsOddball() && IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
- os << "<uninitialized>";
- } else if (IsFastPointerLayout()) {
+ if (IsFastPointerLayout()) {
os << "<all tagged>";
} else if (IsSmi()) {
os << "fast";
PrintBitMask(os, static_cast<uint32_t>(Smi::cast(this)->value()));
+ } else if (IsOddball() &&
+ IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
+ os << "<uninitialized>";
} else {
os << "slow";
int len = length();
@@ -1546,15 +1576,43 @@ void DescriptorArray::Print() {
void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
HandleScope scope(GetIsolate());
- os << "Descriptor array #" << number_of_descriptors();
+ os << "Descriptor array #" << number_of_descriptors() << ":";
for (int i = 0; i < number_of_descriptors(); i++) {
- Descriptor desc;
- Get(i, &desc);
- os << "\n " << i << ": " << desc;
+ Name* key = GetKey(i);
+ os << "\n [" << i << "]: ";
+#ifdef OBJECT_PRINT
+ key->NamePrint(os);
+#else
+ key->ShortPrint(os);
+#endif
+ os << " ";
+ PrintDescriptorDetails(os, i, PropertyDetails::kPrintFull);
}
os << "\n";
}
+void DescriptorArray::PrintDescriptorDetails(std::ostream& os, int descriptor,
+ PropertyDetails::PrintMode mode) {
+ PropertyDetails details = GetDetails(descriptor);
+ details.PrintAsFastTo(os, mode);
+ os << " @ ";
+ Object* value = GetValue(descriptor);
+ switch (details.location()) {
+ case kField: {
+ FieldType* field_type = Map::UnwrapFieldType(value);
+ field_type->PrintTo(os);
+ break;
+ }
+ case kDescriptor:
+ os << Brief(value);
+ if (value->IsAccessorPair()) {
+ AccessorPair* pair = AccessorPair::cast(value);
+ os << "(get: " << Brief(pair->getter())
+ << ", set: " << Brief(pair->setter()) << ")";
+ }
+ break;
+ }
+}
void TransitionArray::Print() {
OFStream os(stdout);
@@ -1592,18 +1650,13 @@ void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
} else if (key == heap->strict_function_transition_symbol()) {
os << " (transition to strict function)";
} else {
- PropertyDetails details = GetTargetDetails(key, target);
+ DCHECK(!IsSpecialTransition(key));
os << "(transition to ";
- if (details.location() == kDescriptor) {
- os << "immutable ";
- }
- os << (details.kind() == kData ? "data" : "accessor");
- if (details.location() == kDescriptor) {
- Object* value =
- target->instance_descriptors()->GetValue(target->LastAdded());
- os << " " << Brief(value);
- }
- os << "), attrs: " << details.attributes();
+ int descriptor = target->LastAdded();
+ DescriptorArray* descriptors = target->instance_descriptors();
+ descriptors->PrintDescriptorDetails(os, descriptor,
+ PropertyDetails::kForTransitions);
+ os << ")";
}
os << " -> " << Brief(target);
}
@@ -1649,6 +1702,15 @@ extern void _v8_internal_Print_DescriptorArray(void* object) {
}
}
+extern void _v8_internal_Print_LayoutDescriptor(void* object) {
+ i::Object* o = reinterpret_cast<i::Object*>(object);
+ if (!o->IsLayoutDescriptor()) {
+ printf("Not a layout descriptor\n");
+ } else {
+ reinterpret_cast<i::LayoutDescriptor*>(object)->Print();
+ }
+}
+
extern void _v8_internal_Print_TransitionArray(void* object) {
if (reinterpret_cast<i::Object*>(object)->IsSmi()) {
printf("Not a transition array\n");
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 15773748ed..086d515625 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -28,6 +28,7 @@
#include "src/counters-inl.h"
#include "src/counters.h"
#include "src/date.h"
+#include "src/debug/debug-evaluate.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/elements.h"
@@ -49,6 +50,7 @@
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
+#include "src/map-updater.h"
#include "src/messages.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/property-descriptor.h"
@@ -139,7 +141,8 @@ MaybeHandle<JSReceiver> Object::ConvertReceiver(Isolate* isolate,
}
// static
-MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+MaybeHandle<Object> Object::ConvertToNumber(Isolate* isolate,
+ Handle<Object> input) {
while (true) {
if (input->IsNumber()) {
return input;
@@ -150,7 +153,6 @@ MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
if (input->IsOddball()) {
return Oddball::ToNumber(Handle<Oddball>::cast(input));
}
- Isolate* const isolate = Handle<HeapObject>::cast(input)->GetIsolate();
if (input->IsSymbol()) {
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
Object);
@@ -166,28 +168,33 @@ MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
}
}
-
// static
-MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToInteger(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+ Object);
+ if (input->IsSmi()) return input;
return isolate->factory()->NewNumber(DoubleToInteger(input->Number()));
}
-
// static
-MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToInt32(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+ Object);
+ if (input->IsSmi()) return input;
return isolate->factory()->NewNumberFromInt(DoubleToInt32(input->Number()));
}
-
// static
-MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+MaybeHandle<Object> Object::ConvertToUint32(Isolate* isolate,
+ Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ConvertToNumber(isolate, input),
+ Object);
+ if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
}
-
// static
MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
Handle<Object> input) {
@@ -198,12 +205,35 @@ MaybeHandle<Name> Object::ConvertToName(Isolate* isolate,
return ToString(isolate, input);
}
+// ES6 7.1.14
// static
-MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
- while (true) {
- if (input->IsString()) {
- return Handle<String>::cast(input);
+MaybeHandle<Object> Object::ConvertToPropertyKey(Isolate* isolate,
+ Handle<Object> value) {
+ // 1. Let key be ToPrimitive(argument, hint String).
+ MaybeHandle<Object> maybe_key =
+ Object::ToPrimitive(value, ToPrimitiveHint::kString);
+ // 2. ReturnIfAbrupt(key).
+ Handle<Object> key;
+ if (!maybe_key.ToHandle(&key)) return key;
+ // 3. If Type(key) is Symbol, then return key.
+ if (key->IsSymbol()) return key;
+ // 4. Return ToString(key).
+ // Extending spec'ed behavior, we'd be happy to return an element index.
+ if (key->IsSmi()) return key;
+ if (key->IsHeapNumber()) {
+ uint32_t uint_value;
+ if (value->ToArrayLength(&uint_value) &&
+ uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
+ return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
}
+ }
+ return Object::ToString(isolate, key);
+}
+
+// static
+MaybeHandle<String> Object::ConvertToString(Isolate* isolate,
+ Handle<Object> input) {
+ while (true) {
if (input->IsOddball()) {
return handle(Handle<Oddball>::cast(input)->to_string(), isolate);
}
@@ -221,6 +251,11 @@ MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
ToPrimitiveHint::kString),
String);
+ // The previous IsString() check happened in Object::ToString and thus we
+ // put it at the end of the loop in this helper.
+ if (input->IsString()) {
+ return Handle<String>::cast(input);
+ }
}
}
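ConvertToPropertyKey tracks ES6 7.1.14 ToPropertyKey (ToPrimitive with hint String, symbols returned unchanged, everything else via ToString) but, as the comment says, extends it: an integral, non-negative number that fits in a Smi comes back as an element index, letting callers skip string conversion entirely. A behavioural sketch of just that fast path; the kMaxSmi constant assumes 31-bit smis and is illustrative:

    #include <cmath>
    #include <cstdint>

    const double kMaxSmi = 1073741823.0;  // assumed Smi::kMaxValue (31-bit smis)

    // Sketch of the non-spec'd step: an integral, non-negative key small
    // enough for a Smi is kept as an element index (via *index) instead of
    // being stringified.
    bool TryKeyAsElementIndex(double key, uint32_t* index) {
      if (std::floor(key) != key) return false;  // not integral (or NaN)
      if (key < 0 || key > kMaxSmi) return false;
      *index = static_cast<uint32_t>(key);
      return true;
    }

    int main() {
      uint32_t i;
      return (TryKeyAsElementIndex(42.0, &i) && i == 42 &&
              !TryKeyAsElementIndex(1.5, &i) &&
              !TryKeyAsElementIndex(-1.0, &i))
                 ? 0 : 1;
    }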
@@ -375,11 +410,16 @@ Handle<String> Object::NoSideEffectsToString(Isolate* isolate,
}
// static
-MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+MaybeHandle<Object> Object::ConvertToLength(Isolate* isolate,
+ Handle<Object> input) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ if (input->IsSmi()) {
+ int value = std::max(Smi::cast(*input)->value(), 0);
+ return handle(Smi::FromInt(value), isolate);
+ }
double len = DoubleToInteger(input->Number());
if (len <= 0.0) {
- len = 0.0;
+ return handle(Smi::kZero, isolate);
} else if (len >= kMaxSafeInteger) {
len = kMaxSafeInteger;
}
@@ -387,10 +427,12 @@ MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
}
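With the new Smi fast path, ConvertToLength is behaviourally a saturating clamp into [0, 2^53 - 1]: NaN and negatives collapse to zero (returned as Smi::kZero), and anything above kMaxSafeInteger saturates. The scalar core, separated from the Smi/HeapNumber boxing:

    #include <algorithm>
    #include <cmath>

    const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1

    // ToLength as a pure clamp; ConvertToLength above is this plus boxing.
    double ToLength(double x) {
      if (std::isnan(x)) return 0.0;
      x = std::trunc(x);  // DoubleToInteger: round toward zero
      return std::min(std::max(x, 0.0), kMaxSafeInteger);
    }

    int main() {
      return (ToLength(-5.0) == 0.0 && ToLength(3.7) == 3.0 &&
              ToLength(1e300) == kMaxSafeInteger)
                 ? 0 : 1;
    }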
// static
-MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
- MessageTemplate::Template error_index) {
- if (input->IsUndefined(isolate)) return isolate->factory()->NewNumber(0.0);
+MaybeHandle<Object> Object::ConvertToIndex(
+ Isolate* isolate, Handle<Object> input,
+ MessageTemplate::Template error_index) {
+ if (input->IsUndefined(isolate)) return handle(Smi::kZero, isolate);
ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
double len = DoubleToInteger(input->Number()) + 0.0;
auto js_len = isolate->factory()->NewNumber(len);
if (len < 0.0 || len > kMaxSafeInteger) {
@@ -404,7 +446,7 @@ bool Object::BooleanValue() {
DCHECK(IsHeapObject());
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (IsBoolean()) return IsTrue(isolate);
- if (IsUndefined(isolate) || IsNull(isolate)) return false;
+ if (IsNullOrUndefined(isolate)) return false;
if (IsUndetectable()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
@@ -847,7 +889,7 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
Isolate* isolate = receiver->GetIsolate();
ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
JSReceiver::GetProperty(receiver, name), Object);
- if (func->IsNull(isolate) || func->IsUndefined(isolate)) {
+ if (func->IsNullOrUndefined(isolate)) {
return isolate->factory()->undefined_value();
}
if (!func->IsCallable()) {
@@ -858,10 +900,30 @@ MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
return func;
}
+namespace {
+MaybeHandle<FixedArray> CreateListFromArrayLikeFastPath(
+ Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ if (element_types != ElementTypes::kAll || !object->IsJSArray()) {
+ return MaybeHandle<FixedArray>();
+ }
+ Handle<JSArray> array = Handle<JSArray>::cast(object);
+ uint32_t length;
+ if (!array->HasArrayPrototype(isolate) ||
+ !array->length()->ToUint32(&length) || !array->HasFastElements() ||
+ !JSObject::PrototypeHasNoElements(isolate, *array)) {
+ return MaybeHandle<FixedArray>();
+ }
+ return array->GetElementsAccessor()->CreateListFromArray(isolate, array);
+}
+} // namespace
// static
MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
Isolate* isolate, Handle<Object> object, ElementTypes element_types) {
+ // Fast-path for JS_ARRAY_TYPE.
+ MaybeHandle<FixedArray> fast_result =
+ CreateListFromArrayLikeFastPath(isolate, object, element_types);
+ if (!fast_result.is_null()) return fast_result;
// 1. ReturnIfAbrupt(object).
// 2. (default elementTypes -- not applicable.)
// 3. If Type(obj) is not Object, throw a TypeError exception.
@@ -872,6 +934,7 @@ MaybeHandle<FixedArray> Object::CreateListFromArrayLike(
"CreateListFromArrayLike")),
FixedArray);
}
+
// 4. Let len be ? ToLength(? Get(obj, "length")).
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
Handle<Object> raw_length_number;
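
The shape of the fast path above recurs throughout this patch: probe a handful of cheap invariants (JSArray, fast elements, untouched prototype chain) and signal "not applicable" with an empty MaybeHandle so the spec-driven generic path runs. A small model of that contract using std::optional; all names here are illustrative:

    #include <optional>
    #include <vector>

    static std::optional<std::vector<int>> TryFastPath(
        const std::vector<int>* maybe_array) {
      if (maybe_array == nullptr) return std::nullopt;  // invariant check failed
      return *maybe_array;                              // cheap direct copy
    }

    static std::vector<int> CreateList(const std::vector<int>* input) {
      if (auto fast = TryFastPath(input)) return *fast;  // fast result, if any
      return std::vector<int>();  // fall through to the generic, spec-driven path
    }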
@@ -1790,11 +1853,13 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
GetPropertyWithInterceptor(it, &done), Object);
if (done) return result;
}
+
} else {
- MaybeHandle<Object> result;
+ Handle<Object> result;
bool done;
- result = GetPropertyWithInterceptorInternal(it, interceptor, &done);
- RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ GetPropertyWithInterceptorInternal(it, interceptor, &done), Object);
if (done) return result;
}
@@ -1830,7 +1895,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
} else {
Maybe<PropertyAttributes> result =
GetPropertyAttributesWithInterceptorInternal(it, interceptor);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ if (isolate->has_pending_exception()) return Nothing<PropertyAttributes>();
if (result.FromMaybe(ABSENT) != ABSENT) return result;
}
isolate->ReportFailedAccessCheck(checked);
@@ -1866,10 +1931,9 @@ Maybe<bool> JSObject::SetPropertyWithFailedAccessCheck(
} else {
Maybe<bool> result = SetPropertyWithInterceptorInternal(
it, interceptor, should_throw, value);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ if (isolate->has_pending_exception()) return Nothing<bool>();
if (result.IsJust()) return result;
}
-
isolate->ReportFailedAccessCheck(checked);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
return Just(true);
@@ -1937,6 +2001,155 @@ Maybe<bool> JSReceiver::HasInPrototypeChain(Isolate* isolate,
}
}
+namespace {
+
+MUST_USE_RESULT Maybe<bool> FastAssign(Handle<JSReceiver> target,
+ Handle<Object> source, bool use_set) {
+ // Non-empty strings are the only non-JSReceivers that need to be handled
+ // explicitly by Object.assign.
+ if (!source->IsJSReceiver()) {
+ return Just(!source->IsString() || String::cast(*source)->length() == 0);
+ }
+
+ // If the target is deprecated, the object will be updated on first store. If
+ // the source for that store equals the target, this will invalidate the
+ // cached representation of the source. Preventively upgrade the target.
+ // Do this on each iteration since any property load could cause deprecation.
+ if (target->map()->is_deprecated()) {
+ JSObject::MigrateInstance(Handle<JSObject>::cast(target));
+ }
+
+ Isolate* isolate = target->GetIsolate();
+ Handle<Map> map(JSReceiver::cast(*source)->map(), isolate);
+
+ if (!map->IsJSObjectMap()) return Just(false);
+ if (!map->OnlyHasSimpleProperties()) return Just(false);
+
+ Handle<JSObject> from = Handle<JSObject>::cast(source);
+ if (from->elements() != isolate->heap()->empty_fixed_array()) {
+ return Just(false);
+ }
+
+ Handle<DescriptorArray> descriptors(map->instance_descriptors(), isolate);
+ int length = map->NumberOfOwnDescriptors();
+
+ bool stable = true;
+
+ for (int i = 0; i < length; i++) {
+ Handle<Name> next_key(descriptors->GetKey(i), isolate);
+ Handle<Object> prop_value;
+ // Directly decode from the descriptor array if |from| did not change shape.
+ if (stable) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!details.IsEnumerable()) continue;
+ if (details.kind() == kData) {
+ if (details.location() == kDescriptor) {
+ prop_value = handle(descriptors->GetValue(i), isolate);
+ } else {
+ Representation representation = details.representation();
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ prop_value = JSObject::FastPropertyAt(from, representation, index);
+ }
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, JSReceiver::GetProperty(from, next_key),
+ Nothing<bool>());
+ stable = from->map() == *map;
+ }
+ } else {
+ // If the map did change, do a slower lookup. We are still guaranteed that
+ // the object has a simple shape, and that the key is a name.
+ LookupIterator it(from, next_key, from,
+ LookupIterator::OWN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) continue;
+ DCHECK(it.state() == LookupIterator::DATA ||
+ it.state() == LookupIterator::ACCESSOR);
+ if (!it.IsEnumerable()) continue;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value, Object::GetProperty(&it), Nothing<bool>());
+ }
+
+ if (use_set) {
+ LookupIterator it(target, next_key, target);
+ bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
+ Maybe<bool> result = Object::SetProperty(
+ &it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
+ if (result.IsNothing()) return result;
+ if (stable && call_to_js) stable = from->map() == *map;
+ } else {
+ // 4a ii 2. Perform ? CreateDataProperty(target, nextKey, propValue).
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, target, next_key, &success, LookupIterator::OWN);
+ CHECK(success);
+ CHECK(
+ JSObject::CreateDataProperty(&it, prop_value, Object::THROW_ON_ERROR)
+ .FromJust());
+ }
+ }
+
+ return Just(true);
+}
+
+} // namespace
+
+// static
+Maybe<bool> JSReceiver::SetOrCopyDataProperties(Isolate* isolate,
+ Handle<JSReceiver> target,
+ Handle<Object> source,
+ bool use_set) {
+ Maybe<bool> fast_assign = FastAssign(target, source, use_set);
+ if (fast_assign.IsNothing()) return Nothing<bool>();
+ if (fast_assign.FromJust()) return Just(true);
+
+ Handle<JSReceiver> from = Object::ToObject(isolate, source).ToHandleChecked();
+ // 3b. Let keys be ? from.[[OwnPropertyKeys]]().
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, keys,
+ KeyAccumulator::GetKeys(from, KeyCollectionMode::kOwnOnly, ALL_PROPERTIES,
+ GetKeysConversion::kKeepNumbers),
+ Nothing<bool>());
+
+ // 4. Repeat for each element nextKey of keys in List order,
+ for (int j = 0; j < keys->length(); ++j) {
+ Handle<Object> next_key(keys->get(j), isolate);
+ // 4a i. Let desc be ? from.[[GetOwnProperty]](nextKey).
+ PropertyDescriptor desc;
+ Maybe<bool> found =
+ JSReceiver::GetOwnPropertyDescriptor(isolate, from, next_key, &desc);
+ if (found.IsNothing()) return Nothing<bool>();
+ // 4a ii. If desc is not undefined and desc.[[Enumerable]] is true, then
+ if (found.FromJust() && desc.enumerable()) {
+ // 4a ii 1. Let propValue be ? Get(from, nextKey).
+ Handle<Object> prop_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, prop_value,
+ Runtime::GetObjectProperty(isolate, from, next_key), Nothing<bool>());
+
+ if (use_set) {
+ // 4c ii 2. Let status be ? Set(to, nextKey, propValue, true).
+ Handle<Object> status;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, status, Runtime::SetObjectProperty(
+ isolate, target, next_key, prop_value, STRICT),
+ Nothing<bool>());
+ } else {
+ // 4a ii 2. Perform ! CreateDataProperty(target, nextKey, propValue).
+ bool success;
+ LookupIterator it = LookupIterator::PropertyOrElement(
+ isolate, target, next_key, &success, LookupIterator::OWN);
+ CHECK(success);
+ CHECK(JSObject::CreateDataProperty(&it, prop_value,
+ Object::THROW_ON_ERROR)
+ .FromJust());
+ }
+ }
+ }
+
+ return Just(true);
+}
+
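
FastAssign's central trick is the |stable| flag: values are read straight out of the descriptor snapshot while the source keeps its original map, and any call that can run arbitrary JS (a getter, a setter on the target) forces a re-check and, if the shape changed, a downgrade to per-key lookups. A plain-C++ sketch of that protocol; Obj, shape, and AssignSketch are illustrative, and the sketch re-checks after every store rather than only after JS-observable calls:

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Obj {
      std::map<std::string, int> props;
      int shape = 0;  // bumped whenever the layout changes
    };

    static void AssignSketch(Obj* target, Obj* from) {
      const int snapshot = from->shape;
      bool stable = true;
      std::vector<std::pair<std::string, int>> keys(from->props.begin(),
                                                    from->props.end());
      for (const auto& [key, snap_value] : keys) {
        int prop_value;
        if (stable) {
          prop_value = snap_value;          // direct read, shape unchanged
        } else {
          auto it = from->props.find(key);  // slower lookup after a reshape
          if (it == from->props.end()) continue;
          prop_value = it->second;
        }
        target->props[key] = prop_value;
        stable = stable && from->shape == snapshot;
      }
    }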
Map* Object::GetPrototypeChainRootMap(Isolate* isolate) {
DisallowHeapAllocation no_alloc;
if (IsSmi()) {
@@ -2154,6 +2367,40 @@ MaybeHandle<Object> Object::ArraySpeciesConstructor(
}
}
+bool Object::IterationHasObservableEffects() {
+ // Check that this object is an array.
+ if (!IsJSArray()) return true;
+ JSArray* spread_array = JSArray::cast(this);
+ Isolate* isolate = spread_array->GetIsolate();
+
+ // Check that we have the original ArrayPrototype.
+ JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
+ if (!isolate->is_initial_array_prototype(array_proto)) return true;
+
+ // Check that the ArrayPrototype hasn't been modified in a way that would
+ // affect iteration.
+ if (!isolate->IsArrayIteratorLookupChainIntact()) return true;
+
+ // Check that the map of the initial array iterator hasn't changed.
+ Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
+ if (!isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
+ return true;
+ }
+
+ // For FastPacked kinds, iteration will have the same effect as simply
+ // accessing each property in order.
+ ElementsKind array_kind = spread_array->GetElementsKind();
+ if (IsFastPackedElementsKind(array_kind)) return false;
+
+ // For FastHoley kinds, an element access on a hole would cause a lookup on
+ // the prototype. This could have different results if the prototype has been
+ // changed.
+ if (IsFastHoleyElementsKind(array_kind) &&
+ isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+ return false;
+ }
+ return true;
+}
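
All three checks in IterationHasObservableEffects lean on protector cells: global invariants that start out intact and are permanently invalidated when user code touches the relevant machinery, so the hot path pays one load instead of re-validating the prototype chain. A schematic of the idea, not V8's actual protector API:

    struct Protector {
      bool intact = true;
    };

    static Protector array_iterator_protector;

    // Spread can skip the iterator protocol only while the invariant holds.
    static bool SpreadIsUnobservable(bool is_fast_packed_array) {
      return array_iterator_protector.intact && is_fast_packed_array;
    }

    // Called when Array.prototype or the iterator machinery is modified.
    static void InvalidateArrayIteratorProtector() {
      array_iterator_protector.intact = false;
    }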
void Object::ShortPrint(FILE* out) {
OFStream os(out);
@@ -2182,9 +2429,6 @@ std::ostream& operator<<(std::ostream& os, const Brief& v) {
return os;
}
-// Declaration of the static Smi::kZero constant.
-Smi* const Smi::kZero(nullptr);
-
void Smi::SmiPrint(std::ostream& os) const { // NOLINT
os << value();
}
@@ -2311,7 +2555,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(this, new_size - size);
return true;
}
@@ -2377,7 +2621,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
self->set_resource(resource);
if (is_internalized) self->Hash(); // Force regeneration of the hash value.
- heap->AdjustLiveBytes(this, new_size - size, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(this, new_size - size);
return true;
}
@@ -2619,10 +2863,10 @@ void Map::PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
void Map::PrintGeneralization(
FILE* file, const char* reason, int modify_index, int split,
- int descriptors, bool constant_to_field, Representation old_representation,
- Representation new_representation, MaybeHandle<FieldType> old_field_type,
- MaybeHandle<Object> old_value, MaybeHandle<FieldType> new_field_type,
- MaybeHandle<Object> new_value) {
+ int descriptors, bool descriptor_to_field,
+ Representation old_representation, Representation new_representation,
+ MaybeHandle<FieldType> old_field_type, MaybeHandle<Object> old_value,
+ MaybeHandle<FieldType> new_field_type, MaybeHandle<Object> new_value) {
OFStream os(file);
os << "[generalizing]";
Name* name = instance_descriptors()->GetKey(modify_index);
@@ -2632,7 +2876,7 @@ void Map::PrintGeneralization(
os << "{symbol " << static_cast<void*>(name) << "}";
}
os << ":";
- if (constant_to_field) {
+ if (descriptor_to_field) {
os << "c";
} else {
os << old_representation.Mnemonic() << "{";
@@ -2673,8 +2917,8 @@ void JSObject::PrintInstanceMigration(FILE* file,
if (!o_r.Equals(n_r)) {
String::cast(o->GetKey(i))->PrintOn(file);
PrintF(file, ":%s->%s ", o_r.Mnemonic(), n_r.Mnemonic());
- } else if (o->GetDetails(i).type() == DATA_CONSTANT &&
- n->GetDetails(i).type() == DATA) {
+ } else if (o->GetDetails(i).location() == kDescriptor &&
+ n->GetDetails(i).location() == kField) {
Name* name = o->GetKey(i);
if (name->IsString()) {
String::cast(name)->PrintOn(file);
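
This hunk is the first of many applying the same mechanical translation: the old flat PropertyType enum is decomposed into two orthogonal axes, kind and location, and each details.type() check becomes a kind()/location() pair. The mapping, using the enumerator names that appear throughout the diff:

    enum PropertyKind { kData, kAccessor };
    enum PropertyLocation { kField, kDescriptor };

    // Old PropertyType     ->  (kind,       location)
    // DATA                 ->  (kData,      kField)
    // DATA_CONSTANT        ->  (kData,      kDescriptor)
    // ACCESSOR             ->  (kAccessor,  kField)
    // ACCESSOR_CONSTANT    ->  (kAccessor,  kDescriptor)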
@@ -3084,11 +3328,20 @@ Context* JSReceiver::GetCreationContext() {
return function->context()->native_context();
}
-static Handle<Object> WrapType(Handle<FieldType> type) {
+Handle<Object> Map::WrapFieldType(Handle<FieldType> type) {
if (type->IsClass()) return Map::WeakCellForMap(type->AsClass());
return type;
}
+FieldType* Map::UnwrapFieldType(Object* wrapped_type) {
+ Object* value = wrapped_type;
+ if (value->IsWeakCell()) {
+ if (WeakCell::cast(value)->cleared()) return FieldType::None();
+ value = WeakCell::cast(value)->value();
+ }
+ return FieldType::cast(value);
+}
+
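
WrapFieldType stores class field types behind a weak cell so a map never keeps a class map alive on its own, and UnwrapFieldType degrades a cleared cell to FieldType::None(). A rough analogue with std::weak_ptr, simplified in that it wraps every type weakly, whereas the real code wraps only class types:

    #include <memory>

    struct FieldTypeSketch {
      bool is_class = false;
    };

    static std::weak_ptr<FieldTypeSketch> Wrap(
        const std::shared_ptr<FieldTypeSketch>& type) {
      return type;  // the weak cell
    }

    static std::shared_ptr<FieldTypeSketch> Unwrap(
        const std::weak_ptr<FieldTypeSketch>& cell,
        const std::shared_ptr<FieldTypeSketch>& none) {
      if (auto strong = cell.lock()) return strong;
      return none;  // cleared weak cell reads back as FieldType::None()
    }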
MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
Handle<FieldType> type,
PropertyAttributes attributes,
@@ -3113,11 +3366,11 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
type = FieldType::Any(isolate);
}
- Handle<Object> wrapped_type(WrapType(type));
+ Handle<Object> wrapped_type(WrapFieldType(type));
- DataDescriptor new_field_desc(name, index, wrapped_type, attributes,
- representation);
- Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag);
+ Descriptor d = Descriptor::DataField(name, index, wrapped_type, attributes,
+ representation);
+ Handle<Map> new_map = Map::CopyAddDescriptor(map, &d, flag);
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
@@ -3138,8 +3391,8 @@ MaybeHandle<Map> Map::CopyWithConstant(Handle<Map> map,
}
// Allocate new instance descriptors with (name, constant) added.
- DataConstantDescriptor new_constant_desc(name, constant, attributes);
- return Map::CopyAddDescriptor(map, &new_constant_desc, flag);
+ Descriptor d = Descriptor::DataConstant(name, constant, attributes);
+ return Map::CopyAddDescriptor(map, &d, flag);
}
const char* Representation::Mnemonic() const {
@@ -3276,7 +3529,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
DCHECK(details.representation().IsDouble());
DCHECK(!new_map->IsUnboxedDoubleField(index));
- Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ Handle<Object> value = isolate->factory()->NewMutableHeapNumber();
object->RawFastPropertyAtPut(index, *value);
object->synchronized_set_map(*new_map);
return;
@@ -3292,11 +3545,12 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Properly initialize newly added property.
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ value = isolate->factory()->NewMutableHeapNumber();
} else {
value = isolate->factory()->uninitialized_value();
}
- DCHECK_EQ(DATA, details.type());
+ DCHECK_EQ(kField, details.location());
+ DCHECK_EQ(kData, details.kind());
int target_index = details.field_index() - new_map->GetInObjectProperties();
DCHECK(target_index >= 0); // Must be a backing store index.
new_storage->set(target_index, *value);
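
NewMutableHeapNumber here is the box for fields whose representation is Double: the slot holds a pointer to a heap number that can be overwritten in place. A sketch of the storage decision, where Storage and NewStorageFor are illustrative names rather than V8 API:

    #include <memory>
    #include <variant>

    using Storage = std::variant<int, std::unique_ptr<double>>;

    static Storage NewStorageFor(int smi_value, bool representation_is_double) {
      if (representation_is_double) {
        return std::make_unique<double>(smi_value);  // the mutable heap-number box
      }
      return smi_value;  // still representable as a tagged value
    }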
@@ -3339,24 +3593,29 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
for (int i = 0; i < old_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
PropertyDetails old_details = old_descriptors->GetDetails(i);
Representation old_representation = old_details.representation();
Representation representation = details.representation();
Handle<Object> value;
- if (old_details.type() == ACCESSOR_CONSTANT) {
- // In case of kAccessor -> kData property reconfiguration, the property
- // must already be prepared for data or certain type.
- DCHECK(!details.representation().IsNone());
- if (details.representation().IsDouble()) {
- value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ if (old_details.location() == kDescriptor) {
+ if (old_details.kind() == kAccessor) {
+ // In case of kAccessor -> kData property reconfiguration, the property
+ // must already be prepared for data of a certain type.
+ DCHECK(!details.representation().IsNone());
+ if (details.representation().IsDouble()) {
+ value = isolate->factory()->NewMutableHeapNumber();
+ } else {
+ value = isolate->factory()->uninitialized_value();
+ }
} else {
- value = isolate->factory()->uninitialized_value();
+ DCHECK_EQ(kData, old_details.kind());
+ value = handle(old_descriptors->GetValue(i), isolate);
+ DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
}
- } else if (old_details.type() == DATA_CONSTANT) {
- value = handle(old_descriptors->GetValue(i), isolate);
- DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
} else {
+ DCHECK_EQ(kField, old_details.location());
FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
if (object->IsUnboxedDoubleField(index)) {
double old = object->RawFastDoublePropertyAt(index);
@@ -3366,9 +3625,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
} else {
value = handle(object->RawFastPropertyAt(index), isolate);
if (!old_representation.IsDouble() && representation.IsDouble()) {
- if (old_representation.IsNone()) {
- value = handle(Smi::kZero, isolate);
- }
+ DCHECK_IMPLIES(old_representation.IsNone(),
+ value->IsUninitialized(isolate));
value = Object::NewStorageFor(isolate, value, representation);
} else if (old_representation.IsDouble() &&
!representation.IsDouble()) {
@@ -3384,10 +3642,11 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
for (int i = old_nof; i < new_nof; i++) {
PropertyDetails details = new_descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
Handle<Object> value;
if (details.representation().IsDouble()) {
- value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+ value = isolate->factory()->NewMutableHeapNumber();
} else {
value = isolate->factory()->uninitialized_value();
}
@@ -3417,6 +3676,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object,
HeapObject::RawField(*object, index.offset()));
+ } else {
+ DCHECK(!heap->HasRecordedSlot(
+ *object, HeapObject::RawField(*object, index.offset())));
}
} else {
object->RawFastPropertyAtPut(index, value);
@@ -3427,7 +3689,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
if (external > 0) {
- heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, inobject);
+ heap->RightTrimFixedArray(*array, inobject);
object->set_properties(*array);
}
@@ -3440,8 +3702,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Address address = object->address();
heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
ClearRecordedSlots::kYes);
- heap->AdjustLiveBytes(*object, -instance_size_delta,
- Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*object, -instance_size_delta);
}
// We are storing the new map using release store after creating a filler for
@@ -3476,17 +3737,10 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
Handle<Name> key(descs->GetKey(i));
- switch (details.type()) {
- case DATA_CONSTANT: {
- Handle<Object> value(descs->GetConstant(i), isolate);
- PropertyDetails d(details.attributes(), DATA, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case DATA: {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value;
+ Handle<Object> value;
+ if (details.location() == kField) {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+ if (details.kind() == kData) {
if (object->IsUnboxedDoubleField(index)) {
double old_value = object->RawFastDoublePropertyAt(index);
value = isolate->factory()->NewHeapNumber(old_value);
@@ -3498,27 +3752,19 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
value = isolate->factory()->NewHeapNumber(old->value());
}
}
- PropertyDetails d(details.attributes(), DATA, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case ACCESSOR: {
- FieldIndex index = FieldIndex::ForDescriptor(*map, i);
- Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
- }
- case ACCESSOR_CONSTANT: {
- Handle<Object> value(descs->GetCallbacksObject(i), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
- PropertyCellType::kNoCell);
- dictionary = NameDictionary::Add(dictionary, key, value, d);
- break;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ value = handle(object->RawFastPropertyAt(index), isolate);
}
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ value = handle(descs->GetValue(i), isolate);
}
+ DCHECK(!value.is_null());
+ PropertyDetails d(details.kind(), details.attributes(), i + 1,
+ PropertyCellType::kNoCell);
+ dictionary = NameDictionary::Add(dictionary, key, value, d);
}
// Copy the next enumeration index from instance descriptor.
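
The rewritten loop above collapses the old four-way PropertyType switch: wherever the value lives (field or descriptor), it is fetched once, then inserted into the dictionary under its name with details rebuilt from (kind, attributes). A plain-C++ model of that move for data properties only, all names illustrative:

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct DetailsSketch {
      bool is_data = true;
      int enumeration_index = 0;  // preserves the original iteration order
    };

    static std::map<std::string, std::pair<int, DetailsSketch>> ToDictionary(
        const std::vector<std::pair<std::string, int>>& fast_properties) {
      std::map<std::string, std::pair<int, DetailsSketch>> dict;
      int i = 0;
      for (const auto& [name, value] : fast_properties) {
        dict[name] = {value, DetailsSketch{true, ++i}};  // index is i + 1
      }
      return dict;
    }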
@@ -3536,8 +3782,7 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
Heap* heap = isolate->heap();
heap->CreateFillerObjectAt(object->address() + new_instance_size,
instance_size_delta, ClearRecordedSlots::kYes);
- heap->AdjustLiveBytes(*object, -instance_size_delta,
- Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*object, -instance_size_delta);
}
// We are storing the new map using release store after creating a filler for
@@ -3647,22 +3892,30 @@ int Map::NumberOfFields() {
return result;
}
-Handle<Map> Map::CopyGeneralizeAllRepresentations(
- Handle<Map> map, ElementsKind elements_kind, int modify_index,
- StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
- const char* reason) {
+void DescriptorArray::GeneralizeAllFields() {
+ int length = number_of_descriptors();
+ for (int i = 0; i < length; i++) {
+ PropertyDetails details = GetDetails(i);
+ details = details.CopyWithRepresentation(Representation::Tagged());
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
+ SetValue(i, FieldType::Any());
+ }
+ set(ToDetailsIndex(i), details.AsSmi());
+ }
+}
+
+Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
+ ElementsKind elements_kind,
+ int modify_index, PropertyKind kind,
+ PropertyAttributes attributes,
+ const char* reason) {
Isolate* isolate = map->GetIsolate();
Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Handle<DescriptorArray> descriptors =
DescriptorArray::CopyUpTo(old_descriptors, number_of_own_descriptors);
-
- for (int i = 0; i < number_of_own_descriptors; i++) {
- descriptors->SetRepresentation(i, Representation::Tagged());
- if (descriptors->GetDetails(i).type() == DATA) {
- descriptors->SetValue(i, FieldType::Any());
- }
- }
+ descriptors->GeneralizeAllFields();
Handle<LayoutDescriptor> new_layout_descriptor(
LayoutDescriptor::FastPointerLayout(), isolate);
@@ -3673,14 +3926,15 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
// Unless the instance is being migrated, ensure that modify_index is a field.
if (modify_index >= 0) {
PropertyDetails details = descriptors->GetDetails(modify_index);
- if (store_mode == FORCE_FIELD &&
- (details.type() != DATA || details.attributes() != attributes)) {
- int field_index = details.type() == DATA ? details.field_index()
- : new_map->NumberOfFields();
- DataDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
- field_index, attributes, Representation::Tagged());
+ if (details.location() != kField || details.attributes() != attributes) {
+ int field_index = details.location() == kField
+ ? details.field_index()
+ : new_map->NumberOfFields();
+ Descriptor d = Descriptor::DataField(
+ handle(descriptors->GetKey(modify_index), isolate), field_index,
+ attributes, Representation::Tagged());
descriptors->Replace(modify_index, &d);
- if (details.type() != DATA) {
+ if (details.location() != kField) {
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
unused_property_fields += JSObject::kFieldsAdded;
@@ -3693,14 +3947,13 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
if (FLAG_trace_generalization) {
MaybeHandle<FieldType> field_type = FieldType::None(isolate);
- if (details.type() == DATA) {
+ if (details.location() == kField) {
field_type = handle(
map->instance_descriptors()->GetFieldType(modify_index), isolate);
}
map->PrintGeneralization(
stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
- new_map->NumberOfOwnDescriptors(),
- details.type() == DATA_CONSTANT && store_mode == FORCE_FIELD,
+ new_map->NumberOfOwnDescriptors(), details.location() == kDescriptor,
details.representation(), Representation::Tagged(), field_type,
MaybeHandle<Object>(), FieldType::Any(isolate),
MaybeHandle<Object>());
@@ -3725,13 +3978,6 @@ void Map::DeprecateTransitionTree() {
}
-static inline bool EqualImmutableValues(Object* obj1, Object* obj2) {
- if (obj1 == obj2) return true; // Valid for both kData and kAccessor kinds.
- // TODO(ishell): compare AccessorPairs.
- return false;
-}
-
-
// Installs |new_descriptors| over the current instance_descriptors to ensure
// proper sharing of descriptor arrays.
void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
@@ -3774,50 +4020,9 @@ Map* Map::FindRootMap() {
}
-Map* Map::FindLastMatchMap(int verbatim,
- int length,
- DescriptorArray* descriptors) {
- DisallowHeapAllocation no_allocation;
-
- // This can only be called on roots of transition trees.
- DCHECK_EQ(verbatim, NumberOfOwnDescriptors());
-
- Map* current = this;
-
- for (int i = verbatim; i < length; i++) {
- Name* name = descriptors->GetKey(i);
- PropertyDetails details = descriptors->GetDetails(i);
- Map* next = TransitionArray::SearchTransition(current, details.kind(), name,
- details.attributes());
- if (next == NULL) break;
- DescriptorArray* next_descriptors = next->instance_descriptors();
-
- PropertyDetails next_details = next_descriptors->GetDetails(i);
- DCHECK_EQ(details.kind(), next_details.kind());
- DCHECK_EQ(details.attributes(), next_details.attributes());
- if (details.location() != next_details.location()) break;
- if (!details.representation().Equals(next_details.representation())) break;
-
- if (next_details.location() == kField) {
- FieldType* next_field_type = next_descriptors->GetFieldType(i);
- if (!descriptors->GetFieldType(i)->NowIs(next_field_type)) {
- break;
- }
- } else {
- if (!EqualImmutableValues(descriptors->GetValue(i),
- next_descriptors->GetValue(i))) {
- break;
- }
- }
- current = next;
- }
- return current;
-}
-
-
Map* Map::FindFieldOwner(int descriptor) {
DisallowHeapAllocation no_allocation;
- DCHECK_EQ(DATA, instance_descriptors()->GetDetails(descriptor).type());
+ DCHECK_EQ(kField, instance_descriptors()->GetDetails(descriptor).location());
Map* result = this;
Isolate* isolate = GetIsolate();
while (true) {
@@ -3838,7 +4043,8 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
// We store raw pointers in the queue, so no allocations are allowed.
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
- if (details.type() != DATA) return;
+ if (details.location() != kField) return;
+ DCHECK_EQ(kData, details.kind());
Zone zone(GetIsolate()->allocator(), ZONE_NAME);
ZoneQueue<Map*> backlog(&zone);
@@ -3863,9 +4069,9 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
// Skip if already updated the shared descriptor.
if (descriptors->GetValue(descriptor) != *new_wrapped_type) {
- DataDescriptor d(name, descriptors->GetFieldIndex(descriptor),
- new_wrapped_type, details.attributes(),
- new_representation);
+ Descriptor d = Descriptor::DataField(
+ name, descriptors->GetFieldIndex(descriptor), new_wrapped_type,
+ details.attributes(), new_representation);
descriptors->Replace(descriptor, &d);
}
}
@@ -3895,9 +4101,9 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
// static
-void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
- Representation new_representation,
- Handle<FieldType> new_field_type) {
+void Map::GeneralizeField(Handle<Map> map, int modify_index,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
Isolate* isolate = map->GetIsolate();
// Check if we actually need to generalize the field type at all.
@@ -3912,8 +4118,8 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
// Checking old_field_type for being cleared is not necessary because
// the NowIs check below would fail anyway in that case.
new_field_type->NowIs(old_field_type)) {
- DCHECK(Map::GeneralizeFieldType(old_representation, old_field_type,
- new_representation, new_field_type, isolate)
+ DCHECK(GeneralizeFieldType(old_representation, old_field_type,
+ new_representation, new_field_type, isolate)
->NowIs(old_field_type));
return;
}
@@ -3931,7 +4137,7 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
PropertyDetails details = descriptors->GetDetails(modify_index);
Handle<Name> name(descriptors->GetKey(modify_index));
- Handle<Object> wrapped_type(WrapType(new_field_type));
+ Handle<Object> wrapped_type(WrapFieldType(new_field_type));
field_owner->UpdateFieldType(modify_index, name, new_representation,
wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
@@ -3946,577 +4152,40 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
}
}
-static inline Handle<FieldType> GetFieldType(
- Isolate* isolate, Handle<DescriptorArray> descriptors, int descriptor,
- PropertyLocation location, Representation representation) {
-#ifdef DEBUG
- PropertyDetails details = descriptors->GetDetails(descriptor);
- DCHECK_EQ(kData, details.kind());
- DCHECK_EQ(details.location(), location);
-#endif
- if (location == kField) {
- return handle(descriptors->GetFieldType(descriptor), isolate);
- } else {
- return descriptors->GetValue(descriptor)
- ->OptimalType(isolate, representation);
- }
-}
-
-// Reconfigures elements kind to |new_elements_kind| and/or property at
-// |modify_index| with |new_kind|, |new_attributes|, |store_mode| and/or
-// |new_representation|/|new_field_type|.
-// If |modify_index| is negative then no properties are reconfigured but the
-// map is migrated to the up-to-date non-deprecated state.
-//
-// This method rewrites or completes the transition tree to reflect the new
-// change. To avoid high degrees of polymorphism, and to stabilize quickly,
-// on every rewrite the new type is deduced by merging the current type with
-// any potential new (partial) version of the type in the transition tree.
-// To do this, on each rewrite:
-// - Search the root of the transition tree using FindRootMap.
-// - Find/create a |root_map| with requested |new_elements_kind|.
-// - Find |target_map|, the newest matching version of this map using the
-// virtually "enhanced" |old_map|'s descriptor array (i.e. whose entry at
-// |modify_index| is considered to be of |new_kind| and having
-// |new_attributes|) to walk the transition tree.
-// - Merge/generalize the "enhanced" descriptor array of the |old_map| and
-// descriptor array of the |target_map|.
-// - Generalize the |modify_index| descriptor using |new_representation| and
-// |new_field_type|.
-// - Walk the tree again starting from the root towards |target_map|. Stop at
-// |split_map|, the first map whose descriptor array does not match the merged
-// descriptor array.
-// - If |target_map| == |split_map|, |target_map| is in the expected state.
-// Return it.
-// - Otherwise, invalidate the outdated transition target from |target_map|, and
-// replace its transition tree with a new branch for the updated descriptors.
-Handle<Map> Map::Reconfigure(Handle<Map> old_map,
- ElementsKind new_elements_kind, int modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type,
- StoreMode store_mode) {
- DCHECK_NE(kAccessor, new_kind); // TODO(ishell): not supported yet.
- DCHECK(store_mode != FORCE_FIELD || modify_index >= 0);
- Isolate* isolate = old_map->GetIsolate();
-
- Handle<DescriptorArray> old_descriptors(
- old_map->instance_descriptors(), isolate);
- int old_nof = old_map->NumberOfOwnDescriptors();
-
- // If it's just a representation generalization case (i.e. property kind and
- // attributes stays unchanged) it's fine to transition from None to anything
- // but double without any modification to the object, because the default
- // uninitialized value for representation None can be overwritten by both
- // smi and tagged values. Doubles, however, would require a box allocation.
- if (modify_index >= 0 && !new_representation.IsNone() &&
- !new_representation.IsDouble() &&
- old_map->elements_kind() == new_elements_kind) {
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- Representation old_representation = old_details.representation();
-
- if (old_representation.IsNone()) {
- DCHECK_EQ(new_kind, old_details.kind());
- DCHECK_EQ(new_attributes, old_details.attributes());
- DCHECK_EQ(DATA, old_details.type());
- if (FLAG_trace_generalization) {
- old_map->PrintGeneralization(
- stdout, "uninitialized field", modify_index,
- old_map->NumberOfOwnDescriptors(),
- old_map->NumberOfOwnDescriptors(), false, old_representation,
- new_representation,
- handle(old_descriptors->GetFieldType(modify_index), isolate),
- MaybeHandle<Object>(), new_field_type, MaybeHandle<Object>());
- }
- Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
-
- GeneralizeFieldType(field_owner, modify_index, new_representation,
- new_field_type);
- DCHECK(old_descriptors->GetDetails(modify_index)
- .representation()
- .Equals(new_representation));
- DCHECK(
- old_descriptors->GetFieldType(modify_index)->NowIs(new_field_type));
- return old_map;
- }
- }
-
- // Check the state of the root map.
- Handle<Map> root_map(old_map->FindRootMap(), isolate);
- if (!old_map->EquivalentToForTransition(*root_map)) {
- return CopyGeneralizeAllRepresentations(
- old_map, new_elements_kind, modify_index, store_mode, new_kind,
- new_attributes, "GenAll_NotEquivalent");
- }
-
- ElementsKind from_kind = root_map->elements_kind();
- ElementsKind to_kind = new_elements_kind;
- // TODO(ishell): Add a test for SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
- if (from_kind != to_kind && to_kind != DICTIONARY_ELEMENTS &&
- to_kind != SLOW_STRING_WRAPPER_ELEMENTS &&
- to_kind != SLOW_SLOPPY_ARGUMENTS_ELEMENTS &&
- !(IsTransitionableFastElementsKind(from_kind) &&
- IsMoreGeneralElementsKindTransition(from_kind, to_kind))) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_InvalidElementsTransition");
- }
- int root_nof = root_map->NumberOfOwnDescriptors();
- if (modify_index >= 0 && modify_index < root_nof) {
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- if (old_details.kind() != new_kind ||
- old_details.attributes() != new_attributes) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_RootModification1");
- }
- if ((old_details.type() != DATA && store_mode == FORCE_FIELD) ||
- (old_details.type() == DATA &&
- (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
- !new_representation.fits_into(old_details.representation())))) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_RootModification2");
- }
- }
-
- // From here on, use the map with correct elements kind as root map.
- if (from_kind != to_kind) {
- root_map = Map::AsElementsKind(root_map, to_kind);
- }
-
- Handle<Map> target_map = root_map;
- for (int i = root_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- PropertyKind next_kind;
- PropertyLocation next_location;
- PropertyAttributes next_attributes;
- Representation next_representation;
- bool property_kind_reconfiguration = false;
-
- if (modify_index == i) {
- DCHECK_EQ(FORCE_FIELD, store_mode);
- property_kind_reconfiguration = old_details.kind() != new_kind;
-
- next_kind = new_kind;
- next_location = kField;
- next_attributes = new_attributes;
- // If property kind is not reconfigured merge the result with
- // representation/field type from the old descriptor.
- next_representation = new_representation;
- if (!property_kind_reconfiguration) {
- next_representation =
- next_representation.generalize(old_details.representation());
- }
-
- } else {
- next_kind = old_details.kind();
- next_location = old_details.location();
- next_attributes = old_details.attributes();
- next_representation = old_details.representation();
- }
- Map* transition = TransitionArray::SearchTransition(
- *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
- if (transition == NULL) break;
- Handle<Map> tmp_map(transition, isolate);
-
- Handle<DescriptorArray> tmp_descriptors = handle(
- tmp_map->instance_descriptors(), isolate);
-
- // Check if target map is incompatible.
- PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
- DCHECK_EQ(next_kind, tmp_details.kind());
- DCHECK_EQ(next_attributes, tmp_details.attributes());
- if (next_kind == kAccessor &&
- !EqualImmutableValues(old_descriptors->GetValue(i),
- tmp_descriptors->GetValue(i))) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_Incompatible");
- }
- if (next_location == kField && tmp_details.location() == kDescriptor) break;
-
- Representation tmp_representation = tmp_details.representation();
- if (!next_representation.fits_into(tmp_representation)) break;
-
- PropertyLocation old_location = old_details.location();
- PropertyLocation tmp_location = tmp_details.location();
- if (tmp_location == kField) {
- if (next_kind == kData) {
- Handle<FieldType> next_field_type;
- if (modify_index == i) {
- next_field_type = new_field_type;
- if (!property_kind_reconfiguration) {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i,
- old_details.location(), tmp_representation);
- Representation old_representation = old_details.representation();
- next_field_type = GeneralizeFieldType(
- old_representation, old_field_type, new_representation,
- next_field_type, isolate);
- }
- } else {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i, old_details.location(),
- tmp_representation);
- next_field_type = old_field_type;
- }
- GeneralizeFieldType(tmp_map, i, tmp_representation, next_field_type);
- }
- } else if (old_location == kField ||
- !EqualImmutableValues(old_descriptors->GetValue(i),
- tmp_descriptors->GetValue(i))) {
- break;
- }
- DCHECK(!tmp_map->is_deprecated());
- target_map = tmp_map;
- }
-
- // Directly change the map if the target map is more general.
- Handle<DescriptorArray> target_descriptors(
- target_map->instance_descriptors(), isolate);
- int target_nof = target_map->NumberOfOwnDescriptors();
- if (target_nof == old_nof &&
- (store_mode != FORCE_FIELD ||
- (modify_index >= 0 &&
- target_descriptors->GetDetails(modify_index).location() == kField))) {
-#ifdef DEBUG
- if (modify_index >= 0) {
- PropertyDetails details = target_descriptors->GetDetails(modify_index);
- DCHECK_EQ(new_kind, details.kind());
- DCHECK_EQ(new_attributes, details.attributes());
- DCHECK(new_representation.fits_into(details.representation()));
- DCHECK(details.location() != kField ||
- new_field_type->NowIs(
- target_descriptors->GetFieldType(modify_index)));
- }
-#endif
- if (*target_map != *old_map) {
- old_map->NotifyLeafMapLayoutChange();
- }
- return target_map;
- }
-
- // Find the last compatible target map in the transition tree.
- for (int i = target_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- PropertyKind next_kind;
- PropertyAttributes next_attributes;
- if (modify_index == i) {
- next_kind = new_kind;
- next_attributes = new_attributes;
- } else {
- next_kind = old_details.kind();
- next_attributes = old_details.attributes();
- }
- Map* transition = TransitionArray::SearchTransition(
- *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
- if (transition == NULL) break;
- Handle<Map> tmp_map(transition, isolate);
- Handle<DescriptorArray> tmp_descriptors(
- tmp_map->instance_descriptors(), isolate);
-
- // Check if target map is compatible.
-#ifdef DEBUG
- PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
- DCHECK_EQ(next_kind, tmp_details.kind());
- DCHECK_EQ(next_attributes, tmp_details.attributes());
-#endif
- if (next_kind == kAccessor &&
- !EqualImmutableValues(old_descriptors->GetValue(i),
- tmp_descriptors->GetValue(i))) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_Incompatible");
- }
- DCHECK(!tmp_map->is_deprecated());
- target_map = tmp_map;
- }
- target_nof = target_map->NumberOfOwnDescriptors();
- target_descriptors = handle(target_map->instance_descriptors(), isolate);
-
- // Allocate a new descriptor array large enough to hold the required
- // descriptors, with minimally the exact same size as the old descriptor
- // array.
- int new_slack = Max(
- old_nof, old_descriptors->number_of_descriptors()) - old_nof;
- Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate(
- isolate, old_nof, new_slack);
- DCHECK(new_descriptors->length() > target_descriptors->length() ||
- new_descriptors->NumberOfSlackDescriptors() > 0 ||
- new_descriptors->number_of_descriptors() ==
- old_descriptors->number_of_descriptors());
- DCHECK(new_descriptors->number_of_descriptors() == old_nof);
-
- // 0 -> |root_nof|
- int current_offset = 0;
- for (int i = 0; i < root_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- if (old_details.location() == kField) {
- current_offset += old_details.field_width_in_words();
- }
- Descriptor d(handle(old_descriptors->GetKey(i), isolate),
- handle(old_descriptors->GetValue(i), isolate),
- old_details);
- new_descriptors->Set(i, &d);
- }
-
- // |root_nof| -> |target_nof|
- for (int i = root_nof; i < target_nof; ++i) {
- Handle<Name> target_key(target_descriptors->GetKey(i), isolate);
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- PropertyDetails target_details = target_descriptors->GetDetails(i);
-
- PropertyKind next_kind;
- PropertyAttributes next_attributes;
- PropertyLocation next_location;
- Representation next_representation;
- bool property_kind_reconfiguration = false;
-
- if (modify_index == i) {
- DCHECK_EQ(FORCE_FIELD, store_mode);
- property_kind_reconfiguration = old_details.kind() != new_kind;
-
- next_kind = new_kind;
- next_attributes = new_attributes;
- next_location = kField;
-
- // Merge new representation/field type with ones from the target
- // descriptor. If property kind is not reconfigured merge the result with
- // representation/field type from the old descriptor.
- next_representation =
- new_representation.generalize(target_details.representation());
- if (!property_kind_reconfiguration) {
- next_representation =
- next_representation.generalize(old_details.representation());
- }
- } else {
- // Merge old_descriptor and target_descriptor entries.
- DCHECK_EQ(target_details.kind(), old_details.kind());
- next_kind = target_details.kind();
- next_attributes = target_details.attributes();
- next_location =
- old_details.location() == kField ||
- target_details.location() == kField ||
- !EqualImmutableValues(target_descriptors->GetValue(i),
- old_descriptors->GetValue(i))
- ? kField
- : kDescriptor;
-
- next_representation = old_details.representation().generalize(
- target_details.representation());
- }
- DCHECK_EQ(next_kind, target_details.kind());
- DCHECK_EQ(next_attributes, target_details.attributes());
-
- if (next_location == kField) {
- if (next_kind == kData) {
- Handle<FieldType> target_field_type =
- GetFieldType(isolate, target_descriptors, i,
- target_details.location(), next_representation);
-
- Handle<FieldType> next_field_type;
- if (modify_index == i) {
- next_field_type = GeneralizeFieldType(
- target_details.representation(), target_field_type,
- new_representation, new_field_type, isolate);
- if (!property_kind_reconfiguration) {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i,
- old_details.location(), next_representation);
- next_field_type = GeneralizeFieldType(
- old_details.representation(), old_field_type,
- next_representation, next_field_type, isolate);
- }
- } else {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i, old_details.location(),
- next_representation);
- next_field_type = GeneralizeFieldType(
- old_details.representation(), old_field_type, next_representation,
- target_field_type, isolate);
- }
- Handle<Object> wrapped_type(WrapType(next_field_type));
- DataDescriptor d(target_key, current_offset, wrapped_type,
- next_attributes, next_representation);
- current_offset += d.GetDetails().field_width_in_words();
- new_descriptors->Set(i, &d);
- } else {
- UNIMPLEMENTED(); // TODO(ishell): implement.
- }
- } else {
- PropertyDetails details(next_attributes, next_kind, next_location,
- next_representation);
- Descriptor d(target_key, handle(target_descriptors->GetValue(i), isolate),
- details);
- new_descriptors->Set(i, &d);
- }
- }
-
- // |target_nof| -> |old_nof|
- for (int i = target_nof; i < old_nof; ++i) {
- PropertyDetails old_details = old_descriptors->GetDetails(i);
- Handle<Name> old_key(old_descriptors->GetKey(i), isolate);
-
- // Merge old_descriptor entry and modified details together.
- PropertyKind next_kind;
- PropertyAttributes next_attributes;
- PropertyLocation next_location;
- Representation next_representation;
- bool property_kind_reconfiguration = false;
-
- if (modify_index == i) {
- DCHECK_EQ(FORCE_FIELD, store_mode);
- // In case of property kind reconfiguration it is not necessary to
- // take into account representation/field type of the old descriptor.
- property_kind_reconfiguration = old_details.kind() != new_kind;
-
- next_kind = new_kind;
- next_attributes = new_attributes;
- next_location = kField;
- next_representation = new_representation;
- if (!property_kind_reconfiguration) {
- next_representation =
- next_representation.generalize(old_details.representation());
- }
- } else {
- next_kind = old_details.kind();
- next_attributes = old_details.attributes();
- next_location = old_details.location();
- next_representation = old_details.representation();
- }
-
- if (next_location == kField) {
- if (next_kind == kData) {
- Handle<FieldType> next_field_type;
- if (modify_index == i) {
- next_field_type = new_field_type;
- if (!property_kind_reconfiguration) {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i,
- old_details.location(), next_representation);
- next_field_type = GeneralizeFieldType(
- old_details.representation(), old_field_type,
- next_representation, next_field_type, isolate);
- }
- } else {
- Handle<FieldType> old_field_type =
- GetFieldType(isolate, old_descriptors, i, old_details.location(),
- next_representation);
- next_field_type = old_field_type;
- }
-
- Handle<Object> wrapped_type(WrapType(next_field_type));
-
- DataDescriptor d(old_key, current_offset, wrapped_type, next_attributes,
- next_representation);
- current_offset += d.GetDetails().field_width_in_words();
- new_descriptors->Set(i, &d);
- } else {
- UNIMPLEMENTED(); // TODO(ishell): implement.
- }
- } else {
- PropertyDetails details(next_attributes, next_kind, next_location,
- next_representation);
- Descriptor d(old_key, handle(old_descriptors->GetValue(i), isolate),
- details);
- new_descriptors->Set(i, &d);
- }
- }
-
- new_descriptors->Sort();
-
- DCHECK(store_mode != FORCE_FIELD ||
- new_descriptors->GetDetails(modify_index).location() == kField);
-
- Handle<Map> split_map(root_map->FindLastMatchMap(
- root_nof, old_nof, *new_descriptors), isolate);
- int split_nof = split_map->NumberOfOwnDescriptors();
- DCHECK_NE(old_nof, split_nof);
-
- PropertyKind split_kind;
- PropertyAttributes split_attributes;
- if (modify_index == split_nof) {
- split_kind = new_kind;
- split_attributes = new_attributes;
- } else {
- PropertyDetails split_prop_details = old_descriptors->GetDetails(split_nof);
- split_kind = split_prop_details.kind();
- split_attributes = split_prop_details.attributes();
- }
-
- // Invalidate a transition target at |key|.
- Map* maybe_transition = TransitionArray::SearchTransition(
- *split_map, split_kind, old_descriptors->GetKey(split_nof),
- split_attributes);
- if (maybe_transition != NULL) {
- maybe_transition->DeprecateTransitionTree();
- }
-
- // If |maybe_transition| is not NULL then the transition array already
- // contains an entry for the given descriptor. This means that the transition
- // could be inserted regardless of whether the transition array is full or not.
- if (maybe_transition == NULL &&
- !TransitionArray::CanHaveMoreTransitions(split_map)) {
- return CopyGeneralizeAllRepresentations(
- old_map, to_kind, modify_index, store_mode, new_kind, new_attributes,
- "GenAll_CantHaveMoreTransitions");
- }
-
- old_map->NotifyLeafMapLayoutChange();
-
- if (FLAG_trace_generalization && modify_index >= 0) {
- PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
- PropertyDetails new_details = new_descriptors->GetDetails(modify_index);
- MaybeHandle<FieldType> old_field_type;
- MaybeHandle<FieldType> new_field_type;
- MaybeHandle<Object> old_value;
- MaybeHandle<Object> new_value;
- if (old_details.type() == DATA) {
- old_field_type =
- handle(old_descriptors->GetFieldType(modify_index), isolate);
- } else {
- old_value = handle(old_descriptors->GetValue(modify_index), isolate);
- }
- if (new_details.type() == DATA) {
- new_field_type =
- handle(new_descriptors->GetFieldType(modify_index), isolate);
- } else {
- new_value = handle(new_descriptors->GetValue(modify_index), isolate);
- }
-
- old_map->PrintGeneralization(
- stdout, "", modify_index, split_nof, old_nof,
- old_details.location() == kDescriptor && store_mode == FORCE_FIELD,
- old_details.representation(), new_details.representation(),
- old_field_type, old_value, new_field_type, new_value);
- }
-
- Handle<LayoutDescriptor> new_layout_descriptor =
- LayoutDescriptor::New(split_map, new_descriptors, old_nof);
-
- Handle<Map> new_map =
- AddMissingTransitions(split_map, new_descriptors, new_layout_descriptor);
-
- // Deprecated part of the transition tree is no longer reachable, so replace
- // current instance descriptors in the "survived" part of the tree with
- // the new descriptors to maintain descriptors sharing invariant.
- split_map->ReplaceDescriptors(*new_descriptors, *new_layout_descriptor);
- return new_map;
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureProperty(Handle<Map> map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type) {
+ DCHECK_EQ(kData, new_kind); // Only kData case is supported.
+ MapUpdater mu(map->GetIsolate(), map);
+ return mu.ReconfigureToDataField(modify_index, new_attributes,
+ new_representation, new_field_type);
+}
+
+// TODO(ishell): remove.
+// static
+Handle<Map> Map::ReconfigureElementsKind(Handle<Map> map,
+ ElementsKind new_elements_kind) {
+ MapUpdater mu(map->GetIsolate(), map);
+ return mu.ReconfigureElementsKind(new_elements_kind);
}
+// Generalize all fields and update the transition tree.
+Handle<Map> Map::GeneralizeAllFields(Handle<Map> map) {
+ Isolate* isolate = map->GetIsolate();
+ Handle<FieldType> any_type = FieldType::Any(isolate);
-// Generalize the representation of all DATA descriptors.
-Handle<Map> Map::GeneralizeAllFieldRepresentations(
- Handle<Map> map) {
Handle<DescriptorArray> descriptors(map->instance_descriptors());
for (int i = 0; i < map->NumberOfOwnDescriptors(); ++i) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() == DATA) {
- map = ReconfigureProperty(map, i, kData, details.attributes(),
- Representation::Tagged(),
- FieldType::Any(map->GetIsolate()), FORCE_FIELD);
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
+ MapUpdater mu(isolate, map);
+ map = mu.ReconfigureToDataField(i, details.attributes(),
+ Representation::Tagged(), any_type);
}
}
return map;
@@ -4572,46 +4241,43 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
if (!old_details.representation().fits_into(new_details.representation())) {
return nullptr;
}
- switch (new_details.type()) {
- case DATA: {
+ if (new_details.location() == kField) {
+ if (new_details.kind() == kData) {
FieldType* new_type = new_descriptors->GetFieldType(i);
// Cleared field types need special treatment. They represent lost
// knowledge, so we must first generalize the new_type to "Any".
if (FieldTypeIsCleared(new_details.representation(), new_type)) {
return nullptr;
}
- PropertyType old_property_type = old_details.type();
- if (old_property_type == DATA) {
+ DCHECK_EQ(kData, old_details.kind());
+ if (old_details.location() == kField) {
FieldType* old_type = old_descriptors->GetFieldType(i);
if (FieldTypeIsCleared(old_details.representation(), old_type) ||
!old_type->NowIs(new_type)) {
return nullptr;
}
} else {
- DCHECK(old_property_type == DATA_CONSTANT);
+ DCHECK_EQ(kDescriptor, old_details.location());
Object* old_value = old_descriptors->GetValue(i);
if (!new_type->NowContains(old_value)) {
return nullptr;
}
}
- break;
- }
- case ACCESSOR: {
+
+ } else {
+ DCHECK_EQ(kAccessor, new_details.kind());
#ifdef DEBUG
FieldType* new_type = new_descriptors->GetFieldType(i);
DCHECK(new_type->IsAny());
#endif
- break;
+ UNREACHABLE();
}
-
- case DATA_CONSTANT:
- case ACCESSOR_CONSTANT: {
- Object* old_value = old_descriptors->GetValue(i);
- Object* new_value = new_descriptors->GetValue(i);
- if (old_details.location() == kField || old_value != new_value) {
- return nullptr;
- }
- break;
+ } else {
+ DCHECK_EQ(kDescriptor, new_details.location());
+ Object* old_value = old_descriptors->GetValue(i);
+ Object* new_value = new_descriptors->GetValue(i);
+ if (old_details.location() == kField || old_value != new_value) {
+ return nullptr;
}
}
}
@@ -4623,9 +4289,8 @@ Map* Map::TryReplayPropertyTransitions(Map* old_map) {
// static
Handle<Map> Map::Update(Handle<Map> map) {
if (!map->is_deprecated()) return map;
- return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
- FieldType::None(map->GetIsolate()),
- ALLOW_IN_DESCRIPTOR);
+ MapUpdater mu(map->GetIsolate(), map);
+ return mu.Update();
}
Maybe<bool> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
@@ -5045,6 +4710,36 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
map->UpdateDescriptors(*new_descriptors, layout_descriptor);
}
+// static
+Handle<Map> Map::GetObjectCreateMap(Handle<HeapObject> prototype) {
+ Isolate* isolate = prototype->GetIsolate();
+ Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
+ isolate);
+ if (map->prototype() == *prototype) return map;
+ if (prototype->IsNull(isolate)) {
+ return isolate->slow_object_with_null_prototype_map();
+ }
+ if (prototype->IsJSObject()) {
+ Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
+ if (!js_prototype->map()->is_prototype_map()) {
+ JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
+ }
+ Handle<PrototypeInfo> info =
+ Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
+ // TODO(verwaest): Use inobject slack tracking for this map.
+ if (info->HasObjectCreateMap()) {
+ map = handle(info->ObjectCreateMap(), isolate);
+ } else {
+ map = Map::CopyInitialMap(map);
+ Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
+ PrototypeInfo::SetObjectCreateMap(info, map);
+ }
+ return map;
+ }
+
+ return Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
+}
+
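
GetObjectCreateMap memoizes one map per prototype on its PrototypeInfo, so only the first Object.create(proto) call pays for CopyInitialMap; later calls for the same prototype reuse the cached map. The caching skeleton, reduced to a std::map keyed by an illustrative prototype id:

    #include <map>

    struct MapSketch {
      int prototype_id = 0;
    };

    static std::map<int, MapSketch> create_map_cache;  // stands in for PrototypeInfo

    static MapSketch GetCreateMap(int prototype_id, const MapSketch& initial_map) {
      auto it = create_map_cache.find(prototype_id);
      if (it != create_map_cache.end()) return it->second;  // cache hit
      MapSketch copy = initial_map;      // CopyInitialMap
      copy.prototype_id = prototype_id;  // SetPrototype
      create_map_cache[prototype_id] = copy;
      return copy;
    }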
template <class T>
static int AppendUniqueCallbacks(Handle<TemplateList> callbacks,
Handle<typename T::Array> array,
@@ -5094,8 +4789,9 @@ struct DescriptorArrayAppender {
int valid_descriptors,
Handle<DescriptorArray> array) {
DisallowHeapAllocation no_gc;
- AccessorConstantDescriptor desc(key, entry, entry->property_attributes());
- array->Append(&desc);
+ Descriptor d =
+ Descriptor::AccessorConstant(key, entry, entry->property_attributes());
+ array->Append(&d);
}
};
@@ -5979,7 +5675,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
iteration_order =
NameDictionary::DoGenerateNewEnumerationIndices(dictionary);
} else {
- iteration_order = NameDictionary::BuildIterationIndicesArray(dictionary);
+ iteration_order = NameDictionary::IterationIndices(dictionary);
}
int instance_descriptor_length = iteration_order->length();
@@ -5990,10 +5686,12 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int index = Smi::cast(iteration_order->get(i))->value();
DCHECK(dictionary->IsKey(isolate, dictionary->KeyAt(index)));
- Object* value = dictionary->ValueAt(index);
- PropertyType type = dictionary->DetailsAt(index).type();
- if (type == DATA && !value->IsJSFunction()) {
- number_of_fields += 1;
+ PropertyKind kind = dictionary->DetailsAt(index).kind();
+ if (kind == kData) {
+ Object* value = dictionary->ValueAt(index);
+ if (!value->IsJSFunction()) {
+ number_of_fields += 1;
+ }
}
}
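The counting pass above now dispatches on PropertyKind instead of the old PropertyType: only plain data properties whose value is not a function become backing-store fields, while functions stay descriptor-held constants. A compilable sketch of that rule, with invented Prop/Kind types:

#include <vector>

enum class Kind { kData, kAccessor };

struct Prop {
  Kind kind;
  bool is_function;
};

// Counts how many properties will need field storage after migration.
int CountFields(const std::vector<Prop>& props) {
  int fields = 0;
  for (const Prop& p : props) {
    if (p.kind == Kind::kData && !p.is_function) ++fields;
  }
  return fields;
}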
@@ -6058,13 +5756,25 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
PropertyDetails details = dictionary->DetailsAt(index);
int enumeration_index = details.dictionary_index();
- PropertyType type = details.type();
- if (value->IsJSFunction()) {
- DataConstantDescriptor d(key, handle(value, isolate),
- details.attributes());
- descriptors->Set(enumeration_index - 1, &d);
- } else if (type == DATA) {
+ Descriptor d;
+ if (details.kind() == kData) {
+ if (value->IsJSFunction()) {
+ d = Descriptor::DataConstant(key, handle(value, isolate),
+ details.attributes());
+ } else {
+ d = Descriptor::DataField(
+ key, current_offset, details.attributes(),
+ // TODO(verwaest): value->OptimalRepresentation();
+ Representation::Tagged());
+ }
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ d = Descriptor::AccessorConstant(key, handle(value, isolate),
+ details.attributes());
+ }
+ details = d.GetDetails();
+ if (details.location() == kField) {
if (current_offset < inobject_props) {
object->InObjectPropertyAtPut(current_offset, value,
UPDATE_WRITE_BARRIER);
@@ -6072,18 +5782,9 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int offset = current_offset - inobject_props;
fields->set(offset, value);
}
- DataDescriptor d(key, current_offset, details.attributes(),
- // TODO(verwaest): value->OptimalRepresentation();
- Representation::Tagged());
- current_offset += d.GetDetails().field_width_in_words();
- descriptors->Set(enumeration_index - 1, &d);
- } else if (type == ACCESSOR_CONSTANT) {
- AccessorConstantDescriptor d(key, handle(value, isolate),
- details.attributes());
- descriptors->Set(enumeration_index - 1, &d);
- } else {
- UNREACHABLE();
+ current_offset += details.field_width_in_words();
}
+ descriptors->Set(enumeration_index - 1, &d);
}
DCHECK(current_offset == number_of_fields);
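In the rewritten loop the descriptor is built first and its details decide storage: only location() == kField consumes object storage and advances the offset by the field width; constants and accessor pairs live in the descriptor array itself. A sketch of that offset rule (Details here is a simplified stand-in, not V8's PropertyDetails):

#include <cassert>

enum class Location { kField, kDescriptor };

struct Details {
  Location location;
  int field_width_in_words;
};

// Advances the property offset only for field-backed descriptors.
int AdvanceOffset(int current_offset, const Details& d) {
  if (d.location == Location::kField) {
    assert(d.field_width_in_words >= 1);
    return current_offset + d.field_width_in_words;
  }
  return current_offset;
}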
@@ -6123,9 +5824,10 @@ void JSObject::ResetElements(Handle<JSObject> object) {
void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
if (dictionary->requires_slow_elements()) return;
dictionary->set_requires_slow_elements();
- // TODO(verwaest): Remove this hack.
if (map()->is_prototype_map()) {
- TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
+ // If this object is a prototype (the callee will check), invalidate any
+ // prototype chains involving it.
+ InvalidatePrototypeChains(map());
}
}
@@ -6412,33 +6114,6 @@ Maybe<bool> JSReceiver::DeletePropertyOrElement(Handle<JSReceiver> object,
return DeleteProperty(&it, language_mode);
}
-
-// ES6 7.1.14
-// static
-MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
- Handle<Object> value) {
- // 1. Let key be ToPrimitive(argument, hint String).
- MaybeHandle<Object> maybe_key =
- Object::ToPrimitive(value, ToPrimitiveHint::kString);
- // 2. ReturnIfAbrupt(key).
- Handle<Object> key;
- if (!maybe_key.ToHandle(&key)) return key;
- // 3. If Type(key) is Symbol, then return key.
- if (key->IsSymbol()) return key;
- // 4. Return ToString(key).
- // Extending spec'ed behavior, we'd be happy to return an element index.
- if (key->IsSmi()) return key;
- if (key->IsHeapNumber()) {
- uint32_t uint_value;
- if (value->ToArrayLength(&uint_value) &&
- uint_value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return handle(Smi::FromInt(static_cast<int>(uint_value)), isolate);
- }
- }
- return Object::ToString(isolate, key);
-}
-
-
// ES6 19.1.2.4
// static
Object* JSReceiver::DefineProperty(Isolate* isolate, Handle<Object> object,
@@ -6563,12 +6238,15 @@ Maybe<bool> JSReceiver::DefineOwnProperty(Isolate* isolate,
return JSProxy::DefineOwnProperty(isolate, Handle<JSProxy>::cast(object),
key, desc, should_throw);
}
+ if (object->IsJSTypedArray()) {
+ return JSTypedArray::DefineOwnProperty(
+ isolate, Handle<JSTypedArray>::cast(object), key, desc, should_throw);
+ }
// TODO(neis): Special case for JSModuleNamespace?
// OrdinaryDefineOwnProperty, by virtue of calling
- // DefineOwnPropertyIgnoreAttributes, can handle arguments (ES6 9.4.4.2)
- // and IntegerIndexedExotics (ES6 9.4.5.3), with one exception:
- // TODO(jkummerow): Setting an indexed accessor on a typed array should throw.
+ // DefineOwnPropertyIgnoreAttributes, can handle arguments
+ // (ES#sec-arguments-exotic-objects-defineownproperty-p-desc).
return OrdinaryDefineOwnProperty(isolate, Handle<JSObject>::cast(object), key,
desc, should_throw);
}
@@ -7305,7 +6983,7 @@ Maybe<bool> JSProxy::SetPrivateProperty(Isolate* isolate, Handle<JSProxy> proxy,
}
Handle<NameDictionary> dict(proxy->property_dictionary());
- PropertyDetails details(DONT_ENUM, DATA, 0, PropertyCellType::kNoCell);
+ PropertyDetails details(kData, DONT_ENUM, 0, PropertyCellType::kNoCell);
Handle<NameDictionary> result =
NameDictionary::Add(dict, private_name, value, details);
if (!dict.is_identical_to(result)) proxy->set_properties(*result);
@@ -7330,13 +7008,7 @@ namespace {
Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
PropertyDescriptor* desc) {
- bool has_access = true;
- if (it->state() == LookupIterator::ACCESS_CHECK) {
- has_access = it->HasAccess() || JSObject::AllCanRead(it);
- it->Next();
- }
-
- if (has_access && it->state() == LookupIterator::INTERCEPTOR) {
+ if (it->state() == LookupIterator::INTERCEPTOR) {
Isolate* isolate = it->isolate();
Handle<InterceptorInfo> interceptor = it->GetInterceptor();
if (!interceptor->descriptor()->IsUndefined(isolate)) {
@@ -7380,7 +7052,6 @@ Maybe<bool> GetPropertyDescriptorWithInterceptor(LookupIterator* it,
}
}
}
- it->Restart();
return Just(false);
}
} // namespace
@@ -8005,7 +7676,7 @@ void ApplyAttributesToDictionary(Isolate* isolate,
PropertyDetails details = dictionary->DetailsAt(i);
int attrs = attributes;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if ((attributes & READ_ONLY) && details.type() == ACCESSOR_CONSTANT) {
+ if ((attributes & READ_ONLY) && details.kind() == kAccessor) {
Object* v = dictionary->ValueAt(i);
if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
@@ -8245,7 +7916,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
int limit = copy->map()->NumberOfOwnDescriptors();
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
+ DCHECK_EQ(kData, details.kind());
FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
if (object->IsUnboxedDoubleField(index)) {
if (copying) {
@@ -8570,8 +8242,8 @@ bool Map::OnlyHasSimpleProperties() {
// Wrapped string elements aren't explicitly stored in the elements backing
// store, but are loaded indirectly from the underlying string.
return !IsStringWrapperElementsKind(elements_kind()) &&
- instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
- !has_hidden_prototype() && !is_dictionary_map();
+ !IsSpecialReceiverMap() && !has_hidden_prototype() &&
+ !is_dictionary_map();
}
MUST_USE_RESULT Maybe<bool> FastGetOwnValuesOrEntries(
@@ -8839,7 +8511,9 @@ Object* JSObject::SlowReverseLookup(Object* value) {
DescriptorArray* descs = map()->instance_descriptors();
bool value_is_number = value->IsNumber();
for (int i = 0; i < number_of_own_descriptors; i++) {
- if (descs->GetType(i) == DATA) {
+ PropertyDetails details = descs->GetDetails(i);
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(field_index)) {
if (value_is_number) {
@@ -8859,9 +8533,12 @@ Object* JSObject::SlowReverseLookup(Object* value) {
return descs->GetKey(i);
}
}
- } else if (descs->GetType(i) == DATA_CONSTANT) {
- if (descs->GetConstant(i) == value) {
- return descs->GetKey(i);
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ if (descs->GetValue(i) == value) {
+ return descs->GetKey(i);
+ }
}
}
}
@@ -9007,12 +8684,13 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
Isolate* isolate = map->GetIsolate();
// Strict function maps have Function as a constructor but the
// Function's initial map is a sloppy function map. Same holds for
- // GeneratorFunction and its initial map.
+ // GeneratorFunction / AsyncFunction and its initial map.
Object* constructor = map->GetConstructor();
DCHECK(constructor->IsJSFunction());
DCHECK(*map == JSFunction::cast(constructor)->initial_map() ||
*map == *isolate->strict_function_map() ||
- *map == *isolate->strict_generator_function_map());
+ *map == *isolate->generator_function_map() ||
+ *map == *isolate->async_function_map());
#endif
// Initial maps must always own their descriptors, and their descriptor
// arrays must not contain descriptors that do not belong to the map.
@@ -9168,13 +8846,7 @@ Handle<Map> Map::CopyReplaceDescriptors(
CHECK(maybe_name.ToHandle(&name));
ConnectTransition(map, result, name, simple_flag);
} else {
- int length = descriptors->number_of_descriptors();
- for (int i = 0; i < length; i++) {
- descriptors->SetRepresentation(i, Representation::Tagged());
- if (descriptors->GetDetails(i).type() == DATA) {
- descriptors->SetValue(i, FieldType::Any());
- }
- }
+ descriptors->GeneralizeAllFields();
result->InitializeDescriptors(*descriptors,
LayoutDescriptor::FastPointerLayout());
}
@@ -9456,35 +9128,31 @@ Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
return new_map;
}
-FieldType* DescriptorArray::GetFieldType(int descriptor_number) {
- DCHECK(GetDetails(descriptor_number).location() == kField);
- Object* value = GetValue(descriptor_number);
- if (value->IsWeakCell()) {
- if (WeakCell::cast(value)->cleared()) return FieldType::None();
- value = WeakCell::cast(value)->value();
- }
- return FieldType::cast(value);
-}
-
namespace {
bool CanHoldValue(DescriptorArray* descriptors, int descriptor, Object* value) {
PropertyDetails details = descriptors->GetDetails(descriptor);
- switch (details.type()) {
- case DATA:
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
return value->FitsRepresentation(details.representation()) &&
descriptors->GetFieldType(descriptor)->NowContains(value);
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ UNREACHABLE();
+ return false;
+ }
- case DATA_CONSTANT:
- DCHECK(descriptors->GetConstant(descriptor) != value ||
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ DCHECK(descriptors->GetValue(descriptor) != value ||
value->FitsRepresentation(details.representation()));
- return descriptors->GetConstant(descriptor) == value;
-
- case ACCESSOR:
- case ACCESSOR_CONSTANT:
+ return descriptors->GetValue(descriptor) == value;
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
return false;
+ }
}
-
UNREACHABLE();
return false;
}
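CanHoldValue now reads as a kind/location decision table: a field-backed data property holds any value fitting its representation and field type, a descriptor-held data constant only holds the identical value, and accessors never hold plain values. A hedged sketch of that table with simplified stand-ins:

enum class Kind { kData, kAccessor };
enum class Location { kField, kDescriptor };

struct Slot {
  Kind kind;
  Location location;
  bool fits;     // stands in for FitsRepresentation() && NowContains()
  int constant;  // stands in for the descriptor-held value
};

bool CanHold(const Slot& s, int value) {
  if (s.location == Location::kField) {
    // Accessors are never field-located on this path (UNREACHABLE above).
    return s.kind == Kind::kData && s.fits;
  }
  // kDescriptor: a data constant only matches the exact stored value.
  return s.kind == Kind::kData && s.constant == value;
}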
@@ -9499,8 +9167,9 @@ Handle<Map> UpdateDescriptorForValue(Handle<Map> map, int descriptor,
Representation representation = value->OptimalRepresentation();
Handle<FieldType> type = value->OptimalType(isolate, representation);
- return Map::ReconfigureProperty(map, descriptor, kData, attributes,
- representation, type, FORCE_FIELD);
+ MapUpdater mu(isolate, map);
+ return mu.ReconfigureToDataField(descriptor, attributes, representation,
+ type);
}
} // namespace
@@ -9583,9 +9252,9 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
if (!map->GetBackPointer()->IsMap()) {
// There is no benefit from reconstructing transition tree for maps without
// back pointers.
- return CopyGeneralizeAllRepresentations(
- map, map->elements_kind(), descriptor, FORCE_FIELD, kind, attributes,
- "GenAll_AttributesMismatchProtoMap");
+ return CopyGeneralizeAllFields(map, map->elements_kind(), descriptor, kind,
+ attributes,
+ "GenAll_AttributesMismatchProtoMap");
}
if (FLAG_trace_generalization) {
@@ -9593,9 +9262,11 @@ Handle<Map> Map::ReconfigureExistingProperty(Handle<Map> map, int descriptor,
}
Isolate* isolate = map->GetIsolate();
- Handle<Map> new_map = ReconfigureProperty(
- map, descriptor, kind, attributes, Representation::None(),
- FieldType::None(isolate), FORCE_FIELD);
+
+ MapUpdater mu(isolate, map);
+ DCHECK_EQ(kData, kind); // Only kData case is supported so far.
+ Handle<Map> new_map = mu.ReconfigureToDataField(
+ descriptor, attributes, Representation::None(), FieldType::None(isolate));
return new_map;
}
@@ -9655,7 +9326,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
}
PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
- if (old_details.type() != ACCESSOR_CONSTANT) {
+ if (old_details.kind() != kAccessor) {
return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
}
@@ -9697,8 +9368,8 @@ Handle<Map> Map::TransitionToAccessorProperty(Isolate* isolate, Handle<Map> map,
pair->SetComponents(*getter, *setter);
TransitionFlag flag = INSERT_TRANSITION;
- AccessorConstantDescriptor new_desc(name, pair, attributes);
- return Map::CopyInsertDescriptor(map, &new_desc, flag);
+ Descriptor d = Descriptor::AccessorConstant(name, pair, attributes);
+ return Map::CopyInsertDescriptor(map, &d, flag);
}
@@ -9777,15 +9448,13 @@ Handle<DescriptorArray> DescriptorArray::CopyUpToAddAttributes(
if (!key->IsPrivate()) {
int mask = DONT_DELETE | DONT_ENUM;
// READ_ONLY is an invalid attribute for JS setters/getters.
- if (details.type() != ACCESSOR_CONSTANT || !value->IsAccessorPair()) {
+ if (details.kind() != kAccessor || !value->IsAccessorPair()) {
mask |= READ_ONLY;
}
details = details.CopyAddAttributes(
static_cast<PropertyAttributes>(attributes & mask));
}
- Descriptor inner_desc(
- handle(key), handle(value, desc->GetIsolate()), details);
- descriptors->SetDescriptor(i, &inner_desc);
+ descriptors->Set(i, key, value, details);
}
} else {
for (int i = 0; i < size; ++i) {
@@ -9806,7 +9475,8 @@ bool DescriptorArray::IsEqualUpTo(DescriptorArray* desc, int nof_descriptors) {
}
PropertyDetails details = GetDetails(i);
PropertyDetails other_details = desc->GetDetails(i);
- if (details.type() != other_details.type() ||
+ if (details.kind() != other_details.kind() ||
+ details.location() != other_details.location() ||
!details.representation().Equals(other_details.representation())) {
return false;
}
@@ -10120,8 +9790,7 @@ Handle<FixedArray> FixedArray::SetAndGrow(Handle<FixedArray> array, int index,
void FixedArray::Shrink(int new_length) {
DCHECK(0 <= new_length && new_length <= length());
if (new_length < length()) {
- GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- this, length() - new_length);
+ GetHeap()->RightTrimFixedArray(this, length() - new_length);
}
}
@@ -10449,17 +10118,11 @@ void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
}
}
-
void DescriptorArray::CopyFrom(int index, DescriptorArray* src) {
- Object* value = src->GetValue(index);
PropertyDetails details = src->GetDetails(index);
- Descriptor desc(handle(src->GetKey(index)),
- handle(value, src->GetIsolate()),
- details);
- SetDescriptor(index, &desc);
+ Set(index, src->GetKey(index), src->GetValue(index), details);
}
-
void DescriptorArray::Sort() {
// In-place heap sort.
int len = number_of_descriptors();
@@ -10561,7 +10224,7 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
SharedFunctionInfo* DeoptimizationInputData::GetInlinedFunction(int index) {
if (index == -1) {
- return SharedFunctionInfo::cast(this->SharedFunctionInfo());
+ return SharedFunctionInfo::cast(SharedFunctionInfo());
} else {
return SharedFunctionInfo::cast(LiteralArray()->get(index));
}
@@ -10605,7 +10268,7 @@ int HandlerTable::LookupRange(int pc_offset, int* data_out,
int handler_offset = HandlerOffsetField::decode(handler_field);
CatchPrediction prediction = HandlerPredictionField::decode(handler_field);
int handler_data = Smi::cast(get(i + kRangeDataIndex))->value();
- if (pc_offset > start_offset && pc_offset <= end_offset) {
+ if (pc_offset >= start_offset && pc_offset < end_offset) {
DCHECK_GE(start_offset, innermost_start);
DCHECK_LT(end_offset, innermost_end);
innermost_handler = handler_offset;
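The comparison change above flips the matched interval from (start_offset, end_offset] to the half-open [start_offset, end_offset), so a pc sitting exactly on a range start is now covered and one sitting on the end is not. A runnable sketch of the innermost-range lookup under that convention (Range is an invented stand-in for the table rows):

#include <vector>

struct Range {
  int start, end, handler;
};

// Returns the handler of the innermost range containing pc_offset, or -1.
// Assumes enclosing ranges appear before the ranges they contain, so the
// last match wins, as in the handler table walk above.
int LookupRange(const std::vector<Range>& ranges, int pc_offset) {
  int innermost = -1;
  for (const Range& r : ranges) {
    if (pc_offset >= r.start && pc_offset < r.end) innermost = r.handler;
  }
  return innermost;
}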
@@ -10680,7 +10343,6 @@ bool String::LooksValid() {
return true;
}
-
// static
MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
if (name->IsString()) return Handle<String>::cast(name);
@@ -11637,7 +11299,7 @@ ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
Handle<Object> search, Handle<Object> position) {
- if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -11654,11 +11316,9 @@ Object* String::IndexOf(Isolate* isolate, Handle<Object> receiver,
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
- double index = std::max(position->Number(), 0.0);
- index = std::min(index, static_cast<double>(receiver_string->length()));
-
- return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
- static_cast<uint32_t>(index)));
+ uint32_t index = receiver_string->ToValidIndex(*position);
+ return Smi::FromInt(
+ String::IndexOf(isolate, receiver_string, search_string, index));
}
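String::ToValidIndex replaces the open-coded std::max/std::min clamping that both IndexOf and LastIndexOf used to carry. A sketch of the presumed clamping behavior, matching the deleted code:

#include <algorithm>
#include <cstdint>

// Clamps a requested position into [0, length].
uint32_t ToValidIndex(double position, uint32_t length) {
  double clamped =
      std::min(std::max(position, 0.0), static_cast<double>(length));
  return static_cast<uint32_t>(clamped);
}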
namespace {
@@ -11840,7 +11500,7 @@ int StringMatchBackwards(Vector<const schar> subject,
Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
Handle<Object> search, Handle<Object> position) {
- if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate)) {
+ if (receiver->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
@@ -11864,11 +11524,7 @@ Object* String::LastIndexOf(Isolate* isolate, Handle<Object> receiver,
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, position,
Object::ToInteger(isolate, position));
-
- double position_number = std::max(position->Number(), 0.0);
- position_number = std::min(position_number,
- static_cast<double>(receiver_string->length()));
- start_index = static_cast<uint32_t>(position_number);
+ start_index = receiver_string->ToValidIndex(*position);
}
uint32_t pattern_length = search_string->length();
@@ -12037,7 +11693,7 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
// that are a multiple of pointer size.
heap->CreateFillerObjectAt(start_of_string + new_size, delta,
ClearRecordedSlots::kNo);
- heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*string, -delta);
// We are storing the new length using release store after creating a filler
// for the left-over space to avoid races with the sweeper thread.
@@ -12216,7 +11872,9 @@ bool Map::EquivalentToForNormalization(Map* other,
int properties =
mode == CLEAR_INOBJECT_PROPERTIES ? 0 : other->GetInObjectProperties();
return CheckEquivalent(this, other) && bit_field2() == other->bit_field2() &&
- GetInObjectProperties() == properties;
+ GetInObjectProperties() == properties &&
+ JSObject::GetInternalFieldCount(this) ==
+ JSObject::GetInternalFieldCount(other);
}
@@ -12324,16 +11982,23 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
DCHECK(code.is_null() ||
code.ToHandleChecked()->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 4);
+ STATIC_ASSERT(kEntryLength == 3);
Handle<FixedArray> new_code_map;
int entry;
+ if (!osr_ast_id.IsNone()) {
+ Context::AddToOptimizedCodeMap(
+ native_context, shared, code.ToHandleChecked(), literals, osr_ast_id);
+ return;
+ }
+
+ DCHECK(osr_ast_id.IsNone());
if (shared->OptimizedCodeMapIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
entry = kEntriesStart;
} else {
Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
- entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
+ entry = shared->SearchOptimizedCodeMapEntry(*native_context);
if (entry >= kEntriesStart) {
// Just set the code and literals of the entry.
if (!code.is_null()) {
@@ -12380,7 +12045,6 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
new_code_map->set(entry + kContextOffset, context_cell);
new_code_map->set(entry + kCachedCodeOffset, *code_cell);
new_code_map->set(entry + kLiteralsOffset, *literals_cell);
- new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
@@ -12392,7 +12056,6 @@ void SharedFunctionInfo::AddToOptimizedCodeMap(
Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
cell = WeakCell::cast(new_code_map->get(i + kLiteralsOffset));
DCHECK(cell->cleared() || cell->value()->IsFixedArray());
- DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
#endif
@@ -12412,69 +12075,34 @@ void SharedFunctionInfo::ClearOptimizedCodeMap() {
void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
const char* reason) {
DisallowHeapAllocation no_gc;
- if (OptimizedCodeMapIsCleared()) return;
+ Isolate* isolate = GetIsolate();
+ bool found = false;
- Heap* heap = GetHeap();
- FixedArray* code_map = optimized_code_map();
- int dst = kEntriesStart;
- int length = code_map->length();
- for (int src = kEntriesStart; src < length; src += kEntryLength) {
- DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
- WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
- if (WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
- optimized_code) {
- BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
- if (FLAG_trace_opt) {
- PrintF("[evicting entry from optimizing code map (%s) for ", reason);
- ShortPrint();
- if (osr.IsNone()) {
+ if (!OptimizedCodeMapIsCleared()) {
+ Heap* heap = isolate->heap();
+ FixedArray* code_map = optimized_code_map();
+ int length = code_map->length();
+ for (int src = kEntriesStart; src < length; src += kEntryLength) {
+ DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
+ WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
+ found = WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
+ optimized_code;
+ if (found) {
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code map (%s) for ", reason);
+ ShortPrint();
PrintF("]\n");
- } else {
- PrintF(" (osr ast id %d)]\n", osr.ToInt());
}
+ // Just clear the code in order to continue sharing literals.
+ code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
+ SKIP_WRITE_BARRIER);
}
- if (!osr.IsNone()) {
- // Evict the src entry by not copying it to the dst entry.
- continue;
- }
- // In case of non-OSR entry just clear the code in order to proceed
- // sharing literals.
- code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
- SKIP_WRITE_BARRIER);
}
-
- // Keep the src entry by copying it to the dst entry.
- if (dst != src) {
- code_map->set(dst + kContextOffset, code_map->get(src + kContextOffset));
- code_map->set(dst + kCachedCodeOffset,
- code_map->get(src + kCachedCodeOffset));
- code_map->set(dst + kLiteralsOffset,
- code_map->get(src + kLiteralsOffset));
- code_map->set(dst + kOsrAstIdOffset,
- code_map->get(src + kOsrAstIdOffset));
- }
- dst += kEntryLength;
}
- if (dst != length) {
- // Always trim even when array is cleared because of heap verifier.
- heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
- length - dst);
- if (code_map->length() == kEntriesStart) {
- ClearOptimizedCodeMap();
- }
- }
-}
-
-void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
- FixedArray* code_map = optimized_code_map();
- DCHECK(shrink_by % kEntryLength == 0);
- DCHECK(shrink_by <= code_map->length() - kEntriesStart);
- // Always trim even when array is cleared because of heap verifier.
- GetHeap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(code_map,
- shrink_by);
- if (code_map->length() == kEntriesStart) {
- ClearOptimizedCodeMap();
+ if (!found) {
+ // We didn't find the code in here. It must be osr'd code.
+ isolate->EvictOSROptimizedCode(optimized_code, reason);
}
}
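Eviction now keeps every entry but clears the code slot in place (so literals stay shared), and OSR code, which no longer lives in this map, is handed to the isolate-level cache when nothing matched. A sketch of that flow with invented types:

#include <functional>
#include <vector>

struct Entry {
  const void* context;
  const void* code;  // null once evicted; literals remain shared
  const void* literals;
};

void Evict(std::vector<Entry>& code_map, const void* dead_code,
           const std::function<void(const void*)>& evict_osr) {
  bool found = false;
  for (Entry& e : code_map) {
    if (e.code == dead_code) {
      e.code = nullptr;  // clear only the code slot, keep the entry
      found = true;
    }
  }
  if (!found) evict_osr(dead_code);  // must be OSR code, tracked elsewhere
}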
@@ -12531,19 +12159,10 @@ void Map::CompleteInobjectSlackTracking() {
static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
DisallowHeapAllocation no_gc;
if (!object->HasFastProperties()) return false;
- Map* map = object->map();
- if (map->is_prototype_map()) return false;
- DescriptorArray* descriptors = map->instance_descriptors();
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (details.location() == kDescriptor) continue;
- if (details.representation().IsHeapObject() ||
- details.representation().IsTagged()) {
- FieldIndex index = FieldIndex::ForDescriptor(map, i);
- if (object->RawFastPropertyAt(index)->IsJSFunction()) return true;
- }
- }
- return false;
+ if (object->IsJSGlobalProxy()) return false;
+ if (object->GetIsolate()->bootstrapper()->IsActive()) return false;
+ return !object->map()->is_prototype_map() ||
+ !object->map()->should_be_fast_prototype_map();
}
// static
@@ -12558,8 +12177,10 @@ void JSObject::MakePrototypesFast(Handle<Object> receiver,
if (!current->IsJSObject()) return;
Handle<JSObject> current_obj = Handle<JSObject>::cast(current);
Map* current_map = current_obj->map();
- if (current_map->is_prototype_map() &&
- !current_map->should_be_fast_prototype_map()) {
+ if (current_map->is_prototype_map()) {
+ // If the map is already marked as should be fast, we're done. Its
+ // prototypes will have been marked already as well.
+ if (current_map->should_be_fast_prototype_map()) return;
Handle<Map> map(current_map);
Map::SetShouldBeFastPrototypeMap(map, true, isolate);
JSObject::OptimizeAsPrototype(current_obj, FAST_PROTOTYPE);
@@ -12765,9 +12386,17 @@ void Map::SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
// static
Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Isolate* isolate) {
- Handle<Object> maybe_prototype(
- map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
- if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
+ Handle<Object> maybe_prototype;
+ if (map->IsJSGlobalObjectMap()) {
+ DCHECK(map->is_prototype_map());
+ // The global object is the prototype of a global proxy, so we can use
+ // its validity cell to guard changes to the global object's prototype.
+ maybe_prototype = isolate->global_object();
+ } else {
+ maybe_prototype =
+ handle(map->GetPrototypeChainRootMap(isolate)->prototype(), isolate);
+ if (!maybe_prototype->IsJSObject()) return Handle<Cell>::null();
+ }
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
// Ensure the prototype is registered with its own prototypes so its cell
// will be invalidated when necessary.
@@ -13138,7 +12767,7 @@ MaybeHandle<Map> JSFunction::GetDerivedMap(Isolate* isolate,
// Link initial map and constructor function if the new.target is actually a
// subclass constructor.
- if (IsSubclassConstructor(function->shared()->kind())) {
+ if (IsDerivedConstructor(function->shared()->kind())) {
Handle<Object> prototype(function->instance_prototype(), isolate);
InstanceType instance_type = constructor_initial_map->instance_type();
DCHECK(CanSubclassHaveInobjectProperties(instance_type));
@@ -13288,8 +12917,7 @@ Handle<String> JSFunction::ToString(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared_info(function->shared(), isolate);
// Check if {function} should hide its source code.
- if (!shared_info->script()->IsScript() ||
- Script::cast(shared_info->script())->hide_source()) {
+ if (!shared_info->IsUserJavaScript()) {
return NativeCodeFunctionSourceString(shared_info);
}
@@ -13363,7 +12991,7 @@ void Script::SetEvalOrigin(Handle<Script> script,
StackTraceFrameIterator it(script->GetIsolate());
if (!it.done() && it.is_javascript()) {
FrameSummary summary = FrameSummary::GetFirst(it.javascript_frame());
- script->set_eval_from_shared(summary.function()->shared());
+ script->set_eval_from_shared(summary.AsJavaScript().function()->shared());
script->set_eval_from_position(-summary.code_offset());
return;
}
@@ -13416,15 +13044,7 @@ bool Script::GetPositionInfo(Handle<Script> script, int position,
PositionInfo* info, OffsetFlag offset_flag) {
// For wasm, we do not create an artificial line_ends array, but do the
// translation directly.
- if (script->type() == Script::TYPE_WASM) {
- Handle<WasmCompiledModule> compiled_module(
- WasmCompiledModule::cast(script->wasm_compiled_module()));
- DCHECK_LE(0, position);
- return wasm::GetPositionInfo(compiled_module,
- static_cast<uint32_t>(position), info);
- }
-
- InitLineEnds(script);
+ if (script->type() != Script::TYPE_WASM) InitLineEnds(script);
return script->GetPositionInfo(position, info, offset_flag);
}
@@ -13460,6 +13080,16 @@ bool Script::GetPositionInfo(int position, PositionInfo* info,
OffsetFlag offset_flag) const {
DisallowHeapAllocation no_allocation;
+ // For wasm, we do not rely on the line_ends array, but do the translation
+ // directly.
+ if (type() == Script::TYPE_WASM) {
+ Handle<WasmCompiledModule> compiled_module(
+ WasmCompiledModule::cast(wasm_compiled_module()));
+ DCHECK_LE(0, position);
+ return compiled_module->GetPositionInfo(static_cast<uint32_t>(position),
+ info);
+ }
+
if (line_ends()->IsUndefined(GetIsolate())) {
// Slow mode: we do not have line_ends. We have to iterate through source.
if (!GetPositionInfoSlow(this, position, info)) return false;
@@ -13553,15 +13183,11 @@ int Script::GetLineNumber(int code_pos) const {
return info.line;
}
-Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
-
+Object* Script::GetNameOrSourceURL() {
+ Isolate* isolate = GetIsolate();
// Keep in sync with ScriptNameOrSourceURL in messages.js.
-
- if (!script->source_url()->IsUndefined(isolate)) {
- return handle(script->source_url(), isolate);
- }
- return handle(script->name(), isolate);
+ if (!source_url()->IsUndefined(isolate)) return source_url();
+ return name();
}
@@ -13590,53 +13216,68 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
return result;
}
-
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
- FunctionLiteral* fun) {
- WeakFixedArray::Iterator iterator(shared_function_infos());
- SharedFunctionInfo* shared;
- while ((shared = iterator.Next<SharedFunctionInfo>())) {
- if (fun->function_token_position() == shared->function_token_position() &&
- fun->start_position() == shared->start_position() &&
- fun->end_position() == shared->end_position()) {
- return Handle<SharedFunctionInfo>(shared);
- }
+ Isolate* isolate, FunctionLiteral* fun) {
+ DCHECK_NE(fun->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
+ DCHECK_LT(fun->function_literal_id(), shared_function_infos()->length());
+ Object* shared = shared_function_infos()->get(fun->function_literal_id());
+ if (shared->IsUndefined(isolate) || WeakCell::cast(shared)->cleared()) {
+ return MaybeHandle<SharedFunctionInfo>();
}
- return MaybeHandle<SharedFunctionInfo>();
+ return handle(SharedFunctionInfo::cast(WeakCell::cast(shared)->value()));
}
-
Script::Iterator::Iterator(Isolate* isolate)
: iterator_(isolate->heap()->script_list()) {}
Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
+SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
+ : ScriptIterator(script->GetIsolate(),
+ handle(script->shared_function_infos())) {}
-SharedFunctionInfo::Iterator::Iterator(Isolate* isolate)
- : script_iterator_(isolate),
- sfi_iterator_(isolate->heap()->noscript_shared_function_infos()) {}
+SharedFunctionInfo::ScriptIterator::ScriptIterator(
+ Isolate* isolate, Handle<FixedArray> shared_function_infos)
+ : isolate_(isolate),
+ shared_function_infos_(shared_function_infos),
+ index_(0) {}
+SharedFunctionInfo* SharedFunctionInfo::ScriptIterator::Next() {
+ while (index_ < shared_function_infos_->length()) {
+ Object* raw = shared_function_infos_->get(index_++);
+ if (raw->IsUndefined(isolate_) || WeakCell::cast(raw)->cleared()) continue;
+ return SharedFunctionInfo::cast(WeakCell::cast(raw)->value());
+ }
+ return nullptr;
+}
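ScriptIterator::Next walks the per-script fixed array, skipping slots that are undefined or whose weak cell the GC has cleared. The same skip-dead-slots pattern with std::weak_ptr (a sketch, not V8's types):

#include <cstddef>
#include <memory>
#include <vector>

struct SFI {};  // stands in for SharedFunctionInfo

std::shared_ptr<SFI> Next(const std::vector<std::weak_ptr<SFI>>& infos,
                          std::size_t& index) {
  while (index < infos.size()) {
    if (auto strong = infos[index++].lock()) return strong;
    // empty slot or collected target: keep scanning
  }
  return nullptr;
}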
-bool SharedFunctionInfo::Iterator::NextScript() {
- Script* script = script_iterator_.Next();
- if (script == NULL) return false;
- sfi_iterator_.Reset(script->shared_function_infos());
- return true;
+void SharedFunctionInfo::ScriptIterator::Reset(Handle<Script> script) {
+ shared_function_infos_ = handle(script->shared_function_infos());
+ index_ = 0;
}
+SharedFunctionInfo::GlobalIterator::GlobalIterator(Isolate* isolate)
+ : script_iterator_(isolate),
+ noscript_sfi_iterator_(isolate->heap()->noscript_shared_function_infos()),
+ sfi_iterator_(handle(script_iterator_.Next(), isolate)) {}
-SharedFunctionInfo* SharedFunctionInfo::Iterator::Next() {
- do {
- SharedFunctionInfo* next = sfi_iterator_.Next<SharedFunctionInfo>();
- if (next != NULL) return next;
- } while (NextScript());
- return NULL;
+SharedFunctionInfo* SharedFunctionInfo::GlobalIterator::Next() {
+ SharedFunctionInfo* next = noscript_sfi_iterator_.Next<SharedFunctionInfo>();
+ if (next != nullptr) return next;
+ for (;;) {
+ next = sfi_iterator_.Next();
+ if (next != nullptr) return next;
+ Script* next_script = script_iterator_.Next();
+ if (next_script == nullptr) return nullptr;
+ sfi_iterator_.Reset(handle(next_script));
+ }
}
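GlobalIterator::Next drains the no-script list first, then walks scripts one by one, resetting the inner iterator for each. The same two-level chaining pattern in a sketch:

#include <cstddef>
#include <vector>

// Yields every element of every inner vector, advancing outer as needed.
struct ChainedIterator {
  const std::vector<std::vector<int>>* outer;
  std::size_t outer_index = 0;
  std::size_t inner_index = 0;

  const int* Next() {
    while (outer_index < outer->size()) {
      const std::vector<int>& inner = (*outer)[outer_index];
      if (inner_index < inner.size()) return &inner[inner_index++];
      ++outer_index;  // current script exhausted: move to the next one
      inner_index = 0;
    }
    return nullptr;
  }
};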
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Object> script_object) {
+ DCHECK_NE(shared->function_literal_id(), FunctionLiteral::kIdTypeInvalid);
if (shared->script() == *script_object) return;
Isolate* isolate = shared->GetIsolate();
@@ -13644,39 +13285,52 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
// the shared function info may be temporarily in two lists.
// This is okay because the gc-time processing of these lists can tolerate
// duplicates.
- Handle<Object> list;
if (script_object->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_object);
- list = handle(script->shared_function_infos(), isolate);
+ Handle<FixedArray> list = handle(script->shared_function_infos(), isolate);
+#ifdef DEBUG
+ DCHECK_LT(shared->function_literal_id(), list->length());
+ if (list->get(shared->function_literal_id())->IsWeakCell() &&
+ !WeakCell::cast(list->get(shared->function_literal_id()))->cleared()) {
+ DCHECK(
+ WeakCell::cast(list->get(shared->function_literal_id()))->value() ==
+ *shared);
+ }
+#endif
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(shared);
+ list->set(shared->function_literal_id(), *cell);
} else {
- list = isolate->factory()->noscript_shared_function_infos();
- }
+ Handle<Object> list = isolate->factory()->noscript_shared_function_infos();
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- WeakFixedArray::Iterator iterator(*list);
- SharedFunctionInfo* next;
- while ((next = iterator.Next<SharedFunctionInfo>())) {
- DCHECK_NE(next, *shared);
+ if (FLAG_enable_slow_asserts) {
+ WeakFixedArray::Iterator iterator(*list);
+ SharedFunctionInfo* next;
+ while ((next = iterator.Next<SharedFunctionInfo>())) {
+ DCHECK_NE(next, *shared);
+ }
}
- }
#endif // DEBUG
- list = WeakFixedArray::Add(list, shared);
- if (script_object->IsScript()) {
- Handle<Script> script = Handle<Script>::cast(script_object);
- script->set_shared_function_infos(*list);
- } else {
+ list = WeakFixedArray::Add(list, shared);
+
isolate->heap()->SetRootNoScriptSharedFunctionInfos(*list);
}
- // Remove shared function info from old script's list.
if (shared->script()->IsScript()) {
+ // Remove shared function info from old script's list.
Script* old_script = Script::cast(shared->script());
- if (old_script->shared_function_infos()->IsWeakFixedArray()) {
- WeakFixedArray* list =
- WeakFixedArray::cast(old_script->shared_function_infos());
- list->Remove(shared);
+
+ // Due to liveedit, it might happen that the old_script doesn't know
+ // about the SharedFunctionInfo, so we have to guard against that.
+ Handle<FixedArray> infos(old_script->shared_function_infos(), isolate);
+ if (shared->function_literal_id() < infos->length()) {
+ Object* raw = old_script->shared_function_infos()->get(
+ shared->function_literal_id());
+ if (!raw->IsWeakCell() || WeakCell::cast(raw)->value() == *shared) {
+ old_script->shared_function_infos()->set(
+ shared->function_literal_id(), isolate->heap()->undefined_value());
+ }
}
} else {
// Remove shared function info from root array.
@@ -13695,6 +13349,16 @@ String* SharedFunctionInfo::DebugName() {
return String::cast(n);
}
+bool SharedFunctionInfo::HasNoSideEffect() {
+ if (!computed_has_no_side_effect()) {
+ DisallowHeapAllocation not_handlified;
+ Handle<SharedFunctionInfo> info(this);
+ set_has_no_side_effect(DebugEvaluate::FunctionHasNoSideEffect(info));
+ set_computed_has_no_side_effect(true);
+ }
+ return has_no_side_effect();
+}
+
// The filter is a pattern that matches function names in this way:
// "*" all; the default
// "-" all but the top-level function
@@ -13797,7 +13461,7 @@ void JSFunction::CalculateInstanceSizeForDerivedClass(
JSFunction* func = JSFunction::cast(current);
SharedFunctionInfo* shared = func->shared();
expected_nof_properties += shared->expected_nof_properties();
- if (!IsSubclassConstructor(shared->kind())) {
+ if (!IsDerivedConstructor(shared->kind())) {
break;
}
}
@@ -13940,8 +13604,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
shared_info->set_language_mode(lit->language_mode());
shared_info->set_uses_arguments(lit->scope()->arguments() != NULL);
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
- shared_info->set_is_function(lit->is_function());
- shared_info->set_never_compiled(true);
shared_info->set_kind(lit->kind());
if (!IsConstructable(lit->kind(), lit->language_mode())) {
shared_info->SetConstructStub(
@@ -13949,9 +13611,7 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
}
shared_info->set_needs_home_object(lit->scope()->NeedsHomeObject());
shared_info->set_asm_function(lit->scope()->asm_function());
- shared_info->set_requires_class_field_init(lit->requires_class_field_init());
- shared_info->set_is_class_field_initializer(
- lit->is_class_field_initializer());
+ shared_info->set_function_literal_id(lit->function_literal_id());
SetExpectedNofPropertiesFromEstimate(shared_info, lit);
}
@@ -14006,19 +13666,15 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
}
-
-int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
- BailoutId osr_ast_id) {
+int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
if (!OptimizedCodeMapIsCleared()) {
FixedArray* optimized_code_map = this->optimized_code_map();
int length = optimized_code_map->length();
- Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = kEntriesStart; i < length; i += kEntryLength) {
if (WeakCell::cast(optimized_code_map->get(i + kContextOffset))
- ->value() == native_context &&
- optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ ->value() == native_context) {
return i;
}
}
@@ -14041,7 +13697,16 @@ void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
Context* native_context, BailoutId osr_ast_id) {
CodeAndLiterals result = {nullptr, nullptr};
- int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
+ if (!osr_ast_id.IsNone()) {
+ Code* code;
+ LiteralsArray* literals;
+ native_context->SearchOptimizedCodeMap(this, osr_ast_id, &code, &literals);
+ result = {code, literals};
+ return result;
+ }
+
+ DCHECK(osr_ast_id.IsNone());
+ int entry = SearchOptimizedCodeMapEntry(native_context);
if (entry != kNotFound) {
FixedArray* code_map = optimized_code_map();
DCHECK_LE(entry + kEntryLength, code_map->length());
@@ -14360,21 +14025,13 @@ uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
return 0;
}
-int Code::LookupRangeInHandlerTable(int code_offset, int* data,
- HandlerTable::CatchPrediction* prediction) {
- DCHECK(!is_optimized_code());
- HandlerTable* table = HandlerTable::cast(handler_table());
- return table->LookupRange(code_offset, data, prediction);
-}
-
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
- PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge, NO_MARKING_PARITY);
+ PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge);
}
void Code::MarkCodeAsExecuted(byte* sequence, Isolate* isolate) {
- PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge,
- NO_MARKING_PARITY);
+ PatchPlatformCodeAge(isolate, sequence, kExecutedOnceCodeAge);
}
@@ -14408,28 +14065,25 @@ void Code::MakeYoung(Isolate* isolate) {
void Code::PreAge(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
- PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge, NO_MARKING_PARITY);
+ PatchPlatformCodeAge(isolate, sequence, kPreAgedCodeAge);
}
}
void Code::MarkToBeExecutedOnce(Isolate* isolate) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
- PatchPlatformCodeAge(isolate, sequence, kToBeExecutedOnceCodeAge,
- NO_MARKING_PARITY);
+ PatchPlatformCodeAge(isolate, sequence, kToBeExecutedOnceCodeAge);
}
}
-void Code::MakeOlder(MarkingParity current_parity) {
+void Code::MakeOlder() {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
- Age age;
- MarkingParity code_parity;
Isolate* isolate = GetIsolate();
- GetCodeAgeAndParity(isolate, sequence, &age, &code_parity);
+ Age age = GetCodeAge(isolate, sequence);
Age next_age = NextAge(age);
- if (age != next_age && code_parity != current_parity) {
- PatchPlatformCodeAge(isolate, sequence, next_age, current_parity);
+ if (age != next_age) {
+ PatchPlatformCodeAge(isolate, sequence, next_age);
}
}
}
@@ -14455,77 +14109,47 @@ Code::Age Code::GetAge() {
if (sequence == NULL) {
return kNoAgeCodeAge;
}
- Age age;
- MarkingParity parity;
- GetCodeAgeAndParity(GetIsolate(), sequence, &age, &parity);
- return age;
+ return GetCodeAge(GetIsolate(), sequence);
}
-
-void Code::GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity) {
+Code::Age Code::GetAgeOfCodeAgeStub(Code* code) {
Isolate* isolate = code->GetIsolate();
Builtins* builtins = isolate->builtins();
- Code* stub = NULL;
-#define HANDLE_CODE_AGE(AGE) \
- stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = EVEN_MARKING_PARITY; \
- return; \
- } \
- stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = ODD_MARKING_PARITY; \
- return; \
+#define HANDLE_CODE_AGE(AGE) \
+ if (code == *builtins->Make##AGE##CodeYoungAgain()) { \
+ return k##AGE##CodeAge; \
}
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
- stub = *builtins->MarkCodeAsExecutedOnce();
- if (code == stub) {
- *age = kNotExecutedCodeAge;
- *parity = NO_MARKING_PARITY;
- return;
+ if (code == *builtins->MarkCodeAsExecutedOnce()) {
+ return kNotExecutedCodeAge;
}
- stub = *builtins->MarkCodeAsExecutedTwice();
- if (code == stub) {
- *age = kExecutedOnceCodeAge;
- *parity = NO_MARKING_PARITY;
- return;
+ if (code == *builtins->MarkCodeAsExecutedTwice()) {
+ return kExecutedOnceCodeAge;
}
- stub = *builtins->MarkCodeAsToBeExecutedOnce();
- if (code == stub) {
- *age = kToBeExecutedOnceCodeAge;
- *parity = NO_MARKING_PARITY;
- return;
+ if (code == *builtins->MarkCodeAsToBeExecutedOnce()) {
+ return kToBeExecutedOnceCodeAge;
}
UNREACHABLE();
+ return kNoAgeCodeAge;
}
-
-Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
+Code* Code::GetCodeAgeStub(Isolate* isolate, Age age) {
Builtins* builtins = isolate->builtins();
switch (age) {
-#define HANDLE_CODE_AGE(AGE) \
- case k##AGE##CodeAge: { \
- Code* stub = parity == EVEN_MARKING_PARITY \
- ? *builtins->Make##AGE##CodeYoungAgainEvenMarking() \
- : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- return stub; \
- }
+#define HANDLE_CODE_AGE(AGE) \
+ case k##AGE##CodeAge: { \
+ return *builtins->Make##AGE##CodeYoungAgain(); \
+ }
CODE_AGE_LIST(HANDLE_CODE_AGE)
#undef HANDLE_CODE_AGE
case kNotExecutedCodeAge: {
- DCHECK(parity == NO_MARKING_PARITY);
return *builtins->MarkCodeAsExecutedOnce();
}
case kExecutedOnceCodeAge: {
- DCHECK(parity == NO_MARKING_PARITY);
return *builtins->MarkCodeAsExecutedTwice();
}
case kToBeExecutedOnceCodeAge: {
- DCHECK(parity == NO_MARKING_PARITY);
return *builtins->MarkCodeAsToBeExecutedOnce();
}
default:
@@ -15129,11 +14753,17 @@ void BytecodeArray::CopyBytecodesTo(BytecodeArray* to) {
from->length());
}
-int BytecodeArray::LookupRangeInHandlerTable(
- int code_offset, int* data, HandlerTable::CatchPrediction* prediction) {
- HandlerTable* table = HandlerTable::cast(handler_table());
- code_offset++; // Point after current bytecode.
- return table->LookupRange(code_offset, data, prediction);
+void BytecodeArray::MakeOlder() {
+ Age age = bytecode_age();
+ if (age < kLastBytecodeAge) {
+ set_bytecode_age(static_cast<Age>(age + 1));
+ }
+ DCHECK_GE(bytecode_age(), kFirstBytecodeAge);
+ DCHECK_LE(bytecode_age(), kLastBytecodeAge);
+}
+
+bool BytecodeArray::IsOld() const {
+ return bytecode_age() >= kIsOldBytecodeAge;
}
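Bytecode aging is a saturating counter: each MakeOlder step advances the age until kLastBytecodeAge, and IsOld reports true once kIsOldBytecodeAge is reached. A sketch with placeholder age values (the real constants live in the BytecodeArray definition):

#include <cassert>

enum Age { kFirstAge = 0, kIsOldAge = 3, kLastAge = 5 };  // placeholder values

struct Bytecodes {
  Age age = kFirstAge;

  void MakeOlder() {
    if (age < kLastAge) age = static_cast<Age>(age + 1);
    assert(age >= kFirstAge && age <= kLastAge);
  }

  bool IsOld() const { return age >= kIsOldAge; }
};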
// static
@@ -15572,9 +15202,6 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull(isolate)) return Just(true);
- bool dictionary_elements_in_chain =
- object->map()->DictionaryElementsInPrototypeChainOnly();
-
bool all_extensible = object->map()->is_extensible();
Handle<JSObject> real_receiver = object;
if (from_javascript) {
@@ -15640,14 +15267,6 @@ Maybe<bool> JSObject::SetPrototype(Handle<JSObject> object,
DCHECK(new_map->prototype() == *value);
JSObject::MigrateToMap(real_receiver, new_map);
- if (from_javascript && !dictionary_elements_in_chain &&
- new_map->DictionaryElementsInPrototypeChainOnly()) {
- // If the prototype chain didn't previously have element callbacks, then
- // KeyedStoreICs need to be cleared to ensure any that involve this
- // map go generic.
- TypeFeedbackVector::ClearAllKeyedStoreICs(isolate);
- }
-
heap->ClearInstanceofCache();
DCHECK(size == object->Size());
return Just(true);
@@ -16154,7 +15773,8 @@ void Dictionary<Derived, Shape, Key>::Print(std::ostream& os) { // NOLINT
} else {
os << Brief(k);
}
- os << ": " << Brief(this->ValueAt(i)) << " " << this->DetailsAt(i);
+ os << ": " << Brief(this->ValueAt(i)) << " ";
+ this->DetailsAt(i).PrintAsSlowTo(os);
}
}
}
@@ -16220,118 +15840,6 @@ int FixedArrayBase::GetMaxLengthForNewSpaceAllocation(ElementsKind kind) {
ElementsKindToShiftSize(kind));
}
-void FixedArray::SwapPairs(FixedArray* numbers, int i, int j) {
- Object* temp = get(i);
- set(i, get(j));
- set(j, temp);
- if (this != numbers) {
- temp = numbers->get(i);
- numbers->set(i, Smi::cast(numbers->get(j)));
- numbers->set(j, Smi::cast(temp));
- }
-}
-
-
-static void InsertionSortPairs(FixedArray* content,
- FixedArray* numbers,
- int len) {
- for (int i = 1; i < len; i++) {
- int j = i;
- while (j > 0 &&
- (NumberToUint32(numbers->get(j - 1)) >
- NumberToUint32(numbers->get(j)))) {
- content->SwapPairs(numbers, j - 1, j);
- j--;
- }
- }
-}
-
-
-void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
- // In-place heap sort.
- DCHECK(content->length() == numbers->length());
-
- // Bottom-up max-heap construction.
- for (int i = 1; i < len; ++i) {
- int child_index = i;
- while (child_index > 0) {
- int parent_index = ((child_index + 1) >> 1) - 1;
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- uint32_t child_value = NumberToUint32(numbers->get(child_index));
- if (parent_value < child_value) {
- content->SwapPairs(numbers, parent_index, child_index);
- } else {
- break;
- }
- child_index = parent_index;
- }
- }
-
- // Extract elements and create sorted array.
- for (int i = len - 1; i > 0; --i) {
- // Put max element at the back of the array.
- content->SwapPairs(numbers, 0, i);
- // Sift down the new top element.
- int parent_index = 0;
- while (true) {
- int child_index = ((parent_index + 1) << 1) - 1;
- if (child_index >= i) break;
- uint32_t child1_value = NumberToUint32(numbers->get(child_index));
- uint32_t child2_value = NumberToUint32(numbers->get(child_index + 1));
- uint32_t parent_value = NumberToUint32(numbers->get(parent_index));
- if (child_index + 1 >= i || child1_value > child2_value) {
- if (parent_value > child1_value) break;
- content->SwapPairs(numbers, parent_index, child_index);
- parent_index = child_index;
- } else {
- if (parent_value > child2_value) break;
- content->SwapPairs(numbers, parent_index, child_index + 1);
- parent_index = child_index + 1;
- }
- }
- }
-}
-
-
-// Sort this array and the numbers as pairs wrt. the (distinct) numbers.
-void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
- DCHECK(this->length() == numbers->length());
- // For small arrays, simply use insertion sort.
- if (len <= 10) {
- InsertionSortPairs(this, numbers, len);
- return;
- }
- // Check the range of indices.
- uint32_t min_index = NumberToUint32(numbers->get(0));
- uint32_t max_index = min_index;
- uint32_t i;
- for (i = 1; i < len; i++) {
- if (NumberToUint32(numbers->get(i)) < min_index) {
- min_index = NumberToUint32(numbers->get(i));
- } else if (NumberToUint32(numbers->get(i)) > max_index) {
- max_index = NumberToUint32(numbers->get(i));
- }
- }
- if (max_index - min_index + 1 == len) {
- // Indices form a contiguous range, unless there are duplicates.
- // Do an in-place linear time sort assuming distinct numbers, but
- // avoid hanging in case they are not.
- for (i = 0; i < len; i++) {
- uint32_t p;
- uint32_t j = 0;
- // While the current element at i is not at its correct position p,
- // swap the elements at these two positions.
- while ((p = NumberToUint32(numbers->get(i)) - min_index) != i &&
- j++ < len) {
- SwapPairs(numbers, i, p);
- }
- }
- } else {
- HeapSortPairs(this, numbers, len);
- return;
- }
-}
-
bool JSObject::WasConstructedFromApiFunction() {
auto instance_type = map()->instance_type();
bool is_api_object = instance_type == JS_API_OBJECT_TYPE ||
@@ -16351,94 +15859,6 @@ bool JSObject::WasConstructedFromApiFunction() {
return is_api_object;
}
-MaybeHandle<String> Object::ObjectProtoToString(Isolate* isolate,
- Handle<Object> object) {
- if (*object == isolate->heap()->undefined_value()) {
- return isolate->factory()->undefined_to_string();
- }
- if (*object == isolate->heap()->null_value()) {
- return isolate->factory()->null_to_string();
- }
-
- Handle<JSReceiver> receiver =
- Object::ToObject(isolate, object).ToHandleChecked();
-
- // For proxies, we must check IsArray() before get(toStringTag) to comply
- // with the specification
- Maybe<bool> is_array = Nothing<bool>();
- InstanceType instance_type = receiver->map()->instance_type();
- if (instance_type == JS_PROXY_TYPE) {
- is_array = Object::IsArray(receiver);
- MAYBE_RETURN(is_array, MaybeHandle<String>());
- }
-
- Handle<String> tag;
- Handle<Object> to_string_tag;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, to_string_tag,
- JSReceiver::GetProperty(receiver,
- isolate->factory()->to_string_tag_symbol()),
- String);
- if (to_string_tag->IsString()) {
- tag = Handle<String>::cast(to_string_tag);
- } else {
- switch (instance_type) {
- case JS_API_OBJECT_TYPE:
- case JS_SPECIAL_API_OBJECT_TYPE:
- tag = handle(receiver->class_name(), isolate);
- break;
- case JS_ARGUMENTS_TYPE:
- return isolate->factory()->arguments_to_string();
- case JS_ARRAY_TYPE:
- return isolate->factory()->array_to_string();
- case JS_BOUND_FUNCTION_TYPE:
- case JS_FUNCTION_TYPE:
- return isolate->factory()->function_to_string();
- case JS_ERROR_TYPE:
- return isolate->factory()->error_to_string();
- case JS_DATE_TYPE:
- return isolate->factory()->date_to_string();
- case JS_REGEXP_TYPE:
- return isolate->factory()->regexp_to_string();
- case JS_PROXY_TYPE: {
- if (is_array.FromJust()) {
- return isolate->factory()->array_to_string();
- }
- if (receiver->IsCallable()) {
- return isolate->factory()->function_to_string();
- }
- return isolate->factory()->object_to_string();
- }
- case JS_VALUE_TYPE: {
- Object* value = JSValue::cast(*receiver)->value();
- if (value->IsString()) {
- return isolate->factory()->string_to_string();
- }
- if (value->IsNumber()) {
- return isolate->factory()->number_to_string();
- }
- if (value->IsBoolean()) {
- return isolate->factory()->boolean_to_string();
- }
- if (value->IsSymbol()) {
- return isolate->factory()->object_to_string();
- }
- UNREACHABLE();
- tag = handle(receiver->class_name(), isolate);
- break;
- }
- default:
- return isolate->factory()->object_to_string();
- }
- }
-
- IncrementalStringBuilder builder(isolate);
- builder.AppendCString("[object ");
- builder.AppendString(tag);
- builder.AppendCharacter(']');
- return builder.Finish();
-}
-
const char* Symbol::PrivateSymbolToName() const {
Heap* heap = GetIsolate()->heap();
#define SYMBOL_CHECK_AND_PRINT(name) \
@@ -16552,6 +15972,19 @@ class StringSharedKey : public HashTableKey {
int scope_position_;
};
+// static
+const char* JSPromise::Status(int status) {
+ switch (status) {
+ case v8::Promise::kFulfilled:
+ return "resolved";
+ case v8::Promise::kPending:
+ return "pending";
+ case v8::Promise::kRejected:
+ return "rejected";
+ }
+ UNREACHABLE();
+ return NULL;
+}
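JSPromise::Status maps the v8::Promise state enum to the strings the debugger prints; note that kFulfilled is reported as "resolved" in the function above. The equivalent mapping as a standalone sketch:

#include <cassert>

enum Status { kPending = 0, kFulfilled = 1, kRejected = 2 };

const char* StatusName(Status s) {
  switch (s) {
    case kPending:   return "pending";
    case kFulfilled: return "resolved";
    case kRejected:  return "rejected";
  }
  assert(false);
  return nullptr;
}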
namespace {
@@ -16865,7 +16298,13 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
if (capacity > HashTable::kMaxCapacity) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
}
+ return New(isolate, capacity, pretenure);
+}
+template <typename Derived, typename Shape, typename Key>
+Handle<Derived> HashTable<Derived, Shape, Key>::New(Isolate* isolate,
+ int capacity,
+ PretenureFlag pretenure) {
Factory* factory = isolate->factory();
int length = EntryToIndex(capacity);
Handle<FixedArray> array = factory->NewFixedArray(length, pretenure);
@@ -16878,7 +16317,6 @@ Handle<Derived> HashTable<Derived, Shape, Key>::New(
return table;
}
-
// Find entry for key otherwise return kNotFound.
template <typename Derived, typename Shape>
int NameDictionaryBase<Derived, Shape>::FindEntry(Handle<Name> key) {
@@ -17152,6 +16590,10 @@ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::New(
Isolate*, int at_least_space_for, PretenureFlag pretenure,
MinimumCapacity capacity_option);
+template Handle<SeededNumberDictionary>
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::NewEmpty(Isolate*, PretenureFlag pretenure);
+
template Handle<UnseededNumberDictionary>
Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape,
uint32_t>::New(Isolate*, int at_least_space_for,
@@ -17162,6 +16604,10 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
+template Handle<NameDictionary>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::NewEmpty(
+ Isolate*, PretenureFlag pretenure);
+
template Handle<GlobalDictionary>
Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::New(
Isolate*, int n, PretenureFlag pretenure, MinimumCapacity capacity_option);
@@ -17227,10 +16673,6 @@ Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::Add(
template Handle<FixedArray> Dictionary<
NameDictionary, NameDictionaryShape,
- Handle<Name> >::BuildIterationIndicesArray(Handle<NameDictionary>);
-
-template Handle<FixedArray> Dictionary<
- NameDictionary, NameDictionaryShape,
Handle<Name> >::GenerateNewEnumerationIndices(Handle<NameDictionary>);
template Handle<SeededNumberDictionary>
@@ -17285,6 +16727,12 @@ Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CopyEnumKeysTo(
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator);
+template Handle<FixedArray>
+Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
+ IterationIndices(
+ Handle<
+ Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>>
+ dictionary);
template void
Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
CollectKeysTo(Handle<Dictionary<GlobalDictionary, GlobalDictionaryShape,
@@ -17292,6 +16740,10 @@ Dictionary<GlobalDictionary, GlobalDictionaryShape, Handle<Name>>::
dictionary,
KeyAccumulator* keys);
+template Handle<FixedArray>
+Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::IterationIndices(
+ Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
+ dictionary);
template void
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
Handle<Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>>
@@ -17328,7 +16780,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
HandleScope scope(isolate);
Handle<Object> value(dict->ValueAt(i), isolate);
PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == ACCESSOR_CONSTANT || details.IsReadOnly()) {
+ if (details.kind() == kAccessor || details.IsReadOnly()) {
// Bail out and do the sorting of undefineds and array holes in JS.
// Also bail out if the element is not supposed to be moved.
return bailout;
@@ -17344,7 +16796,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
return bailout;
} else {
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, pos, value, details, object->map()->is_prototype_map());
+ new_dict, pos, value, details, object);
DCHECK(result.is_identical_to(new_dict));
USE(result);
pos++;
@@ -17355,7 +16807,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
return bailout;
} else {
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
- new_dict, key, value, details, object->map()->is_prototype_map());
+ new_dict, key, value, details, object);
DCHECK(result.is_identical_to(new_dict));
USE(result);
}
@@ -17372,7 +16824,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
HandleScope scope(isolate);
Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
new_dict, pos, isolate->factory()->undefined_value(), no_details,
- object->map()->is_prototype_map());
+ object);
DCHECK(result.is_identical_to(new_dict));
USE(result);
pos++;
@@ -17510,11 +16962,11 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
}
result = undefs;
while (undefs < holes) {
- elements->set_undefined(undefs);
+ elements->set_undefined(isolate, undefs);
undefs++;
}
while (holes < limit) {
- elements->set_the_hole(holes);
+ elements->set_the_hole(isolate, holes);
holes++;
}
}
@@ -17522,6 +16974,98 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
return isolate->factory()->NewNumberFromUint(result);
}
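
Note the new overloads threading Isolate* through set_undefined and set_the_hole above: the caller already holds the isolate, so the write no longer has to rediscover it from the heap object. The shape of that change, with illustrative types:

    struct Isolate {
      int the_hole = -1;  // stand-in for the isolate's canonical hole value
    };

    inline Isolate* CurrentIsolate() {  // stand-in for HeapObject::GetIsolate()
      static Isolate isolate;
      return &isolate;
    }

    struct Elements {
      int data[8] = {0};
      // Old form: derives the isolate itself on every call.
      void set_the_hole(int index) { set_the_hole(CurrentIsolate(), index); }
      // New form: the caller, which already has the isolate, passes it in.
      void set_the_hole(Isolate* isolate, int index) {
        data[index] = isolate->the_hole;
      }
    };
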
+namespace {
+
+bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
+ Handle<Object>* index) {
+ DCHECK(s->IsString() || s->IsSmi());
+
+ Handle<Object> result;
+ if (s->IsSmi()) {
+ result = s;
+ } else {
+ result = String::ToNumber(Handle<String>::cast(s));
+ if (!result->IsMinusZero()) {
+ Handle<String> str = Object::ToString(isolate, result).ToHandleChecked();
+ // Avoid treating strings like "2E1" and "20" as the same key.
+ if (!str->SameValue(*s)) return false;
+ }
+ }
+ *index = result;
+ return true;
+}
+
+} // anonymous namespace
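
CanonicalNumericIndexString accepts a string only if it survives a ToNumber/ToString round trip, so "2E1" and "020" are not treated as aliases of index 20. A self-contained sketch of the idea, narrowed to base-10 integers (V8's version handles the full JS number grammar, plus the -0 case special-cased above):

    #include <cassert>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    bool IsCanonicalIntegerIndex(const std::string& s, long* index) {
      char* end = nullptr;
      long value = std::strtol(s.c_str(), &end, 10);
      if (end != s.c_str() + s.size()) return false;  // trailing junk: "2E1"
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%ld", value);
      if (s != buf) return false;  // non-canonical spelling: "020", "-0"
      *index = value;
      return true;
    }

    int main() {
      long i;
      assert(IsCanonicalIntegerIndex("20", &i) && i == 20);
      assert(!IsCanonicalIntegerIndex("2E1", &i));  // would print as "20"
      assert(!IsCanonicalIntegerIndex("020", &i));
    }
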
+
+// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc
+// static
+Maybe<bool> JSTypedArray::DefineOwnProperty(Isolate* isolate,
+ Handle<JSTypedArray> o,
+ Handle<Object> key,
+ PropertyDescriptor* desc,
+ ShouldThrow should_throw) {
+ // 1. Assert: IsPropertyKey(P) is true.
+ DCHECK(key->IsName() || key->IsNumber());
+ // 2. Assert: O is an Object that has a [[ViewedArrayBuffer]] internal slot.
+ // 3. If Type(P) is String, then
+ if (key->IsString() || key->IsSmi()) {
+ // 3a. Let numericIndex be ! CanonicalNumericIndexString(P)
+ // 3b. If numericIndex is not undefined, then
+ Handle<Object> numeric_index;
+ if (CanonicalNumericIndexString(isolate, key, &numeric_index)) {
+ // 3b i. If IsInteger(numericIndex) is false, return false.
+ // 3b ii. If numericIndex = -0, return false.
+ // 3b iii. If numericIndex < 0, return false.
+ // FIXME: the standard allows up to 2^53 elements.
+ uint32_t index;
+ if (numeric_index->IsMinusZero() || !numeric_index->ToUint32(&index)) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+ }
+ // 3b iv. Let length be O.[[ArrayLength]].
+ uint32_t length = o->length()->Number();
+ // 3b v. If numericIndex ≥ length, return false.
+ if (index >= length) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kInvalidTypedArrayIndex));
+ }
+ // 3b vi. If IsAccessorDescriptor(Desc) is true, return false.
+ if (PropertyDescriptor::IsAccessorDescriptor(desc)) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+ // 3b vii. If Desc has a [[Configurable]] field and if
+ // Desc.[[Configurable]] is true, return false.
+ // 3b viii. If Desc has an [[Enumerable]] field and if Desc.[[Enumerable]]
+ // is false, return false.
+ // 3b ix. If Desc has a [[Writable]] field and if Desc.[[Writable]] is
+ // false, return false.
+ if ((desc->has_configurable() && desc->configurable()) ||
+ (desc->has_enumerable() && !desc->enumerable()) ||
+ (desc->has_writable() && !desc->writable())) {
+ RETURN_FAILURE(isolate, should_throw,
+ NewTypeError(MessageTemplate::kRedefineDisallowed, key));
+ }
+ // 3b x. If Desc has a [[Value]] field, then
+ // 3b x 1. Let value be Desc.[[Value]].
+ // 3b x 2. Return ? IntegerIndexedElementSet(O, numericIndex, value).
+ if (desc->has_value()) {
+ if (!desc->has_configurable()) desc->set_configurable(false);
+ if (!desc->has_enumerable()) desc->set_enumerable(true);
+ if (!desc->has_writable()) desc->set_writable(true);
+ Handle<Object> value = desc->value();
+ RETURN_ON_EXCEPTION_VALUE(isolate,
+ SetOwnElementIgnoreAttributes(
+ o, index, value, desc->ToAttributes()),
+ Nothing<bool>());
+ }
+ // 3b xi. Return true.
+ return Just(true);
+ }
+ }
+ // 4. Return ! OrdinaryDefineOwnProperty(O, P, Desc).
+ return OrdinaryDefineOwnProperty(isolate, o, key, desc, should_throw);
+}
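
The net effect of steps 3b vi-ix for JS code: Object.defineProperty on an in-range typed array index only succeeds for enumerable, writable, non-configurable data descriptors; anything else throws. Condensed into a sketch (the tri-state encoding is illustrative; V8 asks has_*() on a PropertyDescriptor):

    // Descriptor fields: -1 = absent, 0 = false, 1 = true.
    struct Desc {
      int configurable = -1;
      int enumerable = -1;
      int writable = -1;
    };

    bool CanRedefineTypedArrayElement(const Desc& d, bool is_accessor) {
      if (is_accessor) return false;       // 3b vi: no accessors
      if (d.configurable == 1) return false;  // must stay non-configurable
      if (d.enumerable == 0) return false;    // must stay enumerable
      if (d.writable == 0) return false;      // must stay writable
      return true;  // a [[Value]], if present, goes through an element write
    }
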
ExternalArrayType JSTypedArray::type() {
switch (elements()->map()->instance_type()) {
@@ -17584,12 +17128,12 @@ Handle<PropertyCell> JSGlobalObject::EnsureEmptyPropertyCell(
if (original_cell_type == PropertyCellType::kInvalidated) {
cell = PropertyCell::InvalidateEntry(dictionary, entry);
}
- PropertyDetails details(NONE, DATA, 0, cell_type);
+ PropertyDetails details(kData, NONE, 0, cell_type);
cell->set_property_details(details);
return cell;
}
cell = isolate->factory()->NewPropertyCell();
- PropertyDetails details(NONE, DATA, 0, cell_type);
+ PropertyDetails details(kData, NONE, 0, cell_type);
dictionary =
GlobalDictionary::Add(dictionary, name, cell, details, entry_out);
// {*entry_out} is initialized inside GlobalDictionary::Add().
@@ -17931,7 +17475,11 @@ void CompilationCacheTable::Age() {
}
} else if (get(entry_index)->IsFixedArray()) {
SharedFunctionInfo* info = SharedFunctionInfo::cast(get(value_index));
- if (info->code()->kind() != Code::FUNCTION || info->code()->IsOld()) {
+ bool is_old =
+ info->IsInterpreted()
+ ? info->bytecode_array()->IsOld()
+ : info->code()->kind() != Code::FUNCTION || info->code()->IsOld();
+ if (is_old) {
NoWriteBarrierSet(this, entry_index, the_hole_value);
NoWriteBarrierSet(this, value_index, the_hole_value);
ElementRemoved();
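
With bytecode aging in place, cache entries for interpreted functions are judged by their BytecodeArray's age rather than by the shared interpreter trampoline's Code object. The eviction predicate, isolated as a sketch:

    struct CacheEntry {
      bool is_interpreted;         // SharedFunctionInfo::IsInterpreted()
      bool bytecode_is_old;        // BytecodeArray::IsOld()
      bool code_is_function_kind;  // code kind == Code::FUNCTION
      bool code_is_old;            // Code::IsOld()
    };

    // Same shape as the is_old computation above.
    bool ShouldEvict(const CacheEntry& e) {
      return e.is_interpreted
                 ? e.bytecode_is_old
                 : !e.code_is_function_kind || e.code_is_old;
    }
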
@@ -17969,44 +17517,24 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::New(
return dict;
}
-
template <typename Derived, typename Shape, typename Key>
-Handle<FixedArray> Dictionary<Derived, Shape, Key>::BuildIterationIndicesArray(
- Handle<Derived> dictionary) {
- Isolate* isolate = dictionary->GetIsolate();
- Factory* factory = isolate->factory();
- int length = dictionary->NumberOfElements();
-
- Handle<FixedArray> iteration_order = factory->NewFixedArray(length);
- Handle<FixedArray> enumeration_order = factory->NewFixedArray(length);
-
- // Fill both the iteration order array and the enumeration order array
- // with property details.
- int capacity = dictionary->Capacity();
- int pos = 0;
- for (int i = 0; i < capacity; i++) {
- if (dictionary->IsKey(isolate, dictionary->KeyAt(i))) {
- int index = dictionary->DetailsAt(i).dictionary_index();
- iteration_order->set(pos, Smi::FromInt(i));
- enumeration_order->set(pos, Smi::FromInt(index));
- pos++;
- }
- }
- DCHECK(pos == length);
-
- // Sort the arrays wrt. enumeration order.
- iteration_order->SortPairs(*enumeration_order, enumeration_order->length());
- return iteration_order;
+Handle<Derived> Dictionary<Derived, Shape, Key>::NewEmpty(
+ Isolate* isolate, PretenureFlag pretenure) {
+ Handle<Derived> dict = DerivedHashTable::New(isolate, 1, pretenure);
+ // Adding even one element to the empty dictionary must cause reallocation.
+ DCHECK(!dict->HasSufficientCapacityToAdd(1));
+ // Initialize the next enumeration index.
+ dict->SetNextEnumerationIndex(PropertyDetails::kInitialIndex);
+ return dict;
}
-
template <typename Derived, typename Shape, typename Key>
Handle<FixedArray>
Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
Handle<Derived> dictionary) {
int length = dictionary->NumberOfElements();
- Handle<FixedArray> iteration_order = BuildIterationIndicesArray(dictionary);
+ Handle<FixedArray> iteration_order = IterationIndices(dictionary);
DCHECK(iteration_order->length() == length);
// Iterate over the dictionary using the enumeration order and update
@@ -18137,15 +17665,15 @@ bool SeededNumberDictionary::HasComplexElements() {
if (!this->IsKey(isolate, k)) continue;
DCHECK(!IsDeleted(i));
PropertyDetails details = this->DetailsAt(i);
- if (details.type() == ACCESSOR_CONSTANT) return true;
+ if (details.kind() == kAccessor) return true;
PropertyAttributes attr = details.attributes();
if (attr & ALL_ATTRIBUTES_MASK) return true;
}
return false;
}
-void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
- bool used_as_prototype) {
+void SeededNumberDictionary::UpdateMaxNumberKey(
+ uint32_t key, Handle<JSObject> dictionary_holder) {
DisallowHeapAllocation no_allocation;
// If the dictionary requires slow elements an element has already
// been added at a high index.
@@ -18153,9 +17681,8 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
// Check if this index is high enough that we should require slow
// elements.
if (key > kRequiresSlowElementsLimit) {
- if (used_as_prototype) {
- // TODO(verwaest): Remove this hack.
- TypeFeedbackVector::ClearAllKeyedStoreICs(GetIsolate());
+ if (!dictionary_holder.is_null()) {
+ dictionary_holder->RequireSlowElements(this);
}
set_requires_slow_elements();
return;
@@ -18168,11 +17695,11 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
}
}
-
Handle<SeededNumberDictionary> SeededNumberDictionary::AddNumberEntry(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
- dictionary->UpdateMaxNumberKey(key, used_as_prototype);
+ Handle<Object> value, PropertyDetails details,
+ Handle<JSObject> dictionary_holder) {
+ dictionary->UpdateMaxNumberKey(key, dictionary_holder);
SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
return Add(dictionary, key, value, details);
}
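
Passing the holder through AddNumberEntry/AtNumberPut/Set replaces the old used_as_prototype flag: crossing the high-index limit now marks just that holder as requiring slow elements, rather than clearing keyed store ICs isolate-wide (the TODO removed above). In outline, with an illustrative limit (V8's is kRequiresSlowElementsLimit):

    #include <cstdint>

    struct Holder {
      bool requires_slow_elements = false;
      void RequireSlowElements() { requires_slow_elements = true; }
    };

    void UpdateMaxNumberKey(uint32_t key, Holder* holder, bool* requires_slow) {
      const uint32_t kSlowLimit = (1u << 29) - 1;  // assumed for the sketch
      if (key > kSlowLimit) {
        if (holder != nullptr) holder->RequireSlowElements();
        *requires_slow = true;
      }
    }
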
@@ -18200,8 +17727,8 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::DeleteKey(
Handle<SeededNumberDictionary> SeededNumberDictionary::AtNumberPut(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, bool used_as_prototype) {
- dictionary->UpdateMaxNumberKey(key, used_as_prototype);
+ Handle<Object> value, Handle<JSObject> dictionary_holder) {
+ dictionary->UpdateMaxNumberKey(key, dictionary_holder);
return AtPut(dictionary, key, value);
}
@@ -18213,13 +17740,13 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AtNumberPut(
return AtPut(dictionary, key, value);
}
-
Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details, bool used_as_prototype) {
+ Handle<Object> value, PropertyDetails details,
+ Handle<JSObject> dictionary_holder) {
int entry = dictionary->FindEntry(key);
if (entry == kNotFound) {
- return AddNumberEntry(dictionary, key, value, details, used_as_prototype);
+ return AddNumberEntry(dictionary, key, value, details, dictionary_holder);
}
// Preserve enumeration index.
details = details.set_index(dictionary->DetailsAt(entry).dictionary_index());
@@ -18278,6 +17805,7 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
Handle<Dictionary<Derived, Shape, Key>> dictionary,
Handle<FixedArray> storage, KeyCollectionMode mode,
KeyAccumulator* accumulator) {
+ DCHECK_IMPLIES(mode != KeyCollectionMode::kOwnOnly, accumulator != nullptr);
Isolate* isolate = dictionary->GetIsolate();
int length = storage->length();
int capacity = dictionary->Capacity();
@@ -18303,7 +17831,7 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
storage->set(properties, Smi::FromInt(i));
}
properties++;
- if (properties == length) break;
+ if (mode == KeyCollectionMode::kOwnOnly && properties == length) break;
}
CHECK_EQ(length, properties);
@@ -18320,6 +17848,34 @@ void Dictionary<Derived, Shape, Key>::CopyEnumKeysTo(
}
template <typename Derived, typename Shape, typename Key>
+Handle<FixedArray> Dictionary<Derived, Shape, Key>::IterationIndices(
+ Handle<Dictionary<Derived, Shape, Key>> dictionary) {
+ Isolate* isolate = dictionary->GetIsolate();
+ int capacity = dictionary->Capacity();
+ int length = dictionary->NumberOfElements();
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ int array_size = 0;
+ {
+ DisallowHeapAllocation no_gc;
+ Dictionary<Derived, Shape, Key>* raw_dict = *dictionary;
+ for (int i = 0; i < capacity; i++) {
+ Object* k = raw_dict->KeyAt(i);
+ if (!raw_dict->IsKey(isolate, k)) continue;
+ if (raw_dict->IsDeleted(i)) continue;
+ array->set(array_size++, Smi::FromInt(i));
+ }
+
+ DCHECK_EQ(array_size, length);
+
+ EnumIndexComparator<Derived> cmp(static_cast<Derived*>(raw_dict));
+ Smi** start = reinterpret_cast<Smi**>(array->GetFirstElementAddress());
+ std::sort(start, start + array_size, cmp);
+ }
+ array->Shrink(array_size);
+ return array;
+}
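
IterationIndices returns dictionary slot numbers ordered by insertion (enumeration) index; sorting the raw Smi pointers works because EnumIndexComparator dereferences back into the dictionary's details. The same algorithm over plain ints:

    #include <algorithm>
    #include <vector>

    // enum_index_of[slot] holds the enumeration index, or -1 if empty.
    std::vector<int> IterationIndices(const std::vector<int>& enum_index_of) {
      std::vector<int> slots;
      for (int i = 0; i < static_cast<int>(enum_index_of.size()); i++) {
        if (enum_index_of[i] >= 0) slots.push_back(i);
      }
      std::sort(slots.begin(), slots.end(),
                [&](int a, int b) { return enum_index_of[a] < enum_index_of[b]; });
      return slots;
    }
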
+
+template <typename Derived, typename Shape, typename Key>
void Dictionary<Derived, Shape, Key>::CollectKeysTo(
Handle<Dictionary<Derived, Shape, Key>> dictionary, KeyAccumulator* keys) {
Isolate* isolate = keys->isolate();
@@ -19373,27 +18929,6 @@ void JSDate::SetValue(Object* value, bool is_value_nan) {
}
-// static
-MaybeHandle<Object> JSDate::ToPrimitive(Handle<JSReceiver> receiver,
- Handle<Object> hint) {
- Isolate* const isolate = receiver->GetIsolate();
- if (hint->IsString()) {
- Handle<String> hint_string = Handle<String>::cast(hint);
- if (hint_string->Equals(isolate->heap()->number_string())) {
- return JSReceiver::OrdinaryToPrimitive(receiver,
- OrdinaryToPrimitiveHint::kNumber);
- }
- if (hint_string->Equals(isolate->heap()->default_string()) ||
- hint_string->Equals(isolate->heap()->string_string())) {
- return JSReceiver::OrdinaryToPrimitive(receiver,
- OrdinaryToPrimitiveHint::kString);
- }
- }
- THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidHint, hint),
- Object);
-}
-
-
void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
int days = DateCache::DaysFromTime(local_time_ms);
int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
@@ -19479,6 +19014,11 @@ void JSArrayBuffer::Neuter() {
set_backing_store(NULL);
set_byte_length(Smi::kZero);
set_was_neutered(true);
+ // Invalidate the neutering protector.
+ Isolate* const isolate = GetIsolate();
+ if (isolate->IsArrayBufferNeuteringIntact()) {
+ isolate->InvalidateArrayBufferNeuteringProtector();
+ }
}
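
The neutering protector lets optimized code skip per-access neuter checks as long as no ArrayBuffer has ever been neutered, so Neuter() must invalidate it. The pattern in miniature (in V8 the real protector is a PropertyCell, and invalidation also deoptimizes dependent code):

    struct Protector {
      bool intact = true;
      bool IsIntact() const { return intact; }
      void Invalidate() { intact = false; }  // one-way: never reset
    };
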
@@ -19726,24 +19266,14 @@ void PropertyCell::SetValueWithInvalidation(Handle<PropertyCell> cell,
int JSGeneratorObject::source_position() const {
CHECK(is_suspended());
- AbstractCode* code;
- int code_offset;
- if (function()->shared()->HasBytecodeArray()) {
- // New-style generators.
- DCHECK(!function()->shared()->HasBaselineCode());
- code_offset = Smi::cast(input_or_debug_pos())->value();
- // The stored bytecode offset is relative to a different base than what
- // is used in the source position table, hence the subtraction.
- code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
- code = AbstractCode::cast(function()->shared()->bytecode_array());
- } else {
- // Old-style generators.
- DCHECK(function()->shared()->HasBaselineCode());
- code_offset = continuation();
- CHECK(0 <= code_offset);
- CHECK(code_offset < function()->code()->instruction_size());
- code = AbstractCode::cast(function()->shared()->code());
- }
+ DCHECK(function()->shared()->HasBytecodeArray());
+ DCHECK(!function()->shared()->HasBaselineCode());
+ int code_offset = Smi::cast(input_or_debug_pos())->value();
+ // The stored bytecode offset is relative to a different base than what
+ // is used in the source position table, hence the subtraction.
+ code_offset -= BytecodeArray::kHeaderSize - kHeapObjectTag;
+ AbstractCode* code =
+ AbstractCode::cast(function()->shared()->bytecode_array());
return code->SourcePosition(code_offset);
}
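
The stored value includes the BytecodeArray header (minus the heap-pointer tag), while the source position table is indexed from the first bytecode; hence the subtraction. A worked example with an assumed header size (kHeapObjectTag really is 1 in V8; the header size here is illustrative):

    constexpr int kHeapObjectTagEx = 1;
    constexpr int kBytecodeHeaderSizeEx = 40;  // assumed for the example
    constexpr int stored_offset = 47;          // as kept in input_or_debug_pos
    constexpr int table_offset =
        stored_offset - (kBytecodeHeaderSizeEx - kHeapObjectTagEx);
    static_assert(table_offset == 8, "offset of the 9th bytecode byte");
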
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 747a4f0511..f9b696aff5 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -38,6 +38,8 @@
#include "src/s390/constants-s390.h" // NOLINT
#endif
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
//
// Most object types in the V8 JavaScript heap are described in this file.
@@ -72,7 +74,6 @@
// - JSDate
// - JSMessageObject
// - JSModuleNamespace
-// - JSFixedArrayIterator
// - JSProxy
// - FixedArrayBase
// - ByteArray
@@ -181,10 +182,6 @@ enum KeyedAccessStoreMode {
STORE_NO_TRANSITION_HANDLE_COW
};
-
-enum TypeofMode : int { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-
-
enum MutableMode {
MUTABLE,
IMMUTABLE
@@ -244,10 +241,6 @@ enum WriteBarrierMode {
};
-// Indicates whether a value can be loaded as a constant.
-enum StoreMode { ALLOW_IN_DESCRIPTOR, FORCE_FIELD };
-
-
// PropertyNormalizationMode is used to specify whether to keep
// inobject properties when normalizing properties of a JSObject.
enum PropertyNormalizationMode {
@@ -288,19 +281,6 @@ enum DescriptorFlag {
OWN_DESCRIPTORS
};
-// The GC maintains a bit of information, the MarkingParity, which toggles
-// from odd to even and back every time marking is completed. Incremental
-// marking can visit an object twice during a marking phase, so algorithms
-// that piggy-back on marking can use the parity to ensure that they only
-// perform an operation on an object once per marking phase: they record the
-// MarkingParity when they visit an object, and only re-visit the object when it
-// is marked again and the MarkingParity changes.
-enum MarkingParity {
- NO_MARKING_PARITY,
- ODD_MARKING_PARITY,
- EVEN_MARKING_PARITY
-};
-
// ICs store extra state in a Code object. The default extra state is
// kNoExtraICState.
typedef int ExtraICState;
@@ -338,38 +318,34 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
#define INSTANCE_TYPE_LIST(V) \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(STRING_TYPE) \
- V(ONE_BYTE_STRING_TYPE) \
V(CONS_STRING_TYPE) \
- V(CONS_ONE_BYTE_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(SLICED_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
V(SHORT_EXTERNAL_STRING_TYPE) \
V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
\
- V(INTERNALIZED_STRING_TYPE) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
V(SYMBOL_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
V(SIMD128_VALUE_TYPE) \
+ V(ODDBALL_TYPE) \
\
V(MAP_TYPE) \
V(CODE_TYPE) \
- V(ODDBALL_TYPE) \
- V(CELL_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- \
- V(HEAP_NUMBER_TYPE) \
V(MUTABLE_HEAP_NUMBER_TYPE) \
V(FOREIGN_TYPE) \
V(BYTE_ARRAY_TYPE) \
@@ -386,6 +362,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(FIXED_FLOAT64_ARRAY_TYPE) \
V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
\
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
V(ACCESSOR_INFO_TYPE) \
@@ -395,53 +372,54 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CALL_HANDLER_INFO_TYPE) \
V(FUNCTION_TEMPLATE_INFO_TYPE) \
V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(SIGNATURE_INFO_TYPE) \
- V(TYPE_SWITCH_INFO_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
V(ALLOCATION_SITE_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
V(SCRIPT_TYPE) \
V(TYPE_FEEDBACK_INFO_TYPE) \
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(BOX_TYPE) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
V(PROMISE_REACTION_JOB_INFO_TYPE) \
+ V(DEBUG_INFO_TYPE) \
+ V(BREAK_POINT_INFO_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
+ V(TUPLE2_TYPE) \
V(TUPLE3_TYPE) \
V(CONTEXT_EXTENSION_TYPE) \
+ V(CONSTANT_ELEMENTS_PAIR_TYPE) \
V(MODULE_TYPE) \
V(MODULE_INFO_ENTRY_TYPE) \
- \
V(FIXED_ARRAY_TYPE) \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
V(SHARED_FUNCTION_INFO_TYPE) \
+ V(CELL_TYPE) \
V(WEAK_CELL_TYPE) \
- V(TRANSITION_ARRAY_TYPE) \
- \
- V(JS_MESSAGE_OBJECT_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
\
+ V(JS_PROXY_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_VALUE_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
V(JS_DATE_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
V(JS_OBJECT_TYPE) \
V(JS_ARGUMENTS_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_NAMESPACE_TYPE) \
- V(JS_FIXED_ARRAY_ITERATOR_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_API_OBJECT_TYPE) \
- V(JS_SPECIAL_API_OBJECT_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
V(JS_TYPED_ARRAY_TYPE) \
V(JS_DATA_VIEW_TYPE) \
- V(JS_PROXY_TYPE) \
V(JS_SET_TYPE) \
V(JS_MAP_TYPE) \
V(JS_SET_ITERATOR_TYPE) \
V(JS_MAP_ITERATOR_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
+ V(JS_PROMISE_CAPABILITY_TYPE) \
V(JS_PROMISE_TYPE) \
V(JS_REGEXP_TYPE) \
V(JS_ERROR_TYPE) \
@@ -451,12 +429,12 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE) \
V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE) \
\
- V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
@@ -469,12 +447,12 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
\
- V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
@@ -488,9 +466,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) \
\
V(JS_BOUND_FUNCTION_TYPE) \
- V(JS_FUNCTION_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE)
+ V(JS_FUNCTION_TYPE)
// Since string types are not consecutive, this macro is used to
// iterate over them.
@@ -553,11 +529,6 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST(V) \
- V(BOX, Box, box) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
- promise_resolve_thenable_job_info) \
- V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
- promise_reaction_job_info) \
V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
@@ -565,18 +536,25 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
- V(SCRIPT, Script, script) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
+ V(SCRIPT, Script, script) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
+ V(BOX, Box, box) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
+ promise_resolve_thenable_job_info) \
+ V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
+ promise_reaction_job_info) \
V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(TUPLE2, Tuple2, tuple2) \
V(TUPLE3, Tuple3, tuple3) \
+ V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
+ V(CONSTANT_ELEMENTS_PAIR, ConstantElementsPair, constant_elements_pair) \
V(MODULE, Module, module) \
- V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
- V(CONTEXT_EXTENSION, ContextExtension, context_extension)
+ V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry)
// We use the full 8 bits of the instance_type field to encode heap object
// instance types. The high-order bit (bit 7) is set if the object is not a
@@ -630,7 +608,6 @@ const uint32_t kOneByteDataHintTag = 0x08;
const uint32_t kShortExternalStringMask = 0x10;
const uint32_t kShortExternalStringTag = 0x10;
-
// A ConsString with an empty string as the right side is a candidate
// for being shortcut by the garbage collector. We don't allocate any
// non-flat internalized strings, so we do not shortcut them thereby
@@ -733,8 +710,6 @@ enum InstanceType {
CALL_HANDLER_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
- SIGNATURE_INFO_TYPE,
- TYPE_SWITCH_INFO_TYPE,
ALLOCATION_SITE_TYPE,
ALLOCATION_MEMENTO_TYPE,
SCRIPT_TYPE,
@@ -745,17 +720,19 @@ enum InstanceType {
PROMISE_REACTION_JOB_INFO_TYPE,
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
- FIXED_ARRAY_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
- CELL_TYPE,
- WEAK_CELL_TYPE,
- TRANSITION_ARRAY_TYPE,
- PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
+ TUPLE2_TYPE,
TUPLE3_TYPE,
CONTEXT_EXTENSION_TYPE,
+ CONSTANT_ELEMENTS_PAIR_TYPE,
MODULE_TYPE,
MODULE_INFO_ENTRY_TYPE,
+ FIXED_ARRAY_TYPE,
+ TRANSITION_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ CELL_TYPE,
+ WEAK_CELL_TYPE,
+ PROPERTY_CELL_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -777,7 +754,6 @@ enum InstanceType {
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_NAMESPACE_TYPE,
- JS_FIXED_ARRAY_ITERATOR_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
JS_TYPED_ARRAY_TYPE,
@@ -788,6 +764,7 @@ enum InstanceType {
JS_MAP_ITERATOR_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
+ JS_PROMISE_CAPABILITY_TYPE,
JS_PROMISE_TYPE,
JS_REGEXP_TYPE,
JS_ERROR_TYPE,
@@ -965,25 +942,6 @@ enum class ComparisonResult {
};
-#define DECL_BOOLEAN_ACCESSORS(name) \
- inline bool name() const; \
- inline void set_##name(bool value);
-
-#define DECL_INT_ACCESSORS(name) \
- inline int name() const; \
- inline void set_##name(int value);
-
-
-#define DECL_ACCESSORS(name, type) \
- inline type* name() const; \
- inline void set_##name(type* value, \
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
-
-
-#define DECLARE_CAST(type) \
- INLINE(static type* cast(Object* object)); \
- INLINE(static const type* cast(const Object* object));
-
class AbstractCode;
class AccessorPair;
class AllocationSite;
@@ -1021,12 +979,6 @@ class TemplateList;
// A template-ized version of the IsXXX functions.
template <class C> inline bool Is(Object* obj);
-#ifdef VERIFY_HEAP
-#define DECLARE_VERIFIER(Name) void Name##Verify();
-#else
-#define DECLARE_VERIFIER(Name)
-#endif
-
#ifdef OBJECT_PRINT
#define DECLARE_PRINTER(Name) void Name##Print(std::ostream& os); // NOLINT
#else
@@ -1083,10 +1035,10 @@ template <class C> inline bool Is(Object* obj);
V(FreeSpace) \
V(JSReceiver) \
V(JSObject) \
+ V(JSArgumentsObject) \
V(JSContextExtensionObject) \
V(JSGeneratorObject) \
V(JSModuleNamespace) \
- V(JSFixedArrayIterator) \
V(Map) \
V(DescriptorArray) \
V(FrameArray) \
@@ -1129,6 +1081,7 @@ template <class C> inline bool Is(Object* obj);
V(JSDataView) \
V(JSProxy) \
V(JSError) \
+ V(JSPromiseCapability) \
V(JSPromise) \
V(JSStringIterator) \
V(JSSet) \
@@ -1199,11 +1152,14 @@ class Object {
OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
+
#define IS_TYPE_FUNCTION_DECL(Type, Value) \
INLINE(bool Is##Type(Isolate* isolate) const);
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
+ INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+
// A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
// a keyed store is of the form a[expression] = foo.
enum StoreFromKeyed {
@@ -1313,37 +1269,38 @@ class Object {
Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
// ES6 section 7.1.3 ToNumber
- MUST_USE_RESULT static MaybeHandle<Object> ToNumber(Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToNumber(
+ Handle<Object> input);
// ES6 section 7.1.4 ToInteger
- MUST_USE_RESULT static MaybeHandle<Object> ToInteger(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToInteger(
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.5 ToInt32
- MUST_USE_RESULT static MaybeHandle<Object> ToInt32(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToInt32(
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.6 ToUint32
- MUST_USE_RESULT static MaybeHandle<Object> ToUint32(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT inline static MaybeHandle<Object> ToUint32(
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.12 ToString
- MUST_USE_RESULT static MaybeHandle<String> ToString(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<String> ToString(
+ Isolate* isolate, Handle<Object> input);
static Handle<String> NoSideEffectsToString(Isolate* isolate,
Handle<Object> input);
// ES6 section 7.1.14 ToPropertyKey
- MUST_USE_RESULT static MaybeHandle<Object> ToPropertyKey(
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToPropertyKey(
Isolate* isolate, Handle<Object> value);
// ES6 section 7.1.15 ToLength
- MUST_USE_RESULT static MaybeHandle<Object> ToLength(Isolate* isolate,
- Handle<Object> input);
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToLength(
+ Isolate* isolate, Handle<Object> input);
// ES6 section 7.1.17 ToIndex
- MUST_USE_RESULT static MaybeHandle<Object> ToIndex(
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToIndex(
Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index);
@@ -1522,6 +1479,11 @@ class Object {
// allow kMaxUInt32.
inline bool ToArrayIndex(uint32_t* index);
+ // Returns true if the result of iterating over the object is the same
+ // (including observable effects) as simply accessing the properties between 0
+ // and length.
+ bool IterationHasObservableEffects();
+
DECLARE_VERIFIER(Object)
#ifdef VERIFY_HEAP
// Verify a pointer is a valid object pointer.
@@ -1530,10 +1492,6 @@ class Object {
inline void VerifyApiCallResultType();
- // ES6 19.1.3.6 Object.prototype.toString
- MUST_USE_RESULT static MaybeHandle<String> ObjectProtoToString(
- Isolate* isolate, Handle<Object> object);
-
// Prints this object without details.
void ShortPrint(FILE* out = stdout);
@@ -1573,6 +1531,23 @@ class Object {
MUST_USE_RESULT static MaybeHandle<Name> ConvertToName(Isolate* isolate,
Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToPropertyKey(
+ Isolate* isolate, Handle<Object> value);
+ MUST_USE_RESULT static MaybeHandle<String> ConvertToString(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToNumber(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToInteger(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToInt32(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToUint32(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToLength(
+ Isolate* isolate, Handle<Object> input);
+ MUST_USE_RESULT static MaybeHandle<Object> ConvertToIndex(
+ Isolate* isolate, Handle<Object> input,
+ MessageTemplate::Template error_index);
DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};
@@ -1600,6 +1575,10 @@ class Smi: public Object {
public:
// Returns the integer value.
inline int value() const { return Internals::SmiValue(this); }
+ inline Smi* ToUint32Smi() {
+ if (value() <= 0) return Smi::kZero;
+ return Smi::FromInt(static_cast<uint32_t>(value()));
+ }
// Convert a value to a Smi object.
static inline Smi* FromInt(int value) {
@@ -1626,7 +1605,7 @@ class Smi: public Object {
V8_EXPORT_PRIVATE void SmiPrint(std::ostream& os) const; // NOLINT
DECLARE_VERIFIER(Smi)
- V8_EXPORT_PRIVATE static Smi* const kZero;
+ static constexpr Smi* const kZero = nullptr;
static const int kMinValue =
(static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
static const int kMaxValue = -(kMinValue + 1);
@@ -1725,6 +1704,8 @@ class HeapObject: public Object {
ODDBALL_LIST(IS_TYPE_FUNCTION_DECL)
#undef IS_TYPE_FUNCTION_DECL
+ INLINE(bool IsNullOrUndefined(Isolate* isolate) const);
+
#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
INLINE(bool Is##Name() const);
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
@@ -2016,6 +1997,12 @@ class JSReceiver: public HeapObject {
MUST_USE_RESULT static Maybe<bool> HasInPrototypeChain(
Isolate* isolate, Handle<JSReceiver> object, Handle<Object> proto);
+ // Reads all enumerable own properties of source and adds them to target,
+ // using either Set or CreateDataProperty depending on the use_set argument.
+ MUST_USE_RESULT static Maybe<bool> SetOrCopyDataProperties(
+ Isolate* isolate, Handle<JSReceiver> target, Handle<Object> source,
+ bool use_set);
+
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static Maybe<bool> HasProperty(LookupIterator* it);
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
@@ -2570,8 +2557,8 @@ class JSObject: public JSReceiver {
DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- void PrintProperties(std::ostream& os); // NOLINT
- void PrintElements(std::ostream& os); // NOLINT
+ bool PrintProperties(std::ostream& os); // NOLINT
+ bool PrintElements(std::ostream& os); // NOLINT
#endif
#if defined(DEBUG) || defined(OBJECT_PRINT)
void PrintTransitions(std::ostream& os); // NOLINT
@@ -2855,8 +2842,11 @@ class FixedArray: public FixedArrayBase {
// Setters for frequently used oddballs located in old space.
inline void set_undefined(int index);
+ inline void set_undefined(Isolate* isolate, int index);
inline void set_null(int index);
+ inline void set_null(Isolate* isolate, int index);
inline void set_the_hole(int index);
+ inline void set_the_hole(Isolate* isolate, int index);
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
@@ -2898,16 +2888,6 @@ class FixedArray: public FixedArrayBase {
bool IsEqualTo(FixedArray* other);
#endif
- // Swap two elements in a pair of arrays. If this array and the
- // numbers array are the same object, the elements are only swapped
- // once.
- void SwapPairs(FixedArray* numbers, int i, int j);
-
- // Sort prefix of this array and the numbers array as pairs wrt. the
- // numbers. If the numbers array and the this array are the same
- // object, the prefix of this array is sorted.
- void SortPairs(FixedArray* numbers, uint32_t len);
-
typedef FlexibleBodyDescriptor<kHeaderSize> BodyDescriptor;
protected:
@@ -2933,6 +2913,7 @@ class FixedDoubleArray: public FixedArrayBase {
static inline Handle<Object> get(FixedDoubleArray* array, int index,
Isolate* isolate);
inline void set(int index, double value);
+ inline void set_the_hole(Isolate* isolate, int index);
inline void set_the_hole(int index);
// Checking for the hole.
@@ -3147,6 +3128,7 @@ class FrameArray : public FixedArray {
static const int kIsAsmJsWasmFrame = 1 << 1;
static const int kIsStrict = 1 << 2;
static const int kForceConstructor = 1 << 3;
+ static const int kAsmJsAtNumberConversion = 1 << 4;
static Handle<FrameArray> AppendJSFrame(Handle<FrameArray> in,
Handle<Object> receiver,
@@ -3252,24 +3234,23 @@ class DescriptorArray: public FixedArray {
inline Object** GetDescriptorStartSlot(int descriptor_number);
inline Object** GetDescriptorEndSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
- inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
- FieldType* GetFieldType(int descriptor_number);
- inline Object* GetConstant(int descriptor_number);
- inline Object* GetCallbacksObject(int descriptor_number);
- inline AccessorDescriptor* GetCallbacks(int descriptor_number);
+ inline FieldType* GetFieldType(int descriptor_number);
inline Name* GetSortedKey(int descriptor_number);
inline int GetSortedKeyIndex(int descriptor_number);
inline void SetSortedKey(int pointer, int descriptor_number);
- inline void SetRepresentation(int descriptor_number,
- Representation representation);
// Accessor for complete descriptor.
inline void Get(int descriptor_number, Descriptor* desc);
inline void Set(int descriptor_number, Descriptor* desc);
+ inline void Set(int descriptor_number, Name* key, Object* value,
+ PropertyDetails details);
void Replace(int descriptor_number, Descriptor* descriptor);
+ // Generalizes representation and field type of all field descriptors.
+ void GeneralizeAllFields();
+
// Append automatically sets the enumeration index. This should only be used
// to add descriptors in bulk at the end, followed by sorting the descriptor
// array.
@@ -3337,6 +3318,9 @@ class DescriptorArray: public FixedArray {
// Print all the descriptors.
void PrintDescriptors(std::ostream& os); // NOLINT
+
+ void PrintDescriptorDetails(std::ostream& os, int descriptor,
+ PropertyDetails::PrintMode mode);
#endif
#ifdef DEBUG
@@ -3372,26 +3356,10 @@ class DescriptorArray: public FixedArray {
}
private:
- // An entry in a DescriptorArray, represented as an (array, index) pair.
- class Entry {
- public:
- inline explicit Entry(DescriptorArray* descs, int index) :
- descs_(descs), index_(index) { }
-
- inline PropertyType type();
- inline Object* GetCallbackObject();
-
- private:
- DescriptorArray* descs_;
- int index_;
- };
-
// Transfer a complete descriptor from the src descriptor array to this
// descriptor array.
void CopyFrom(int index, DescriptorArray* src);
- inline void SetDescriptor(int descriptor_number, Descriptor* desc);
-
// Swap first and second descriptor.
inline void SwapSortedKeys(int first, int second);
@@ -3593,6 +3561,9 @@ class HashTable : public HashTableBase {
protected:
friend class ObjectHashTable;
+ MUST_USE_RESULT static Handle<Derived> New(Isolate* isolate, int capacity,
+ PretenureFlag pretenure);
+
// Find the entry at which to insert element with the given key that
// has the given hash value.
uint32_t FindInsertionEntry(uint32_t hash);
@@ -3788,6 +3759,10 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
enum SortMode { UNSORTED, SORTED };
+ // Returns the key indices sorted by their enumeration indices.
+ static Handle<FixedArray> IterationIndices(
+ Handle<Dictionary<Derived, Shape, Key>> dictionary);
+
// Collect the keys into the given KeyAccumulator, in ascending chronological
// order of property creation.
static void CollectKeysTo(Handle<Dictionary<Derived, Shape, Key>> dictionary,
@@ -3814,6 +3789,10 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
PretenureFlag pretenure = NOT_TENURED,
MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY);
+ // Creates a dictionary with the minimal possible capacity.
+ MUST_USE_RESULT static Handle<Derived> NewEmpty(
+ Isolate* isolate, PretenureFlag pretenure = NOT_TENURED);
+
// Ensures that a new dictionary is created when the capacity is checked.
void SetRequiresCopyOnCapacityChange();
@@ -3843,14 +3822,11 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
PropertyDetails details,
int* entry_out = nullptr);
- // Returns iteration indices array for the |dictionary|.
- // Values are direct indices in the |HashTable| array.
- static Handle<FixedArray> BuildIterationIndicesArray(
- Handle<Derived> dictionary);
-
static const int kMaxNumberKeyIndex = DerivedHashTable::kPrefixStartIndex;
static const int kNextEnumerationIndexIndex = kMaxNumberKeyIndex + 1;
+ static const bool kIsEnumerable = Shape::kIsEnumerable;
+
protected:
// Generic at put operation.
MUST_USE_RESULT static Handle<Derived> AtPut(
@@ -4023,18 +3999,20 @@ class SeededNumberDictionary
// Type specific at put (default NONE attributes is used when adding).
MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, bool used_as_prototype);
+ Handle<Object> value, Handle<JSObject> dictionary_holder);
MUST_USE_RESULT static Handle<SeededNumberDictionary> AddNumberEntry(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details, bool used_as_prototype);
+ Handle<Object> value, PropertyDetails details,
+ Handle<JSObject> dictionary_holder);
// Set an existing entry or add a new one if needed.
// Return the updated dictionary.
MUST_USE_RESULT static Handle<SeededNumberDictionary> Set(
Handle<SeededNumberDictionary> dictionary, uint32_t key,
- Handle<Object> value, PropertyDetails details, bool used_as_prototype);
+ Handle<Object> value, PropertyDetails details,
+ Handle<JSObject> dictionary_holder);
- void UpdateMaxNumberKey(uint32_t key, bool used_as_prototype);
+ void UpdateMaxNumberKey(uint32_t key, Handle<JSObject> dictionary_holder);
// Returns true if the dictionary contains any elements that are non-writable,
// non-configurable, non-enumerable, or have getters/setters.
@@ -4436,314 +4414,6 @@ class WeakHashTable: public HashTable<WeakHashTable,
};
-// ScopeInfo represents information about different scopes of a source
-// program and the allocation of the scope's variables. Scope information
-// is stored in a compressed form in ScopeInfo objects and is used
-// at runtime (stack dumps, deoptimization, etc.).
-
-// This object provides quick access to scope info details for runtime
-// routines.
-class ScopeInfo : public FixedArray {
- public:
- DECLARE_CAST(ScopeInfo)
-
- // Return the type of this scope.
- ScopeType scope_type();
-
- // Does this scope call eval?
- bool CallsEval();
-
- // Return the language mode of this scope.
- LanguageMode language_mode();
-
- // True if this scope is a (var) declaration scope.
- bool is_declaration_scope();
-
- // Does this scope make a sloppy eval call?
- bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
-
- // Return the total number of locals allocated on the stack and in the
- // context. This includes the parameters that are allocated in the context.
- int LocalCount();
-
- // Return the number of stack slots for code. This number consists of two
- // parts:
- // 1. One stack slot per stack allocated local.
- // 2. One stack slot for the function name if it is stack allocated.
- int StackSlotCount();
-
- // Return the number of context slots for code if a context is allocated. This
- // number consists of three parts:
- // 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
- // 2. One context slot per context allocated local.
- // 3. One context slot for the function name if it is context allocated.
- // Parameters allocated in the context count as context allocated locals. If
- // no contexts are allocated for this scope ContextLength returns 0.
- int ContextLength();
-
- // Does this scope declare a "this" binding?
- bool HasReceiver();
-
- // Does this scope declare a "this" binding, and the "this" binding is stack-
- // or context-allocated?
- bool HasAllocatedReceiver();
-
- // Does this scope declare a "new.target" binding?
- bool HasNewTarget();
-
- // Is this scope the scope of a named function expression?
- bool HasFunctionName();
-
- // Return if this has context allocated locals.
- bool HasHeapAllocatedLocals();
-
- // Return if contexts are allocated for this scope.
- bool HasContext();
-
- // Return if this is a function scope with "use asm".
- inline bool IsAsmModule();
-
- // Return if this is a nested function within an asm module scope.
- inline bool IsAsmFunction();
-
- inline bool HasSimpleParameters();
-
- // Return the function_name if present.
- String* FunctionName();
-
- ModuleInfo* ModuleDescriptorInfo();
-
- // Return the name of the given parameter.
- String* ParameterName(int var);
-
- // Return the name of the given local.
- String* LocalName(int var);
-
- // Return the name of the given stack local.
- String* StackLocalName(int var);
-
- // Return the name of the given stack local.
- int StackLocalIndex(int var);
-
- // Return the name of the given context local.
- String* ContextLocalName(int var);
-
- // Return the mode of the given context local.
- VariableMode ContextLocalMode(int var);
-
- // Return the initialization flag of the given context local.
- InitializationFlag ContextLocalInitFlag(int var);
-
- // Return the initialization flag of the given context local.
- MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
-
- // Return true if this local was introduced by the compiler, and should not be
- // exposed to the user in a debugger.
- static bool VariableIsSynthetic(String* name);
-
- // Lookup support for serialized scope info. Returns the
- // stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be an internalized
- // string.
- int StackSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the local context slot
- // index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be an internalized string.
- // If the slot is present and mode != NULL, sets *mode to the corresponding
- // mode for that variable.
- static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
- VariableMode* mode, InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
-
- // Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
- // module variable with the given name (the index value of a MODULE variable
- // is never 0).
- int ModuleIndex(Handle<String> name, VariableMode* mode,
- InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
-
- // Lookup the name of a certain context slot by its index.
- String* ContextSlotName(int slot_index);
-
- // Lookup support for serialized scope info. Returns the
- // parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be an internalized string.
- int ParameterIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the function context
- // slot index if the function name is present and context-allocated (named
- // function expressions, only), otherwise returns a value < 0. The name
- // must be an internalized string.
- int FunctionContextSlotIndex(String* name);
-
- // Lookup support for serialized scope info. Returns the receiver context
- // slot index if scope has a "this" binding, and the binding is
- // context-allocated. Otherwise returns a value < 0.
- int ReceiverContextSlotIndex();
-
- FunctionKind function_kind();
-
- // Returns true if this ScopeInfo is linked to an outer ScopeInfo.
- bool HasOuterScopeInfo();
-
- // Returns true if this ScopeInfo was created for a debug-evaluate scope.
- bool IsDebugEvaluateScope();
-
- // Can be used to mark a ScopeInfo that looks like a with-scope as actually
- // being a debug-evaluate scope.
- void SetIsDebugEvaluateScope();
-
- // Return the outer ScopeInfo if present.
- ScopeInfo* OuterScopeInfo();
-
-#ifdef DEBUG
- bool Equals(ScopeInfo* other) const;
-#endif
-
- static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
- MaybeHandle<ScopeInfo> outer_scope);
- static Handle<ScopeInfo> CreateForWithScope(
- Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
- static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
-
- // Serializes empty scope info.
- V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
-
-#ifdef DEBUG
- void Print();
-#endif
-
- // The layout of the static part of a ScopeInfo is as follows. Each entry is
- // numeric and occupies one array slot.
-// 1. A set of properties of the scope.
-// 2. The number of parameters. For non-function scopes this is 0.
-// 3. The number of non-parameter variables allocated on the stack.
-// 4. The number of non-parameter and parameter variables allocated in the
-// context.
-#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
- V(Flags) \
- V(ParameterCount) \
- V(StackLocalCount) \
- V(ContextLocalCount)
-
-#define FIELD_ACCESSORS(name) \
- inline void Set##name(int value); \
- inline int name();
- FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
-#undef FIELD_ACCESSORS
-
- enum {
-#define DECL_INDEX(name) k##name,
- FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
-#undef DECL_INDEX
- kVariablePartIndex
- };
-
- private:
- // The layout of the variable part of a ScopeInfo is as follows:
- // 1. ParameterNames:
- // This part stores the names of the parameters for function scopes. One
- // slot is used per parameter, so in total this part occupies
- // ParameterCount() slots in the array. For other scopes than function
- // scopes ParameterCount() is 0.
- // 2. StackLocalFirstSlot:
- // Index of a first stack slot for stack local. Stack locals belonging to
- // this scope are located on a stack at slots starting from this index.
- // 3. StackLocalNames:
- // Contains the names of local variables that are allocated on the stack,
- // in increasing order of the stack slot index. First local variable has a
- // stack slot index defined in StackLocalFirstSlot (point 2 above).
- // One slot is used per stack local, so in total this part occupies
- // StackLocalCount() slots in the array.
- // 4. ContextLocalNames:
- // Contains the names of local variables and parameters that are allocated
- // in the context. They are stored in increasing order of the context slot
- // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
- // context local, so in total this part occupies ContextLocalCount() slots
- // in the array.
- // 5. ContextLocalInfos:
- // Contains the variable modes and initialization flags corresponding to
- // the context locals in ContextLocalNames. One slot is used per
- // context local, so in total this part occupies ContextLocalCount()
- // slots in the array.
- // 6. ReceiverInfo:
- // If the scope binds a "this" value, one slot is reserved to hold the
- // context or stack slot index for the variable.
- // 7. FunctionNameInfo:
- // If the scope belongs to a named function expression this part contains
- // information about the function variable. It always occupies two array
- // slots: a. The name of the function variable.
- // b. The context or stack slot index for the variable.
- // 8. OuterScopeInfoIndex:
- // The outer scope's ScopeInfo or the hole if there's none.
- // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
- // For a module scope, this part contains the ModuleInfo, the number of
- // MODULE-allocated variables, and the metadata of those variables. For
- // non-module scopes it is empty.
- int ParameterNamesIndex();
- int StackLocalFirstSlotIndex();
- int StackLocalNamesIndex();
- int ContextLocalNamesIndex();
- int ContextLocalInfosIndex();
- int ReceiverInfoIndex();
- int FunctionNameInfoIndex();
- int OuterScopeInfoIndex();
- int ModuleInfoIndex();
- int ModuleVariableCountIndex();
- int ModuleVariablesIndex();
-
- int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
- VariableLocation* location, InitializationFlag* init_flag,
- MaybeAssignedFlag* maybe_assigned_flag);
-
- // Get metadata of i-th MODULE-allocated variable, where 0 <= i <
- // ModuleVariableCount. The metadata is returned via out-arguments, which may
- // be nullptr if the corresponding information is not requested.
- void ModuleVariable(int i, String** name, int* index,
- VariableMode* mode = nullptr,
- InitializationFlag* init_flag = nullptr,
- MaybeAssignedFlag* maybe_assigned_flag = nullptr);
-
- // Used for the function name variable for named function expressions, and for
- // the receiver.
- enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
-
- // Properties of scopes.
- class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
- class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
- STATIC_ASSERT(LANGUAGE_END == 2);
- class LanguageModeField
- : public BitField<LanguageMode, CallsEvalField::kNext, 1> {};
- class DeclarationScopeField
- : public BitField<bool, LanguageModeField::kNext, 1> {};
- class ReceiverVariableField
- : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
- 2> {};
- class HasNewTargetField
- : public BitField<bool, ReceiverVariableField::kNext, 1> {};
- class FunctionVariableField
- : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
- class AsmModuleField
- : public BitField<bool, FunctionVariableField::kNext, 1> {};
- class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
- class HasSimpleParametersField
- : public BitField<bool, AsmFunctionField::kNext, 1> {};
- class FunctionKindField
- : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
- class HasOuterScopeInfoField
- : public BitField<bool, FunctionKindField::kNext, 1> {};
- class IsDebugEvaluateScopeField
- : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
-
- // Properties of variables.
- class VariableModeField : public BitField<VariableMode, 0, 3> {};
- class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
- class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
-
- friend class ScopeIterator;
-};
-
// The cache for maps used by normalized (dictionary mode) objects.
// Such maps do not have property descriptors, so a typical program
// needs very limited number of distinct normalized maps.
@@ -4815,7 +4485,9 @@ class HandlerTable : public FixedArray {
inline void SetReturnOffset(int index, int value);
inline void SetReturnHandler(int index, int offset);
- // Lookup handler in a table based on ranges.
+ // Lookup handler in a table based on ranges. The {pc_offset} is an offset to
+ // the start of the potentially throwing instruction (using return addresses
+ // for this value would be invalid).
int LookupRange(int pc_offset, int* data, CatchPrediction* prediction);
// Lookup handler in a table based on return addresses.
@@ -4939,6 +4611,17 @@ class PodArray : public ByteArray {
// BytecodeArray represents a sequence of interpreter bytecodes.
class BytecodeArray : public FixedArrayBase {
public:
+#define DECLARE_BYTECODE_AGE_ENUM(X) k##X##BytecodeAge,
+ enum Age {
+ kNoAgeBytecodeAge = 0,
+ CODE_AGE_LIST(DECLARE_BYTECODE_AGE_ENUM) kAfterLastBytecodeAge,
+ kFirstBytecodeAge = kNoAgeBytecodeAge,
+ kLastBytecodeAge = kAfterLastBytecodeAge - 1,
+ kBytecodeAgeCount = kAfterLastBytecodeAge - kFirstBytecodeAge - 1,
+ kIsOldBytecodeAge = kSexagenarianBytecodeAge
+ };
+#undef DECLARE_BYTECODE_AGE_ENUM
+
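
The Age enum above gives BytecodeArray the same age tracking that Code already has further down in this diff: a single byte at kBytecodeAgeOffset, advanced by MakeOlder() and tested by IsOld(). A minimal standalone sketch of the arithmetic the enum implies; the placeholder age names stand in for the CODE_AGE_LIST entries, of which only kSexagenarianBytecodeAge is confirmed by the enum itself:

    #include <cstdint>

    enum Age : uint8_t {
      kNoAge = 0,
      kQuadragenarian,  // placeholder CODE_AGE_LIST entries
      kQuinquagenarian,
      kSexagenarian,
      kAfterLast,
      kLast = kAfterLast - 1,
      kIsOld = kSexagenarian
    };

    // Advance the age by one step, saturating at the last valid age.
    Age MakeOlder(Age age) {
      return age < kLast ? static_cast<Age>(age + 1) : age;
    }

    bool IsOld(Age age) { return age >= kIsOld; }
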
static int SizeFor(int length) {
return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
@@ -4969,6 +4652,10 @@ class BytecodeArray : public FixedArrayBase {
inline int osr_loop_nesting_level() const;
inline void set_osr_loop_nesting_level(int depth);
+ // Accessors for bytecode's code age.
+ inline Age bytecode_age() const;
+ inline void set_bytecode_age(Age age);
+
// Accessors for the constant pool.
DECL_ACCESSORS(constant_pool, FixedArray)
@@ -5000,8 +4687,9 @@ class BytecodeArray : public FixedArrayBase {
void CopyBytecodesTo(BytecodeArray* to);
- int LookupRangeInHandlerTable(int code_offset, int* data,
- HandlerTable::CatchPrediction* prediction);
+ // Bytecode aging
+ bool IsOld() const;
+ void MakeOlder();
// Layout description.
static const int kConstantPoolOffset = FixedArrayBase::kHeaderSize;
@@ -5012,7 +4700,8 @@ class BytecodeArray : public FixedArrayBase {
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
static const int kOSRNestingLevelOffset = kInterruptBudgetOffset + kIntSize;
- static const int kHeaderSize = kOSRNestingLevelOffset + kCharSize;
+ static const int kBytecodeAgeOffset = kOSRNestingLevelOffset + kCharSize;
+ static const int kHeaderSize = kBytecodeAgeOffset + kCharSize;
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
@@ -5356,7 +5045,8 @@ class Code: public HeapObject {
V(REGEXP) \
V(WASM_FUNCTION) \
V(WASM_TO_JS_FUNCTION) \
- V(JS_TO_WASM_FUNCTION)
+ V(JS_TO_WASM_FUNCTION) \
+ V(WASM_INTERPRETER_ENTRY)
#define IC_KIND_LIST(V) \
V(LOAD_IC) \
@@ -5413,6 +5103,10 @@ class Code: public HeapObject {
// [source_position_table]: ByteArray for the source positions table.
DECL_ACCESSORS(source_position_table, ByteArray)
+ // [protected_instructions]: Fixed array containing pairs of protected
+ // instruction offsets and their corresponding landing pad offsets.
+ DECL_ACCESSORS(protected_instructions, FixedArray)
+
// [raw_type_feedback_info]: This field stores various things, depending on
// the kind of the code object.
// FUNCTION => type feedback information.
@@ -5562,6 +5256,16 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
+ // [is_promise_rejection]: For kind BUILTIN, tells whether the exception
+ // thrown by the code will lead to promise rejection.
+ inline bool is_promise_rejection();
+ inline void set_is_promise_rejection(bool flag);
+
+ // [is_exception_caught]: For kind BUILTIN, tells whether the exception
+ // thrown by the code will be caught internally.
+ inline bool is_exception_caught();
+ inline void set_is_exception_caught(bool flag);
+
// [constant_pool]: The constant pool for this function.
inline Address constant_pool();
@@ -5711,9 +5415,6 @@ class Code: public HeapObject {
BailoutId TranslatePcOffsetToAstId(uint32_t pc_offset);
uint32_t TranslateAstIdToPcOffset(BailoutId ast_id);
- int LookupRangeInHandlerTable(int code_offset, int* data,
- HandlerTable::CatchPrediction* prediction);
-
#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kToBeExecutedOnceCodeAge = -3,
@@ -5739,12 +5440,12 @@ class Code: public HeapObject {
void MakeYoung(Isolate* isolate);
void PreAge(Isolate* isolate);
void MarkToBeExecutedOnce(Isolate* isolate);
- void MakeOlder(MarkingParity);
+ void MakeOlder();
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
bool IsOld();
Age GetAge();
static inline Code* GetPreAgedCodeAgeStub(Isolate* isolate) {
- return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
+ return GetCodeAgeStub(isolate, kNotExecutedCodeAge);
}
void PrintDeoptLocation(FILE* out, Address pc);
@@ -5782,7 +5483,10 @@ class Code: public HeapObject {
// For FUNCTION kind, we store the type feedback info here.
static const int kTypeFeedbackInfoOffset =
kSourcePositionTableOffset + kPointerSize;
- static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kProtectedInstructionOffset =
+ kTypeFeedbackInfoOffset + kPointerSize;
+ static const int kNextCodeLinkOffset =
+ kProtectedInstructionOffset + kPointerSize;
static const int kGCMetadataOffset = kNextCodeLinkOffset + kPointerSize;
static const int kInstructionSizeOffset = kGCMetadataOffset + kPointerSize;
static const int kICAgeOffset = kInstructionSizeOffset + kIntSize;
@@ -5797,6 +5501,9 @@ class Code: public HeapObject {
kConstantPoolOffset + kConstantPoolSize;
static const int kHeaderPaddingStart = kBuiltinIndexOffset + kIntSize;
+ enum TrapFields { kTrapCodeOffset, kTrapLandingOffset, kTrapDataSize };
+
// Add padding to align the instruction start following right after
// the Code object header.
static const int kHeaderSize =
@@ -5836,9 +5543,11 @@ class Code: public HeapObject {
static const int kCanHaveWeakObjects = kIsTurbofannedBit + 1;
// Could be moved to overlap previous bits when we need more space.
static const int kIsConstructStub = kCanHaveWeakObjects + 1;
+ static const int kIsPromiseRejection = kIsConstructStub + 1;
+ static const int kIsExceptionCaught = kIsPromiseRejection + 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
- STATIC_ASSERT(kIsConstructStub + 1 <= 32);
+ STATIC_ASSERT(kIsExceptionCaught + 1 <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
@@ -5850,6 +5559,10 @@ class Code: public HeapObject {
: public BitField<bool, kCanHaveWeakObjects, 1> {}; // NOLINT
class IsConstructStubField : public BitField<bool, kIsConstructStub, 1> {
}; // NOLINT
+ class IsPromiseRejectionField
+ : public BitField<bool, kIsPromiseRejection, 1> {}; // NOLINT
+ class IsExceptionCaughtField : public BitField<bool, kIsExceptionCaught, 1> {
+ }; // NOLINT
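
Both new flag fields follow the BitField pattern used throughout Code's flag words. A self-contained sketch of that pattern; the stand-in template below mirrors v8::internal::BitField's encode/update/decode, and the bit positions are illustrative since the real ones chain off kIsConstructStub:

    #include <cstdint>

    // Minimal stand-in for v8::internal::BitField.
    template <class T, int kShift, int kSize>
    struct BitFieldSketch {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> kShift);
      }
    };

    using IsPromiseRejectionField = BitFieldSketch<bool, 27, 1>;  // position illustrative
    using IsExceptionCaughtField = BitFieldSketch<bool, 28, 1>;   // position illustrative

    void Example() {
      uint32_t flags = 0;
      flags = IsPromiseRejectionField::update(flags, true);  // set one flag
      bool caught = IsExceptionCaughtField::decode(flags);   // false, untouched
      (void)caught;
    }
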
// KindSpecificFlags2 layout (ALL)
static const int kIsCrankshaftedBit = 0;
@@ -5886,16 +5599,12 @@ class Code: public HeapObject {
// Code aging
byte* FindCodeAgeSequence();
- static void GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity);
- static void GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity);
- static Code* GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity);
+ static Age GetCodeAge(Isolate* isolate, byte* sequence);
+ static Age GetAgeOfCodeAgeStub(Code* code);
+ static Code* GetCodeAgeStub(Isolate* isolate, Age age);
// Code aging -- platform-specific
- static void PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence, Age age,
- MarkingParity parity);
+ static void PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Age age);
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
@@ -5931,10 +5640,6 @@ class AbstractCode : public HeapObject {
// Set the source position table.
inline void set_source_position_table(ByteArray* source_position_table);
- // Return the exception handler table.
- inline int LookupRangeInHandlerTable(
- int code_offset, int* data, HandlerTable::CatchPrediction* prediction);
-
// Returns the size of instructions and the metadata.
inline int SizeIncludingMetadata();
@@ -6334,21 +6039,22 @@ class Map: public HeapObject {
int target_inobject, int target_unused,
int* old_number_of_fields);
// TODO(ishell): moveit!
- static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
+ static Handle<Map> GeneralizeAllFields(Handle<Map> map);
MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
Representation rep1, Handle<FieldType> type1, Representation rep2,
Handle<FieldType> type2, Isolate* isolate);
- static void GeneralizeFieldType(Handle<Map> map, int modify_index,
- Representation new_representation,
- Handle<FieldType> new_field_type);
+ static void GeneralizeField(Handle<Map> map, int modify_index,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
- static inline Handle<Map> ReconfigureProperty(
- Handle<Map> map, int modify_index, PropertyKind new_kind,
- PropertyAttributes new_attributes, Representation new_representation,
- Handle<FieldType> new_field_type, StoreMode store_mode);
+ static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
- static inline Handle<Map> ReconfigureElementsKind(
- Handle<Map> map, ElementsKind new_elements_kind);
+ static Handle<Map> ReconfigureElementsKind(Handle<Map> map,
+ ElementsKind new_elements_kind);
static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
int descriptor_number,
@@ -6471,6 +6177,9 @@ class Map: public HeapObject {
Descriptor* descriptor,
TransitionFlag flag);
+ static Handle<Object> WrapFieldType(Handle<FieldType> type);
+ static FieldType* UnwrapFieldType(Object* wrapped_type);
+
MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
PropertyAttributes attributes, Representation representation,
@@ -6568,6 +6277,8 @@ class Map: public HeapObject {
Code* LookupInCodeCache(Name* name, Code::Flags code);
+ static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
+
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -6592,6 +6303,8 @@ class Map: public HeapObject {
inline bool IsJSTypedArrayMap();
inline bool IsJSDataViewMap();
+ inline bool IsSpecialReceiverMap();
+
inline bool CanOmitMapChecks();
static void AddDependentCode(Handle<Map> map,
@@ -6735,6 +6448,11 @@ class Map: public HeapObject {
Handle<Map> split_map, Handle<DescriptorArray> descriptors,
Handle<LayoutDescriptor> full_layout_descriptor);
+ // Fires when the layout of an object with a leaf map changes.
+ // This includes adding transitions to the leaf map or changing
+ // the descriptor array.
+ inline void NotifyLeafMapLayoutChange();
+
private:
// Returns the map that this (root) map transitions to if its elements_kind
// is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
@@ -6783,23 +6501,10 @@ class Map: public HeapObject {
static Handle<Map> CopyNormalized(Handle<Map> map,
PropertyNormalizationMode mode);
- static Handle<Map> Reconfigure(Handle<Map> map,
- ElementsKind new_elements_kind,
- int modify_index, PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type,
- StoreMode store_mode);
-
- static Handle<Map> CopyGeneralizeAllRepresentations(
+ // TODO(ishell): Move to MapUpdater.
+ static Handle<Map> CopyGeneralizeAllFields(
Handle<Map> map, ElementsKind elements_kind, int modify_index,
- StoreMode store_mode, PropertyKind kind, PropertyAttributes attributes,
- const char* reason);
-
- // Fires when the layout of an object with a leaf map changes.
- // This includes adding transitions to the leaf map or changing
- // the descriptor array.
- inline void NotifyLeafMapLayoutChange();
+ PropertyKind kind, PropertyAttributes attributes, const char* reason);
void DeprecateTransitionTree();
@@ -6807,8 +6512,6 @@ class Map: public HeapObject {
LayoutDescriptor* new_layout_descriptor);
- Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
-
// Update field type of the given descriptor to new representation and new
// type. The type must be prepared for storing in descriptor array:
// it must be either a simple type or a map wrapped in a weak cell.
@@ -6816,8 +6519,10 @@ class Map: public HeapObject {
Representation new_representation,
Handle<Object> new_wrapped_type);
+ // TODO(ishell): Move to MapUpdater.
void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes);
+ // TODO(ishell): Move to MapUpdater.
void PrintGeneralization(FILE* file, const char* reason, int modify_index,
int split, int descriptors, bool constant_to_field,
Representation old_representation,
@@ -6826,10 +6531,11 @@ class Map: public HeapObject {
MaybeHandle<Object> old_value,
MaybeHandle<FieldType> new_field_type,
MaybeHandle<Object> new_value);
-
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
+ friend class MapUpdater;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
};
@@ -6850,8 +6556,9 @@ class PromiseResolveThenableJobInfo : public Struct {
DECL_ACCESSORS(then, JSReceiver)
DECL_ACCESSORS(resolve, JSFunction)
DECL_ACCESSORS(reject, JSFunction)
- DECL_ACCESSORS(debug_id, Object)
- DECL_ACCESSORS(debug_name, Object)
+
+ DECL_INT_ACCESSORS(debug_id)
+
DECL_ACCESSORS(context, Context)
static const int kThenableOffset = Struct::kHeaderSize;
@@ -6859,8 +6566,7 @@ class PromiseResolveThenableJobInfo : public Struct {
static const int kResolveOffset = kThenOffset + kPointerSize;
static const int kRejectOffset = kResolveOffset + kPointerSize;
static const int kDebugIdOffset = kRejectOffset + kPointerSize;
- static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
- static const int kContextOffset = kDebugNameOffset + kPointerSize;
+ static const int kContextOffset = kDebugIdOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
DECLARE_CAST(PromiseResolveThenableJobInfo)
@@ -6871,22 +6577,33 @@ class PromiseResolveThenableJobInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PromiseResolveThenableJobInfo);
};
+class JSPromise;
+
// Struct to hold state required for PromiseReactionJob.
class PromiseReactionJobInfo : public Struct {
public:
DECL_ACCESSORS(value, Object)
DECL_ACCESSORS(tasks, Object)
- DECL_ACCESSORS(deferred, Object)
- DECL_ACCESSORS(debug_id, Object)
- DECL_ACCESSORS(debug_name, Object)
+
+ // See the comment in JSPromise for information on what states these
+ // deferred fields can be in.
+ DECL_ACCESSORS(deferred_promise, Object)
+ DECL_ACCESSORS(deferred_on_resolve, Object)
+ DECL_ACCESSORS(deferred_on_reject, Object)
+
+ DECL_INT_ACCESSORS(debug_id)
+
DECL_ACCESSORS(context, Context)
static const int kValueOffset = Struct::kHeaderSize;
static const int kTasksOffset = kValueOffset + kPointerSize;
- static const int kDeferredOffset = kTasksOffset + kPointerSize;
- static const int kDebugIdOffset = kDeferredOffset + kPointerSize;
- static const int kDebugNameOffset = kDebugIdOffset + kPointerSize;
- static const int kContextOffset = kDebugNameOffset + kPointerSize;
+ static const int kDeferredPromiseOffset = kTasksOffset + kPointerSize;
+ static const int kDeferredOnResolveOffset =
+ kDeferredPromiseOffset + kPointerSize;
+ static const int kDeferredOnRejectOffset =
+ kDeferredOnResolveOffset + kPointerSize;
+ static const int kDebugIdOffset = kDeferredOnRejectOffset + kPointerSize;
+ static const int kContextOffset = kDebugIdOffset + kPointerSize;
static const int kSize = kContextOffset + kPointerSize;
DECLARE_CAST(PromiseReactionJobInfo)
@@ -6975,10 +6692,27 @@ class PrototypeInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(PrototypeInfo);
};
-class Tuple3 : public Struct {
+class Tuple2 : public Struct {
public:
DECL_ACCESSORS(value1, Object)
DECL_ACCESSORS(value2, Object)
+
+ DECLARE_CAST(Tuple2)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Tuple2)
+ DECLARE_VERIFIER(Tuple2)
+
+ static const int kValue1Offset = HeapObject::kHeaderSize;
+ static const int kValue2Offset = kValue1Offset + kPointerSize;
+ static const int kSize = kValue2Offset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Tuple2);
+};
+
+class Tuple3 : public Tuple2 {
+ public:
DECL_ACCESSORS(value3, Object)
DECLARE_CAST(Tuple3)
@@ -6987,9 +6721,7 @@ class Tuple3 : public Struct {
DECLARE_PRINTER(Tuple3)
DECLARE_VERIFIER(Tuple3)
- static const int kValue1Offset = HeapObject::kHeaderSize;
- static const int kValue2Offset = kValue1Offset + kPointerSize;
- static const int kValue3Offset = kValue2Offset + kPointerSize;
+ static const int kValue3Offset = Tuple2::kSize;
static const int kSize = kValue3Offset + kPointerSize;
private:
@@ -7022,6 +6754,27 @@ class ContextExtension : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(ContextExtension);
};
+// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
+// expressions. Used to communicate with the runtime for literal boilerplate
+// creation within the {Runtime_CreateArrayLiteral} method.
+class ConstantElementsPair : public Struct {
+ public:
+ DECL_INT_ACCESSORS(elements_kind)
+ DECL_ACCESSORS(constant_values, FixedArrayBase)
+
+ DECLARE_CAST(ConstantElementsPair)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ConstantElementsPair)
+ DECLARE_VERIFIER(ConstantElementsPair)
+
+ static const int kElementsKindOffset = HeapObject::kHeaderSize;
+ static const int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
+ static const int kSize = kConstantValuesOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
+};
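
ConstantElementsPair bundles the {ElementsKind} with the constant values so the runtime receives them as one object. A hedged sketch of the consumer side; the function name and body are hypothetical, only the two accessors come from the class above:

    void CreateBoilerplateFrom(ConstantElementsPair* pair) {
      ElementsKind kind = static_cast<ElementsKind>(pair->elements_kind());
      FixedArrayBase* constants = pair->constant_values();
      // ... allocate a boilerplate array of {kind} and copy {constants} ...
    }
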
// Script describes a script which has been added to the VM.
class Script: public Struct {
@@ -7031,7 +6784,8 @@ class Script: public Struct {
TYPE_NATIVE = 0,
TYPE_EXTENSION = 1,
TYPE_NORMAL = 2,
- TYPE_WASM = 3
+ TYPE_WASM = 3,
+ TYPE_INSPECTOR = 4
};
// Script compilation types.
@@ -7085,7 +6839,7 @@ class Script: public Struct {
// [shared_function_infos]: weak fixed array containing all shared
// function infos created from this script.
- DECL_ACCESSORS(shared_function_infos, Object)
+ DECL_ACCESSORS(shared_function_infos, FixedArray)
// [flags]: Holds an exciting bitfield.
DECL_INT_ACCESSORS(flags)
@@ -7110,11 +6864,6 @@ class Script: public Struct {
inline CompilationState compilation_state();
inline void set_compilation_state(CompilationState state);
- // [hide_source]: determines whether the script source can be exposed as
- // function source. Encoded in the 'flags' field.
- inline bool hide_source();
- inline void set_hide_source(bool value);
-
// [origin_options]: optional attributes set by the embedder via ScriptOrigin,
// and used by the embedder to make decisions about the script. V8 just passes
// this through. Encoded in the 'flags' field.
@@ -7127,7 +6876,7 @@ class Script: public Struct {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
- static Handle<Object> GetNameOrSourceURL(Handle<Script> script);
+ Object* GetNameOrSourceURL();
// Set eval origin for stack trace formatting.
static void SetEvalOrigin(Handle<Script> script,
@@ -7175,7 +6924,8 @@ class Script: public Struct {
// Look through the list of existing shared function infos to find one
// that matches the function literal. Return empty handle if not found.
- MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(FunctionLiteral* fun);
+ MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(Isolate* isolate,
+ FunctionLiteral* fun);
// Iterate over all script objects on the heap.
class Iterator {
@@ -7215,8 +6965,7 @@ class Script: public Struct {
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
- static const int kHideSourceBit = 2;
- static const int kOriginOptionsShift = 3;
+ static const int kOriginOptionsShift = 2;
static const int kOriginOptionsSize = 3;
static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
<< kOriginOptionsShift;
@@ -7235,11 +6984,26 @@ class Script: public Struct {
// Installation of ids for the selected builtin functions is handled
// by the bootstrapper.
#define FUNCTIONS_WITH_ID_LIST(V) \
+ V(Array.prototype, concat, ArrayConcat) \
+ V(Array.prototype, every, ArrayEvery) \
+ V(Array.prototype, fill, ArrayFill) \
+ V(Array.prototype, filter, ArrayFilter) \
+ V(Array.prototype, findIndex, ArrayFindIndex) \
+ V(Array.prototype, forEach, ArrayForEach) \
+ V(Array.prototype, includes, ArrayIncludes) \
V(Array.prototype, indexOf, ArrayIndexOf) \
+ V(Array.prototype, join, ArrayJoin) \
V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
- V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, map, ArrayMap) \
V(Array.prototype, pop, ArrayPop) \
+ V(Array.prototype, push, ArrayPush) \
+ V(Array.prototype, reverse, ArrayReverse) \
V(Array.prototype, shift, ArrayShift) \
+ V(Array.prototype, slice, ArraySlice) \
+ V(Array.prototype, some, ArraySome) \
+ V(Array.prototype, splice, ArraySplice) \
+ V(Array.prototype, unshift, ArrayUnshift) \
+ V(Date, now, DateNow) \
V(Date.prototype, getDate, DateGetDate) \
V(Date.prototype, getDay, DateGetDay) \
V(Date.prototype, getFullYear, DateGetFullYear) \
@@ -7252,13 +7016,33 @@ class Script: public Struct {
V(Function.prototype, apply, FunctionApply) \
V(Function.prototype, call, FunctionCall) \
V(Object.prototype, hasOwnProperty, ObjectHasOwnProperty) \
+ V(RegExp.prototype, compile, RegExpCompile) \
+ V(RegExp.prototype, exec, RegExpExec) \
+ V(RegExp.prototype, test, RegExpTest) \
+ V(RegExp.prototype, toString, RegExpToString) \
V(String.prototype, charCodeAt, StringCharCodeAt) \
V(String.prototype, charAt, StringCharAt) \
+ V(String.prototype, codePointAt, StringCodePointAt) \
V(String.prototype, concat, StringConcat) \
+ V(String.prototype, endsWith, StringEndsWith) \
+ V(String.prototype, includes, StringIncludes) \
+ V(String.prototype, indexOf, StringIndexOf) \
+ V(String.prototype, lastIndexOf, StringLastIndexOf) \
+ V(String.prototype, repeat, StringRepeat) \
+ V(String.prototype, slice, StringSlice) \
+ V(String.prototype, startsWith, StringStartsWith) \
V(String.prototype, substr, StringSubstr) \
+ V(String.prototype, substring, StringSubstring) \
V(String.prototype, toLowerCase, StringToLowerCase) \
+ V(String.prototype, toString, StringToString) \
V(String.prototype, toUpperCase, StringToUpperCase) \
+ V(String.prototype, trim, StringTrim) \
+ V(String.prototype, trimLeft, StringTrimLeft) \
+ V(String.prototype, trimRight, StringTrimRight) \
+ V(String.prototype, valueOf, StringValueOf) \
V(String, fromCharCode, StringFromCharCode) \
+ V(String, fromCodePoint, StringFromCodePoint) \
+ V(String, raw, StringRaw) \
V(Math, random, MathRandom) \
V(Math, floor, MathFloor) \
V(Math, round, MathRound) \
@@ -7299,7 +7083,8 @@ class Script: public Struct {
V(Number, isSafeInteger, NumberIsSafeInteger) \
V(Number, parseFloat, NumberParseFloat) \
V(Number, parseInt, NumberParseInt) \
- V(Number.prototype, toString, NumberToString)
+ V(Number.prototype, toString, NumberToString) \
+ V(Object, create, ObjectCreate)
#define ATOMIC_FUNCTIONS_WITH_ID_LIST(V) \
V(Atomics, load, AtomicsLoad) \
@@ -7344,15 +7129,13 @@ enum BuiltinFunctionId {
kStringIteratorNext,
};
-
// Result of searching in an optimized code map of a SharedFunctionInfo. Note
// that both {code} and {literals} can be NULL to pass search result status.
struct CodeAndLiterals {
- Code* code; // Cached optimized code.
+ Code* code; // Cached optimized code.
LiteralsArray* literals; // Cached literals array.
};
-
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo: public HeapObject {
@@ -7405,9 +7188,6 @@ class SharedFunctionInfo: public HeapObject {
// the entry itself is left in the map in order to proceed sharing literals.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
- // Trims the optimized code map after entries have been removed.
- void TrimOptimizedCodeMap(int shrink_by);
-
static Handle<LiteralsArray> FindOrCreateLiterals(
Handle<SharedFunctionInfo> shared, Handle<Context> native_context);
@@ -7429,8 +7209,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kContextOffset = 0;
static const int kCachedCodeOffset = 1;
static const int kLiteralsOffset = 2;
- static const int kOsrAstIdOffset = 3;
- static const int kEntryLength = 4;
+ static const int kEntryLength = 3;
static const int kInitialLength = kEntriesStart + kEntryLength;
static const int kNotFound = -1;
@@ -7444,8 +7223,6 @@ class SharedFunctionInfo: public HeapObject {
kPointerSize * (kCachedCodeOffset - kEntryLength);
static const int kOffsetToPreviousLiterals =
FixedArray::kHeaderSize + kPointerSize * (kLiteralsOffset - kEntryLength);
- static const int kOffsetToPreviousOsrAstId =
- FixedArray::kHeaderSize + kPointerSize * (kOsrAstIdOffset - kEntryLength);
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
@@ -7488,6 +7265,12 @@ class SharedFunctionInfo: public HeapObject {
// available.
DECL_ACCESSORS(feedback_metadata, TypeFeedbackMetadata)
+ // [function_literal_id] - uniquely identifies the FunctionLiteral this
+ // SharedFunctionInfo represents within its script, or -1 if this
+ // SharedFunctionInfo object doesn't correspond to a parsed FunctionLiteral.
+ inline int function_literal_id() const;
+ inline void set_function_literal_id(int value);
+
#if TRACE_MAPS
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
@@ -7563,6 +7346,9 @@ class SharedFunctionInfo: public HeapObject {
// The function's name if it is non-empty, otherwise the inferred name.
String* DebugName();
+ // The function cannot cause any side effects.
+ bool HasNoSideEffect();
+
// Used for flags such as --hydrogen-filter.
bool PassesFilter(const char* raw_filter);
@@ -7646,30 +7432,20 @@ class SharedFunctionInfo: public HeapObject {
// which does not change this flag).
DECL_BOOLEAN_ACCESSORS(is_anonymous_expression)
- // Is this a function or top-level/eval code.
- DECL_BOOLEAN_ACCESSORS(is_function)
-
- // Indicates that code for this function cannot be compiled with Crankshaft.
- DECL_BOOLEAN_ACCESSORS(dont_crankshaft)
+ // Indicates that code for this function must be compiled through the
+ // Ignition / TurboFan pipeline, and is unsupported by
+ // FullCodegen / Crankshaft.
+ DECL_BOOLEAN_ACCESSORS(must_use_ignition_turbo)
// Indicates that code for this function cannot be flushed.
DECL_BOOLEAN_ACCESSORS(dont_flush)
- // Indicates that this is a constructor for a base class with instance fields.
- DECL_BOOLEAN_ACCESSORS(requires_class_field_init)
- // Indicates that this is a synthesized function to set up class instance
- // fields.
- DECL_BOOLEAN_ACCESSORS(is_class_field_initializer)
-
// Indicates that this function is an asm function.
DECL_BOOLEAN_ACCESSORS(asm_function)
// Indicates that the shared function info is deserialized from cache.
DECL_BOOLEAN_ACCESSORS(deserialized)
- // Indicates that the the shared function info has never been compiled before.
- DECL_BOOLEAN_ACCESSORS(never_compiled)
-
// Whether this function was created from a FunctionDeclaration.
DECL_BOOLEAN_ACCESSORS(is_declaration)
@@ -7679,6 +7455,12 @@ class SharedFunctionInfo: public HeapObject {
// Indicates that asm->wasm conversion failed and should not be re-attempted.
DECL_BOOLEAN_ACCESSORS(is_asm_wasm_broken)
+ // Indicates that the function cannot cause side-effects.
+ DECL_BOOLEAN_ACCESSORS(has_no_side_effect)
+
+ // Indicates that |has_no_side_effect| has been computed and set.
+ DECL_BOOLEAN_ACCESSORS(computed_has_no_side_effect)
+
inline FunctionKind kind() const;
inline void set_kind(FunctionKind kind);
@@ -7733,8 +7515,8 @@ class SharedFunctionInfo: public HeapObject {
// Tells whether this function should be subject to debugging.
inline bool IsSubjectToDebugging();
- // Whether this function is defined in native code or extensions.
- inline bool IsBuiltin();
+ // Whether this function is defined in user-provided JavaScript code.
+ inline bool IsUserJavaScript();
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -7757,19 +7539,35 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
- // Iterate over all shared function infos.
- class Iterator {
+ // Iterate over all shared function infos in a given script.
+ class ScriptIterator {
public:
- explicit Iterator(Isolate* isolate);
+ explicit ScriptIterator(Handle<Script> script);
+ ScriptIterator(Isolate* isolate, Handle<FixedArray> shared_function_infos);
SharedFunctionInfo* Next();
+ // Reset the iterator to run on |script|.
+ void Reset(Handle<Script> script);
+
private:
- bool NextScript();
+ Isolate* isolate_;
+ Handle<FixedArray> shared_function_infos_;
+ int index_;
+ DISALLOW_COPY_AND_ASSIGN(ScriptIterator);
+ };
+
+ // Iterate over all shared function infos on the heap.
+ class GlobalIterator {
+ public:
+ explicit GlobalIterator(Isolate* isolate);
+ SharedFunctionInfo* Next();
+ private:
Script::Iterator script_iterator_;
- WeakFixedArray::Iterator sfi_iterator_;
+ WeakFixedArray::Iterator noscript_sfi_iterator_;
+ SharedFunctionInfo::ScriptIterator sfi_iterator_;
DisallowHeapAllocation no_gc_;
- DISALLOW_COPY_AND_ASSIGN(Iterator);
+ DISALLOW_COPY_AND_ASSIGN(GlobalIterator);
};
DECLARE_CAST(SharedFunctionInfo)
@@ -7794,13 +7592,15 @@ class SharedFunctionInfo: public HeapObject {
static const int kFunctionIdentifierOffset = kDebugInfoOffset + kPointerSize;
static const int kFeedbackMetadataOffset =
kFunctionIdentifierOffset + kPointerSize;
+ static const int kFunctionLiteralIdOffset =
+ kFeedbackMetadataOffset + kPointerSize;
#if TRACE_MAPS
- static const int kUniqueIdOffset = kFeedbackMetadataOffset + kPointerSize;
+ static const int kUniqueIdOffset = kFunctionLiteralIdOffset + kPointerSize;
static const int kLastPointerFieldOffset = kUniqueIdOffset;
#else
// Just to not break the postmortem support with conditional offsets
- static const int kUniqueIdOffset = kFeedbackMetadataOffset;
- static const int kLastPointerFieldOffset = kFeedbackMetadataOffset;
+ static const int kUniqueIdOffset = kFunctionLiteralIdOffset;
+ static const int kLastPointerFieldOffset = kFunctionLiteralIdOffset;
#endif
#if V8_HOST_ARCH_32_BIT
@@ -7927,29 +7727,29 @@ class SharedFunctionInfo: public HeapObject {
kAllowLazyCompilation,
kMarkedForTierUp,
kOptimizationDisabled,
- kNeverCompiled,
+ kHasDuplicateParameters,
kNative,
kStrictModeFunction,
kUsesArguments,
kNeedsHomeObject,
// byte 1
- kHasDuplicateParameters,
kForceInline,
kIsAsmFunction,
kIsAnonymousExpression,
kNameShouldPrintAsAnonymous,
- kIsFunction,
- kDontCrankshaft,
+ kMustUseIgnitionTurbo,
kDontFlush,
+ kIsDeclaration,
+
+ kUnused, // unused.
// byte 2
kFunctionKind,
// rest of byte 2 and first two bits of byte 3 are used by FunctionKind
// byte 3
kDeserialized = kFunctionKind + 10,
- kIsDeclaration,
kIsAsmWasmBroken,
- kRequiresClassFieldInit,
- kIsClassFieldInitializer,
+ kHasNoSideEffect,
+ kComputedHasNoSideEffect,
kCompilerHintsCount, // Pseudo entry
};
// kFunctionKind has to be byte-aligned
@@ -8031,11 +7831,10 @@ class SharedFunctionInfo: public HeapObject {
#undef BYTE_OFFSET
private:
- // Returns entry from optimized code map for specified context and OSR entry.
+ // Returns entry from optimized code map for specified context.
// The result is either kNotFound, or a start index of the context-dependent
// entry.
- int SearchOptimizedCodeMapEntry(Context* native_context,
- BailoutId osr_ast_id);
+ int SearchOptimizedCodeMapEntry(Context* native_context);
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -8066,8 +7865,7 @@ class JSGeneratorObject: public JSObject {
// [input_or_debug_pos]
// For executing generators: the most recent input value.
- // For suspended new-style generators: debug information (bytecode offset).
- // For suspended old-style generators: unused.
+ // For suspended generators: debug information (bytecode offset).
// There is currently no need to remember the most recent input value for a
// suspended generator.
DECL_ACCESSORS(input_or_debug_pos, Object)
@@ -8091,8 +7889,8 @@ class JSGeneratorObject: public JSObject {
// is suspended.
int source_position() const;
- // [operand_stack]: Saved operand stack.
- DECL_ACCESSORS(operand_stack, FixedArray)
+ // [register_file]: Saved interpreter register file.
+ DECL_ACCESSORS(register_file, FixedArray)
DECLARE_CAST(JSGeneratorObject)
@@ -8110,93 +7908,13 @@ class JSGeneratorObject: public JSObject {
static const int kInputOrDebugPosOffset = kReceiverOffset + kPointerSize;
static const int kResumeModeOffset = kInputOrDebugPosOffset + kPointerSize;
static const int kContinuationOffset = kResumeModeOffset + kPointerSize;
- static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
- static const int kSize = kOperandStackOffset + kPointerSize;
+ static const int kRegisterFileOffset = kContinuationOffset + kPointerSize;
+ static const int kSize = kRegisterFileOffset + kPointerSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
-class ModuleInfoEntry : public Struct {
- public:
- DECLARE_CAST(ModuleInfoEntry)
- DECLARE_PRINTER(ModuleInfoEntry)
- DECLARE_VERIFIER(ModuleInfoEntry)
-
- DECL_ACCESSORS(export_name, Object)
- DECL_ACCESSORS(local_name, Object)
- DECL_ACCESSORS(import_name, Object)
- DECL_INT_ACCESSORS(module_request)
- DECL_INT_ACCESSORS(cell_index)
- DECL_INT_ACCESSORS(beg_pos)
- DECL_INT_ACCESSORS(end_pos)
-
- static Handle<ModuleInfoEntry> New(Isolate* isolate,
- Handle<Object> export_name,
- Handle<Object> local_name,
- Handle<Object> import_name,
- int module_request, int cell_index,
- int beg_pos, int end_pos);
-
- static const int kExportNameOffset = HeapObject::kHeaderSize;
- static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
- static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
- static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
- static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
- static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
- static const int kEndPosOffset = kBegPosOffset + kPointerSize;
- static const int kSize = kEndPosOffset + kPointerSize;
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
-};
-
-// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
-class ModuleInfo : public FixedArray {
- public:
- DECLARE_CAST(ModuleInfo)
-
- static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
- ModuleDescriptor* descr);
-
- inline FixedArray* module_requests() const;
- inline FixedArray* special_exports() const;
- inline FixedArray* regular_exports() const;
- inline FixedArray* namespace_imports() const;
- inline FixedArray* regular_imports() const;
-
- // Accessors for [regular_exports].
- int RegularExportCount() const;
- String* RegularExportLocalName(int i) const;
- int RegularExportCellIndex(int i) const;
- FixedArray* RegularExportExportNames(int i) const;
-
- static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
- Handle<String> local_name);
-
-#ifdef DEBUG
- inline bool Equals(ModuleInfo* other) const;
-#endif
-
- private:
- friend class Factory;
- friend class ModuleDescriptor;
- enum {
- kModuleRequestsIndex,
- kSpecialExportsIndex,
- kRegularExportsIndex,
- kNamespaceImportsIndex,
- kRegularImportsIndex,
- kLength
- };
- enum {
- kRegularExportLocalNameOffset,
- kRegularExportCellIndexOffset,
- kRegularExportExportNamesOffset,
- kRegularExportLength
- };
- DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
-};
// When importing a module namespace (import * as foo from "bar"), a
// JSModuleNamespace object (representing module "bar") is created and bound to
// the declared variable (foo). A module can have at most one namespace object.
@@ -8214,8 +7932,16 @@ class JSModuleNamespace : public JSObject {
// schedule an exception and return Nothing.
MUST_USE_RESULT MaybeHandle<Object> GetExport(Handle<String> name);
+ // In-object fields.
+ enum {
+ kToStringTagFieldIndex,
+ kInObjectFieldCount,
+ };
+
static const int kModuleOffset = JSObject::kHeaderSize;
- static const int kSize = kModuleOffset + kPointerSize;
+ static const int kHeaderSize = kModuleOffset + kPointerSize;
+
+ static const int kSize = kHeaderSize + kPointerSize * kInObjectFieldCount;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSModuleNamespace);
@@ -8294,8 +8020,6 @@ class Module : public Struct {
static const int kSize = kRequestedModulesOffset + kPointerSize;
private:
- enum { kEvaluatedBit };
-
static void CreateExport(Handle<Module> module, int cell_index,
Handle<FixedArray> names);
static void CreateIndirectExport(Handle<Module> module, Handle<String> name,
@@ -8713,10 +8437,6 @@ class JSDate: public JSObject {
void SetValue(Object* value, bool is_value_nan);
- // ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ]
- static MUST_USE_RESULT MaybeHandle<Object> ToPrimitive(
- Handle<JSReceiver> receiver, Handle<Object> hint);
-
// Dispatched behavior.
DECLARE_PRINTER(JSDate)
DECLARE_VERIFIER(JSDate)
@@ -8813,6 +8533,9 @@ class JSMessageObject: public JSObject {
// position, or the empty string if the position is invalid.
Handle<String> GetSourceLine() const;
+ inline int error_level() const;
+ inline void set_error_level(int level);
+
DECLARE_CAST(JSMessageObject)
// Dispatched behavior.
@@ -8826,13 +8549,103 @@ class JSMessageObject: public JSObject {
static const int kStackFramesOffset = kScriptOffset + kPointerSize;
static const int kStartPositionOffset = kStackFramesOffset + kPointerSize;
static const int kEndPositionOffset = kStartPositionOffset + kPointerSize;
- static const int kSize = kEndPositionOffset + kPointerSize;
+ static const int kErrorLevelOffset = kEndPositionOffset + kPointerSize;
+ static const int kSize = kErrorLevelOffset + kPointerSize;
typedef FixedBodyDescriptor<HeapObject::kMapOffset,
kStackFramesOffset + kPointerSize,
kSize> BodyDescriptor;
};
+class JSPromise;
+
+// TODO(caitp): Make this a Struct once properties are no longer accessed from
+// JS
+class JSPromiseCapability : public JSObject {
+ public:
+ DECLARE_CAST(JSPromiseCapability)
+
+ DECLARE_VERIFIER(JSPromiseCapability)
+
+ DECL_ACCESSORS(promise, Object)
+ DECL_ACCESSORS(resolve, Object)
+ DECL_ACCESSORS(reject, Object)
+
+ static const int kPromiseOffset = JSObject::kHeaderSize;
+ static const int kResolveOffset = kPromiseOffset + kPointerSize;
+ static const int kRejectOffset = kResolveOffset + kPointerSize;
+ static const int kSize = kRejectOffset + kPointerSize;
+
+ enum InObjectPropertyIndex {
+ kPromiseIndex,
+ kResolveIndex,
+ kRejectIndex,
+ kInObjectPropertyCount // Dummy.
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSPromiseCapability);
+};
+
+class JSPromise : public JSObject {
+ public:
+ DECL_INT_ACCESSORS(status)
+ DECL_ACCESSORS(result, Object)
+
+ // There are 3 possible states for these fields --
+ // 1) Undefined -- This is the zero state, when no callbacks or
+ // deferred fields are registered.
+ //
+ // 2) Object -- A single Callable is directly attached to each of
+ // fulfill_reactions and reject_reactions, and the deferred fields are
+ // directly attached to their slots. In this state, deferred_promise
+ // is a JSReceiver and deferred_on_{resolve, reject} are Callables.
+ //
+ // 3) FixedArray -- There is more than one callback, and the callbacks
+ // and deferred fields are held in FixedArrays.
+ DECL_ACCESSORS(deferred_promise, Object)
+ DECL_ACCESSORS(deferred_on_resolve, Object)
+ DECL_ACCESSORS(deferred_on_reject, Object)
+ DECL_ACCESSORS(fulfill_reactions, Object)
+ DECL_ACCESSORS(reject_reactions, Object)
+
+ DECL_INT_ACCESSORS(flags)
+
+ // [has_handler]: Whether this promise has a reject handler or not.
+ DECL_BOOLEAN_ACCESSORS(has_handler)
+
+ // [handled_hint]: Whether this promise will be handled by a catch
+ // block in an async function.
+ DECL_BOOLEAN_ACCESSORS(handled_hint)
+
+ static const char* Status(int status);
+
+ DECLARE_CAST(JSPromise)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSPromise)
+ DECLARE_VERIFIER(JSPromise)
+
+ // Layout description.
+ static const int kStatusOffset = JSObject::kHeaderSize;
+ static const int kResultOffset = kStatusOffset + kPointerSize;
+ static const int kDeferredPromiseOffset = kResultOffset + kPointerSize;
+ static const int kDeferredOnResolveOffset =
+ kDeferredPromiseOffset + kPointerSize;
+ static const int kDeferredOnRejectOffset =
+ kDeferredOnResolveOffset + kPointerSize;
+ static const int kFulfillReactionsOffset =
+ kDeferredOnRejectOffset + kPointerSize;
+ static const int kRejectReactionsOffset =
+ kFulfillReactionsOffset + kPointerSize;
+ static const int kFlagsOffset = kRejectReactionsOffset + kPointerSize;
+ static const int kSize = kFlagsOffset + kPointerSize;
+
+ // Flags layout.
+ static const int kHasHandlerBit = 0;
+ static const int kHandledHintBit = 1;
+};
+
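
The has_handler and handled_hint accessors are backed by single bits of the flags field, per the layout constants above. A standalone sketch of the bit manipulation the two DECL_BOOLEAN_ACCESSORS imply (the generated accessors themselves live elsewhere):

    // From the flags layout above.
    constexpr int kHasHandlerBit = 0;
    constexpr int kHandledHintBit = 1;

    bool HasHandler(int flags) { return (flags >> kHasHandlerBit) & 1; }
    bool HandledHint(int flags) { return (flags >> kHandledHintBit) & 1; }

    // Setting a flag: build the mask, then or it in or mask it out.
    int SetHasHandler(int flags, bool value) {
      int mask = 1 << kHasHandlerBit;
      return value ? (flags | mask) : (flags & ~mask);
    }
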
// Regular expressions
// The regular expression holds a single reference to a FixedArray in
// the kDataOffset field.
@@ -9142,14 +8955,6 @@ class TypeFeedbackInfo: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackInfo);
};
-
-enum AllocationSiteMode {
- DONT_TRACK_ALLOCATION_SITE,
- TRACK_ALLOCATION_SITE,
- LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
-};
-
-
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
@@ -9583,6 +9388,10 @@ class Symbol: public Name {
// a load.
DECL_BOOLEAN_ACCESSORS(is_well_known_symbol)
+ // [is_public]: Whether this is a symbol created by Symbol.for. Calling
+ // Symbol.keyFor on such a symbol simply needs to return the attached name.
+ DECL_BOOLEAN_ACCESSORS(is_public)
+
DECLARE_CAST(Symbol)
// Dispatched behavior.
@@ -9594,14 +9403,16 @@ class Symbol: public Name {
static const int kFlagsOffset = kNameOffset + kPointerSize;
static const int kSize = kFlagsOffset + kPointerSize;
+ // Flags layout.
+ static const int kPrivateBit = 0;
+ static const int kWellKnownSymbolBit = 1;
+ static const int kPublicBit = 2;
+
typedef FixedBodyDescriptor<kNameOffset, kFlagsOffset, kSize> BodyDescriptor;
void SymbolShortPrint(std::ostream& os);
private:
- static const int kPrivateBit = 0;
- static const int kWellKnownSymbolBit = 1;
-
const char* PrivateSymbolToName() const;
#if TRACE_MAPS
@@ -9840,6 +9651,7 @@ class String: public Name {
// Conversion.
inline bool AsArrayIndex(uint32_t* index);
+ inline uint32_t ToValidIndex(Object* number);
// Trimming.
enum TrimMode { kTrim, kTrimLeft, kTrimRight };
@@ -10791,37 +10603,6 @@ class JSStringIterator : public JSObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStringIterator);
};
-// A JS iterator over the elements of a FixedArray.
-// This corresponds to ListIterator in ecma262/#sec-createlistiterator.
-class JSFixedArrayIterator : public JSObject {
- public:
- DECLARE_CAST(JSFixedArrayIterator)
- DECLARE_PRINTER(JSFixedArrayIterator)
- DECLARE_VERIFIER(JSFixedArrayIterator)
-
- // The array over which the iterator iterates.
- DECL_ACCESSORS(array, FixedArray)
-
- // The index of the array element that will be returned next.
- DECL_INT_ACCESSORS(index)
-
- // The initial value of the object's "next" property.
- DECL_ACCESSORS(initial_next, JSFunction)
-
- static const int kArrayOffset = JSObject::kHeaderSize;
- static const int kIndexOffset = kArrayOffset + kPointerSize;
- static const int kNextOffset = kIndexOffset + kPointerSize;
- static const int kHeaderSize = kNextOffset + kPointerSize;
-
- enum InObjectPropertyIndex {
- kNextIndex,
- kInObjectPropertyCount // Dummy.
- };
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(JSFixedArrayIterator);
-};
-
// OrderedHashTableIterator is an iterator that iterates over the keys and
// values of an OrderedHashTable.
//
@@ -11023,6 +10804,9 @@ class JSArrayBuffer: public JSObject {
inline bool is_shared();
inline void set_is_shared(bool value);
+ inline bool has_guard_region();
+ inline void set_has_guard_region(bool value);
+
DECLARE_CAST(JSArrayBuffer)
void Neuter();
@@ -11062,6 +10846,7 @@ class JSArrayBuffer: public JSObject {
class IsNeuterable : public BitField<bool, 2, 1> {};
class WasNeutered : public BitField<bool, 3, 1> {};
class IsShared : public BitField<bool, 4, 1> {};
+ class HasGuardRegion : public BitField<bool, 5, 1> {};
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
@@ -11106,6 +10891,11 @@ class JSTypedArray: public JSArrayBufferView {
DECL_ACCESSORS(length, Object)
inline uint32_t length_value() const;
+ // ES6 section 9.4.5.3 [[DefineOwnProperty]]
+ MUST_USE_RESULT static Maybe<bool> DefineOwnProperty(
+ Isolate* isolate, Handle<JSTypedArray> o, Handle<Object> key,
+ PropertyDescriptor* desc, ShouldThrow should_throw);
+
DECLARE_CAST(JSTypedArray)
ExternalArrayType type();
@@ -11543,6 +11333,7 @@ class FunctionTemplateInfo: public TemplateInfo {
public:
DECL_ACCESSORS(call_code, Object)
DECL_ACCESSORS(prototype_template, Object)
+ DECL_ACCESSORS(prototype_provider_template, Object)
DECL_ACCESSORS(parent_template, Object)
DECL_ACCESSORS(named_property_handler, Object)
DECL_ACCESSORS(indexed_property_handler, Object)
@@ -11577,11 +11368,15 @@ class FunctionTemplateInfo: public TemplateInfo {
DECLARE_PRINTER(FunctionTemplateInfo)
DECLARE_VERIFIER(FunctionTemplateInfo)
+ static const int kInvalidSerialNumber = 0;
+
static const int kCallCodeOffset = TemplateInfo::kHeaderSize;
static const int kPrototypeTemplateOffset =
kCallCodeOffset + kPointerSize;
- static const int kParentTemplateOffset =
+ static const int kPrototypeProviderTemplateOffset =
kPrototypeTemplateOffset + kPointerSize;
+ static const int kParentTemplateOffset =
+ kPrototypeProviderTemplateOffset + kPointerSize;
static const int kNamedPropertyHandlerOffset =
kParentTemplateOffset + kPointerSize;
static const int kIndexedPropertyHandlerOffset =
@@ -11755,11 +11550,6 @@ class BreakPointInfo: public Struct {
};
-#undef DECL_BOOLEAN_ACCESSORS
-#undef DECL_ACCESSORS
-#undef DECLARE_CAST
-#undef DECLARE_VERIFIER
-
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
@@ -11879,4 +11669,6 @@ class BooleanBit : public AllStatic {
} // NOLINT, false-positive due to second-order macros.
} // NOLINT, false-positive due to second-order macros.
+#include "src/objects/object-macros-undef.h"
+
#endif // V8_OBJECTS_H_
diff --git a/deps/v8/src/objects/module-info.h b/deps/v8/src/objects/module-info.h
new file mode 100644
index 0000000000..099ee5f657
--- /dev/null
+++ b/deps/v8/src/objects/module-info.h
@@ -0,0 +1,129 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MODULE_INFO_H_
+#define V8_OBJECTS_MODULE_INFO_H_
+
+#include "src/objects.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class Handle;
+class Isolate;
+class ModuleDescriptor;
+class ModuleInfoEntry;
+class String;
+class Zone;
+
+// ModuleInfo is to ModuleDescriptor what ScopeInfo is to Scope.
+class ModuleInfo : public FixedArray {
+ public:
+ DECLARE_CAST(ModuleInfo)
+
+ static Handle<ModuleInfo> New(Isolate* isolate, Zone* zone,
+ ModuleDescriptor* descr);
+
+ inline FixedArray* module_requests() const {
+ return FixedArray::cast(get(kModuleRequestsIndex));
+ }
+
+ inline FixedArray* special_exports() const {
+ return FixedArray::cast(get(kSpecialExportsIndex));
+ }
+
+ inline FixedArray* regular_exports() const {
+ return FixedArray::cast(get(kRegularExportsIndex));
+ }
+
+ inline FixedArray* regular_imports() const {
+ return FixedArray::cast(get(kRegularImportsIndex));
+ }
+
+ inline FixedArray* namespace_imports() const {
+ return FixedArray::cast(get(kNamespaceImportsIndex));
+ }
+
+ // Accessors for [regular_exports].
+ int RegularExportCount() const;
+ String* RegularExportLocalName(int i) const;
+ int RegularExportCellIndex(int i) const;
+ FixedArray* RegularExportExportNames(int i) const;
+
+ static Handle<ModuleInfoEntry> LookupRegularImport(Handle<ModuleInfo> info,
+ Handle<String> local_name);
+
+#ifdef DEBUG
+ inline bool Equals(ModuleInfo* other) const {
+ return regular_exports() == other->regular_exports() &&
+ regular_imports() == other->regular_imports() &&
+ special_exports() == other->special_exports() &&
+ namespace_imports() == other->namespace_imports();
+ }
+#endif
+
+ private:
+ friend class Factory;
+ friend class ModuleDescriptor;
+ enum {
+ kModuleRequestsIndex,
+ kSpecialExportsIndex,
+ kRegularExportsIndex,
+ kNamespaceImportsIndex,
+ kRegularImportsIndex,
+ kLength
+ };
+ enum {
+ kRegularExportLocalNameOffset,
+ kRegularExportCellIndexOffset,
+ kRegularExportExportNamesOffset,
+ kRegularExportLength
+ };
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfo);
+};
+
+class ModuleInfoEntry : public Struct {
+ public:
+ DECLARE_CAST(ModuleInfoEntry)
+ DECLARE_PRINTER(ModuleInfoEntry)
+ DECLARE_VERIFIER(ModuleInfoEntry)
+
+ DECL_ACCESSORS(export_name, Object)
+ DECL_ACCESSORS(local_name, Object)
+ DECL_ACCESSORS(import_name, Object)
+ DECL_INT_ACCESSORS(module_request)
+ DECL_INT_ACCESSORS(cell_index)
+ DECL_INT_ACCESSORS(beg_pos)
+ DECL_INT_ACCESSORS(end_pos)
+
+ static Handle<ModuleInfoEntry> New(Isolate* isolate,
+ Handle<Object> export_name,
+ Handle<Object> local_name,
+ Handle<Object> import_name,
+ int module_request, int cell_index,
+ int beg_pos, int end_pos);
+
+ static const int kExportNameOffset = HeapObject::kHeaderSize;
+ static const int kLocalNameOffset = kExportNameOffset + kPointerSize;
+ static const int kImportNameOffset = kLocalNameOffset + kPointerSize;
+ static const int kModuleRequestOffset = kImportNameOffset + kPointerSize;
+ static const int kCellIndexOffset = kModuleRequestOffset + kPointerSize;
+ static const int kBegPosOffset = kCellIndexOffset + kPointerSize;
+ static const int kEndPosOffset = kBegPosOffset + kPointerSize;
+ static const int kSize = kEndPosOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ModuleInfoEntry);
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MODULE_INFO_H_
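
The private offset enum implies that regular_exports() is a flat FixedArray of fixed-size records, three slots per export. A hedged sketch of how RegularExportLocalName(i) could be implemented under that assumption; purely illustrative, since the enum is private to ModuleInfo and its friends:

    // Three slots per record: local name, cell index, export names.
    String* RegularExportLocalNameSketch(ModuleInfo* info, int i) {
      FixedArray* exports = info->regular_exports();
      int base = i * 3;  // i * kRegularExportLength
      return String::cast(
          exports->get(base + 0));  // + kRegularExportLocalNameOffset
    }
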
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
new file mode 100644
index 0000000000..509d29779f
--- /dev/null
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -0,0 +1,9 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#undef DECL_BOOLEAN_ACCESSORS
+#undef DECL_INT_ACCESSORS
+#undef DECL_ACCESSORS
+#undef DECLARE_CAST
+#undef DECLARE_VERIFIER
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
new file mode 100644
index 0000000000..a3ececc6f7
--- /dev/null
+++ b/deps/v8/src/objects/object-macros.h
@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Note 1: Any file that includes this one should include object-macros-undef.h
+// at the bottom.
+
+// Note 2: This file is deliberately missing the include guards (the undeffing
+// approach wouldn't work otherwise).
+
+#define DECL_BOOLEAN_ACCESSORS(name) \
+ inline bool name() const; \
+ inline void set_##name(bool value);
+
+#define DECL_INT_ACCESSORS(name) \
+ inline int name() const; \
+ inline void set_##name(int value);
+
+#define DECL_ACCESSORS(name, type) \
+ inline type* name() const; \
+ inline void set_##name(type* value, \
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
+#define DECLARE_CAST(type) \
+ INLINE(static type* cast(Object* object)); \
+ INLINE(static const type* cast(const Object* object));
+
+#ifdef VERIFY_HEAP
+#define DECLARE_VERIFIER(Name) void Name##Verify();
+#else
+#define DECLARE_VERIFIER(Name)
+#endif
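
For concreteness, expanding a declaration that appears earlier in this diff, DECL_ACCESSORS(constant_pool, FixedArray) from BytecodeArray, with the macro above yields:

    inline FixedArray* constant_pool() const;
    inline void set_constant_pool(FixedArray* value,
                                  WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

DECLARE_CAST and DECLARE_VERIFIER expand the same way. Because this header is deliberately unguarded, every includer pairs it with object-macros-undef.h at the bottom, which keeps these short macro names from leaking into unrelated translation units.
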
diff --git a/deps/v8/src/ast/scopeinfo.cc b/deps/v8/src/objects/scope-info.cc
index 3a3ea03189..ae828cc1f0 100644
--- a/deps/v8/src/ast/scopeinfo.cc
+++ b/deps/v8/src/objects/scope-info.cc
@@ -4,10 +4,13 @@
#include <stdlib.h>
+#include "src/objects/scope-info.h"
+
#include "src/ast/context-slot-cache.h"
#include "src/ast/scopes.h"
#include "src/ast/variables.h"
#include "src/bootstrapper.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -143,13 +146,15 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone, Scope* scope,
bool has_simple_parameters = false;
bool asm_module = false;
bool asm_function = false;
- FunctionKind function_kind = kNormalFunction;
if (scope->is_function_scope()) {
DeclarationScope* function_scope = scope->AsDeclarationScope();
has_simple_parameters = function_scope->has_simple_parameters();
asm_module = function_scope->asm_module();
asm_function = function_scope->asm_function();
- function_kind = function_scope->function_kind();
+ }
+ FunctionKind function_kind = kNormalFunction;
+ if (scope->is_declaration_scope()) {
+ function_kind = scope->AsDeclarationScope()->function_kind();
}
// Encode the flags.
@@ -391,37 +396,28 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
return scope_info;
}
-
ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
return isolate->heap()->empty_scope_info();
}
-
ScopeType ScopeInfo::scope_type() {
DCHECK_LT(0, length());
return ScopeTypeField::decode(Flags());
}
-
bool ScopeInfo::CallsEval() {
return length() > 0 && CallsEvalField::decode(Flags());
}
-
LanguageMode ScopeInfo::language_mode() {
return length() > 0 ? LanguageModeField::decode(Flags()) : SLOPPY;
}
-
bool ScopeInfo::is_declaration_scope() {
return DeclarationScopeField::decode(Flags());
}
-
-int ScopeInfo::LocalCount() {
- return StackLocalCount() + ContextLocalCount();
-}
-
+int ScopeInfo::LocalCount() { return StackLocalCount() + ContextLocalCount(); }
int ScopeInfo::StackSlotCount() {
if (length() > 0) {
@@ -432,7 +428,6 @@ int ScopeInfo::StackSlotCount() {
return 0;
}
-
int ScopeInfo::ContextLength() {
if (length() > 0) {
int context_locals = ContextLocalCount();
@@ -443,6 +438,7 @@ int ScopeInfo::ContextLength() {
(scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
is_declaration_scope()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
+ (scope_type() == FUNCTION_SCOPE && IsAsmModule()) ||
scope_type() == MODULE_SCOPE;
if (has_context) {
@@ -453,7 +449,6 @@ int ScopeInfo::ContextLength() {
return 0;
}
-
bool ScopeInfo::HasReceiver() {
if (length() > 0) {
return NONE != ReceiverVariableField::decode(Flags());
@@ -462,7 +457,6 @@ bool ScopeInfo::HasReceiver() {
}
}
-
bool ScopeInfo::HasAllocatedReceiver() {
if (length() > 0) {
VariableAllocationInfo allocation = ReceiverVariableField::decode(Flags());
@@ -472,10 +466,8 @@ bool ScopeInfo::HasAllocatedReceiver() {
}
}
-
bool ScopeInfo::HasNewTarget() { return HasNewTargetField::decode(Flags()); }
-
bool ScopeInfo::HasFunctionName() {
if (length() > 0) {
return NONE != FunctionVariableField::decode(Flags());
@@ -517,11 +509,7 @@ bool ScopeInfo::HasHeapAllocatedLocals() {
}
}
-
-bool ScopeInfo::HasContext() {
- return ContextLength() > 0;
-}
-
+bool ScopeInfo::HasContext() { return ContextLength() > 0; }
String* ScopeInfo::FunctionName() {
DCHECK(HasFunctionName());
@@ -545,7 +533,6 @@ String* ScopeInfo::ParameterName(int var) {
return String::cast(get(info_index));
}
-
String* ScopeInfo::LocalName(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, LocalCount());
@@ -555,7 +542,6 @@ String* ScopeInfo::LocalName(int var) {
return String::cast(get(info_index));
}
-
String* ScopeInfo::StackLocalName(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, StackLocalCount());
@@ -563,7 +549,6 @@ String* ScopeInfo::StackLocalName(int var) {
return String::cast(get(info_index));
}
-
int ScopeInfo::StackLocalIndex(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, StackLocalCount());
@@ -571,7 +556,6 @@ int ScopeInfo::StackLocalIndex(int var) {
return first_slot_index + var;
}
-
String* ScopeInfo::ContextLocalName(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -579,7 +563,6 @@ String* ScopeInfo::ContextLocalName(int var) {
return String::cast(get(info_index));
}
-
VariableMode ScopeInfo::ContextLocalMode(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -588,7 +571,6 @@ VariableMode ScopeInfo::ContextLocalMode(int var) {
return VariableModeField::decode(value);
}
-
InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -597,7 +579,6 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
return InitFlagField::decode(value);
}
-
MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
DCHECK_LE(0, var);
DCHECK_LT(var, ContextLocalCount());
@@ -615,7 +596,6 @@ bool ScopeInfo::VariableIsSynthetic(String* name) {
name->Equals(name->GetHeap()->this_string());
}
-
int ScopeInfo::StackSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -704,7 +684,6 @@ String* ScopeInfo::ContextSlotName(int slot_index) {
return ContextLocalName(var);
}
-
int ScopeInfo::ParameterIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -724,7 +703,6 @@ int ScopeInfo::ParameterIndex(String* name) {
return -1;
}
-
int ScopeInfo::ReceiverContextSlotIndex() {
if (length() > 0 && ReceiverVariableField::decode(Flags()) == CONTEXT)
return Smi::cast(get(ReceiverInfoIndex()))->value();
@@ -742,7 +720,6 @@ int ScopeInfo::FunctionContextSlotIndex(String* name) {
return -1;
}
-
FunctionKind ScopeInfo::function_kind() {
return FunctionKindField::decode(Flags());
}
@@ -752,7 +729,6 @@ int ScopeInfo::ParameterNamesIndex() {
return kVariablePartIndex;
}
-
int ScopeInfo::StackLocalFirstSlotIndex() {
return ParameterNamesIndex() + ParameterCount();
}
@@ -818,15 +794,12 @@ void ScopeInfo::ModuleVariable(int i, String** name, int* index,
#ifdef DEBUG
-static void PrintList(const char* list_name,
- int nof_internal_slots,
- int start,
- int end,
- ScopeInfo* scope_info) {
+static void PrintList(const char* list_name, int nof_internal_slots, int start,
+ int end, ScopeInfo* scope_info) {
if (start < end) {
PrintF("\n // %s\n", list_name);
if (nof_internal_slots > 0) {
- PrintF(" %2d - %2d [internal slots]\n", 0 , nof_internal_slots - 1);
+ PrintF(" %2d - %2d [internal slots]\n", 0, nof_internal_slots - 1);
}
for (int i = nof_internal_slots; start < end; ++i, ++start) {
PrintF(" %2d ", i);
@@ -836,7 +809,6 @@ static void PrintList(const char* list_name,
}
}
-
void ScopeInfo::Print() {
PrintF("ScopeInfo ");
if (HasFunctionName()) {
diff --git a/deps/v8/src/objects/scope-info.h b/deps/v8/src/objects/scope-info.h
new file mode 100644
index 0000000000..671734e05a
--- /dev/null
+++ b/deps/v8/src/objects/scope-info.h
@@ -0,0 +1,345 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_SCOPE_INFO_H_
+#define V8_OBJECTS_SCOPE_INFO_H_
+
+#include "src/globals.h"
+#include "src/handles.h"
+#include "src/objects.h"
+#include "src/utils.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+class Scope;
+class Zone;
+
+// ScopeInfo represents information about different scopes of a source
+// program and the allocation of the scope's variables. Scope information
+// is stored in a compressed form in ScopeInfo objects and is used
+// at runtime (stack dumps, deoptimization, etc.).
+
+// This object provides quick access to scope info details for runtime
+// routines.
+class ScopeInfo : public FixedArray {
+ public:
+ DECLARE_CAST(ScopeInfo)
+
+ // Return the type of this scope.
+ ScopeType scope_type();
+
+ // Does this scope call eval?
+ bool CallsEval();
+
+ // Return the language mode of this scope.
+ LanguageMode language_mode();
+
+ // True if this scope is a (var) declaration scope.
+ bool is_declaration_scope();
+
+ // Does this scope make a sloppy eval call?
+ bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
+
+ // Return the total number of locals allocated on the stack and in the
+ // context. This includes the parameters that are allocated in the context.
+ int LocalCount();
+
+ // Return the number of stack slots for code. This number consists of two
+ // parts:
+ // 1. One stack slot per stack allocated local.
+ // 2. One stack slot for the function name if it is stack allocated.
+ int StackSlotCount();
+
+ // Return the number of context slots for code if a context is allocated. This
+ // number consists of three parts:
+ // 1. Size of fixed header for every context: Context::MIN_CONTEXT_SLOTS
+ // 2. One context slot per context allocated local.
+ // 3. One context slot for the function name if it is context allocated.
+  // Parameters allocated in the context count as context-allocated locals.
+  // If no context is allocated for this scope, ContextLength returns 0.
+ int ContextLength();
+
+ // Does this scope declare a "this" binding?
+ bool HasReceiver();
+
+ // Does this scope declare a "this" binding, and the "this" binding is stack-
+ // or context-allocated?
+ bool HasAllocatedReceiver();
+
+ // Does this scope declare a "new.target" binding?
+ bool HasNewTarget();
+
+ // Is this scope the scope of a named function expression?
+ bool HasFunctionName();
+
+  // Return true if this scope has context-allocated locals.
+  bool HasHeapAllocatedLocals();
+
+  // Return true if a context is allocated for this scope.
+  bool HasContext();
+
+  // Return true if this is a function scope with "use asm".
+  inline bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+  // Return true if this is a nested function within an asm module scope.
+  inline bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+
+ inline bool HasSimpleParameters() {
+ return HasSimpleParametersField::decode(Flags());
+ }
+
+ // Return the function_name if present.
+ String* FunctionName();
+
+ ModuleInfo* ModuleDescriptorInfo();
+
+ // Return the name of the given parameter.
+ String* ParameterName(int var);
+
+ // Return the name of the given local.
+ String* LocalName(int var);
+
+ // Return the name of the given stack local.
+ String* StackLocalName(int var);
+
+  // Return the index of the given stack local.
+ int StackLocalIndex(int var);
+
+ // Return the name of the given context local.
+ String* ContextLocalName(int var);
+
+ // Return the mode of the given context local.
+ VariableMode ContextLocalMode(int var);
+
+ // Return the initialization flag of the given context local.
+ InitializationFlag ContextLocalInitFlag(int var);
+
+  // Return the maybe-assigned flag of the given context local.
+ MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
+
+ // Return true if this local was introduced by the compiler, and should not be
+ // exposed to the user in a debugger.
+ static bool VariableIsSynthetic(String* name);
+
+  // Lookup support for serialized scope info. Returns the stack slot index
+  // for a given slot name if the slot is present; otherwise returns a value
+  // < 0. The name must be an internalized string.
+ int StackSlotIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the local context slot
+ // index for a given slot name if the slot is present; otherwise
+ // returns a value < 0. The name must be an internalized string.
+ // If the slot is present and mode != NULL, sets *mode to the corresponding
+ // mode for that variable.
+ static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
+ VariableMode* mode, InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
+ // Lookup metadata of a MODULE-allocated variable. Return 0 if there is no
+ // module variable with the given name (the index value of a MODULE variable
+ // is never 0).
+ int ModuleIndex(Handle<String> name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
+ // Lookup the name of a certain context slot by its index.
+ String* ContextSlotName(int slot_index);
+
+ // Lookup support for serialized scope info. Returns the
+ // parameter index for a given parameter name if the parameter is present;
+ // otherwise returns a value < 0. The name must be an internalized string.
+ int ParameterIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the function context
+ // slot index if the function name is present and context-allocated (named
+ // function expressions, only), otherwise returns a value < 0. The name
+ // must be an internalized string.
+ int FunctionContextSlotIndex(String* name);
+
+ // Lookup support for serialized scope info. Returns the receiver context
+  // slot index if the scope has a "this" binding, and the binding is
+ // context-allocated. Otherwise returns a value < 0.
+ int ReceiverContextSlotIndex();
+
+ FunctionKind function_kind();
+
+  // Returns true if this ScopeInfo is linked to an outer ScopeInfo.
+ bool HasOuterScopeInfo();
+
+ // Returns true if this ScopeInfo was created for a debug-evaluate scope.
+ bool IsDebugEvaluateScope();
+
+ // Can be used to mark a ScopeInfo that looks like a with-scope as actually
+ // being a debug-evaluate scope.
+ void SetIsDebugEvaluateScope();
+
+ // Return the outer ScopeInfo if present.
+ ScopeInfo* OuterScopeInfo();
+
+#ifdef DEBUG
+ bool Equals(ScopeInfo* other) const;
+#endif
+
+ static Handle<ScopeInfo> Create(Isolate* isolate, Zone* zone, Scope* scope,
+ MaybeHandle<ScopeInfo> outer_scope);
+ static Handle<ScopeInfo> CreateForWithScope(
+ Isolate* isolate, MaybeHandle<ScopeInfo> outer_scope);
+ static Handle<ScopeInfo> CreateGlobalThisBinding(Isolate* isolate);
+
+ // Serializes empty scope info.
+ V8_EXPORT_PRIVATE static ScopeInfo* Empty(Isolate* isolate);
+
+#ifdef DEBUG
+ void Print();
+#endif
+
+// The layout of the static part of a ScopeInfo is as follows. Each entry is
+// numeric and occupies one array slot.
+// 1. A set of properties of the scope.
+// 2. The number of parameters. For non-function scopes this is 0.
+// 3. The number of non-parameter variables allocated on the stack.
+// 4. The number of non-parameter and parameter variables allocated in the
+// context.
+#define FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(V) \
+ V(Flags) \
+ V(ParameterCount) \
+ V(StackLocalCount) \
+ V(ContextLocalCount)
+
+#define FIELD_ACCESSORS(name) \
+ inline void Set##name(int value) { set(k##name, Smi::FromInt(value)); } \
+ inline int name() { \
+ if (length() > 0) { \
+ return Smi::cast(get(k##name))->value(); \
+ } else { \
+ return 0; \
+ } \
+ }
+
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(FIELD_ACCESSORS)
+#undef FIELD_ACCESSORS
+
+ enum {
+#define DECL_INDEX(name) k##name,
+ FOR_EACH_SCOPE_INFO_NUMERIC_FIELD(DECL_INDEX)
+#undef DECL_INDEX
+ kVariablePartIndex
+ };
+
+ private:
+ // The layout of the variable part of a ScopeInfo is as follows:
+ // 1. ParameterNames:
+ // This part stores the names of the parameters for function scopes. One
+ // slot is used per parameter, so in total this part occupies
+  //    ParameterCount() slots in the array. For scopes other than function
+  //    scopes, ParameterCount() is 0.
+ // 2. StackLocalFirstSlot:
+  //    Index of the first stack slot for this scope's stack locals. Stack
+  //    locals belonging to this scope are located on the stack at slots
+  //    starting from this index.
+  // 3. StackLocalNames:
+  //    Contains the names of local variables that are allocated on the stack,
+  //    in increasing order of the stack slot index. The first local variable
+  //    has the stack slot index given by StackLocalFirstSlot (point 2 above).
+ // One slot is used per stack local, so in total this part occupies
+ // StackLocalCount() slots in the array.
+ // 4. ContextLocalNames:
+ // Contains the names of local variables and parameters that are allocated
+ // in the context. They are stored in increasing order of the context slot
+ // index starting with Context::MIN_CONTEXT_SLOTS. One slot is used per
+ // context local, so in total this part occupies ContextLocalCount() slots
+ // in the array.
+ // 5. ContextLocalInfos:
+ // Contains the variable modes and initialization flags corresponding to
+ // the context locals in ContextLocalNames. One slot is used per
+ // context local, so in total this part occupies ContextLocalCount()
+ // slots in the array.
+ // 6. ReceiverInfo:
+ // If the scope binds a "this" value, one slot is reserved to hold the
+ // context or stack slot index for the variable.
+ // 7. FunctionNameInfo:
+ // If the scope belongs to a named function expression this part contains
+ // information about the function variable. It always occupies two array
+  //    slots:
+  //      a. The name of the function variable.
+  //      b. The context or stack slot index for the variable.
+ // 8. OuterScopeInfoIndex:
+ // The outer scope's ScopeInfo or the hole if there's none.
+ // 9. ModuleInfo, ModuleVariableCount, and ModuleVariables:
+ // For a module scope, this part contains the ModuleInfo, the number of
+ // MODULE-allocated variables, and the metadata of those variables. For
+ // non-module scopes it is empty.
+ int ParameterNamesIndex();
+ int StackLocalFirstSlotIndex();
+ int StackLocalNamesIndex();
+ int ContextLocalNamesIndex();
+ int ContextLocalInfosIndex();
+ int ReceiverInfoIndex();
+ int FunctionNameInfoIndex();
+ int OuterScopeInfoIndex();
+ int ModuleInfoIndex();
+ int ModuleVariableCountIndex();
+ int ModuleVariablesIndex();
+
+ int Lookup(Handle<String> name, int start, int end, VariableMode* mode,
+ VariableLocation* location, InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
+  // Get the metadata of the i-th MODULE-allocated variable, where
+  // 0 <= i < ModuleVariableCount. The metadata is returned via out-arguments,
+  // which may be nullptr if the corresponding information is not requested.
+ void ModuleVariable(int i, String** name, int* index,
+ VariableMode* mode = nullptr,
+ InitializationFlag* init_flag = nullptr,
+ MaybeAssignedFlag* maybe_assigned_flag = nullptr);
+
+ // Used for the function name variable for named function expressions, and for
+ // the receiver.
+ enum VariableAllocationInfo { NONE, STACK, CONTEXT, UNUSED };
+
+ // Properties of scopes.
+ class ScopeTypeField : public BitField<ScopeType, 0, 4> {};
+ class CallsEvalField : public BitField<bool, ScopeTypeField::kNext, 1> {};
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ class LanguageModeField
+ : public BitField<LanguageMode, CallsEvalField::kNext, 1> {};
+ class DeclarationScopeField
+ : public BitField<bool, LanguageModeField::kNext, 1> {};
+ class ReceiverVariableField
+ : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
+ 2> {};
+ class HasNewTargetField
+ : public BitField<bool, ReceiverVariableField::kNext, 1> {};
+ class FunctionVariableField
+ : public BitField<VariableAllocationInfo, HasNewTargetField::kNext, 2> {};
+ class AsmModuleField
+ : public BitField<bool, FunctionVariableField::kNext, 1> {};
+ class AsmFunctionField : public BitField<bool, AsmModuleField::kNext, 1> {};
+ class HasSimpleParametersField
+ : public BitField<bool, AsmFunctionField::kNext, 1> {};
+ class FunctionKindField
+ : public BitField<FunctionKind, HasSimpleParametersField::kNext, 10> {};
+ class HasOuterScopeInfoField
+ : public BitField<bool, FunctionKindField::kNext, 1> {};
+ class IsDebugEvaluateScopeField
+ : public BitField<bool, HasOuterScopeInfoField::kNext, 1> {};
+
+ // Properties of variables.
+ class VariableModeField : public BitField<VariableMode, 0, 3> {};
+ class InitFlagField : public BitField<InitializationFlag, 3, 1> {};
+ class MaybeAssignedFlagField : public BitField<MaybeAssignedFlag, 4, 1> {};
+
+ friend class ScopeIterator;
+};
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_SCOPE_INFO_H_
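
The "Properties of scopes" section of the new header packs every scope flag into the single Flags Smi via BitField templates chained through kNext. Below is a minimal standalone sketch of that packing pattern; MiniBitField is a simplified stand-in for V8's BitField template in src/utils.h, not the real implementation.

#include <cstdint>
#include <iostream>

// Simplified stand-in for V8's BitField<T, shift, size> (src/utils.h).
template <class T, int shift, int size>
struct MiniBitField {
  static const int kNext = shift + size;  // First bit after this field.
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

enum ScopeType { FUNCTION_SCOPE = 0, MODULE_SCOPE = 1, BLOCK_SCOPE = 2 };

// Fields chain through kNext, like ScopeTypeField / CallsEvalField above.
using ScopeTypeField = MiniBitField<ScopeType, 0, 4>;
using CallsEvalField = MiniBitField<bool, ScopeTypeField::kNext, 1>;

int main() {
  uint32_t flags =
      ScopeTypeField::encode(MODULE_SCOPE) | CallsEvalField::encode(true);
  std::cout << "scope_type=" << ScopeTypeField::decode(flags)            // 1
            << " calls_eval=" << CallsEvalField::decode(flags) << "\n";  // 1
}

Because each field starts at the previous field's kNext, flags such as AsmModuleField can be appended without renumbering the existing shifts.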
diff --git a/deps/v8/src/parsing/OWNERS b/deps/v8/src/parsing/OWNERS
index 44cc4ed5ed..ee49f9c366 100644
--- a/deps/v8/src/parsing/OWNERS
+++ b/deps/v8/src/parsing/OWNERS
@@ -4,4 +4,5 @@ adamk@chromium.org
littledan@chromium.org
marja@chromium.org
rossberg@chromium.org
+verwaest@chromium.org
vogelheim@chromium.org
diff --git a/deps/v8/src/parsing/duplicate-finder.cc b/deps/v8/src/parsing/duplicate-finder.cc
index 6b57153f9b..0e03da7704 100644
--- a/deps/v8/src/parsing/duplicate-finder.cc
+++ b/deps/v8/src/parsing/duplicate-finder.cc
@@ -4,83 +4,26 @@
#include "src/parsing/duplicate-finder.h"
-#include "src/conversions.h"
-#include "src/unicode-cache.h"
-
namespace v8 {
namespace internal {
-int DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key, int value) {
- return AddSymbol(key, true, value);
+bool DuplicateFinder::AddOneByteSymbol(Vector<const uint8_t> key) {
+ return AddSymbol(key, true);
}
-int DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key, int value) {
- return AddSymbol(Vector<const uint8_t>::cast(key), false, value);
+bool DuplicateFinder::AddTwoByteSymbol(Vector<const uint16_t> key) {
+ return AddSymbol(Vector<const uint8_t>::cast(key), false);
}
-int DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte,
- int value) {
+bool DuplicateFinder::AddSymbol(Vector<const uint8_t> key, bool is_one_byte) {
uint32_t hash = Hash(key, is_one_byte);
byte* encoding = BackupKey(key, is_one_byte);
base::HashMap::Entry* entry = map_.LookupOrInsert(encoding, hash);
int old_value = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- entry->value =
- reinterpret_cast<void*>(static_cast<intptr_t>(value | old_value));
+ entry->value = reinterpret_cast<void*>(1);
return old_value;
}
-int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
- DCHECK(key.length() > 0);
- // Quick check for already being in canonical form.
- if (IsNumberCanonical(key)) {
- return AddOneByteSymbol(key, value);
- }
-
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY;
- double double_value = StringToDouble(unicode_constants_, key, flags, 0.0);
- int length;
- const char* string;
- if (!std::isfinite(double_value)) {
- string = "Infinity";
- length = 8; // strlen("Infinity");
- } else {
- string = DoubleToCString(double_value,
- Vector<char>(number_buffer_, kBufferSize));
- length = StrLength(string);
- }
- return AddSymbol(
- Vector<const byte>(reinterpret_cast<const byte*>(string), length), true,
- value);
-}
-
-bool DuplicateFinder::IsNumberCanonical(Vector<const uint8_t> number) {
- // Test for a safe approximation of number literals that are already
- // in canonical form: max 15 digits, no leading zeroes, except an
- // integer part that is a single zero, and no trailing zeros below
- // the decimal point.
- int pos = 0;
- int length = number.length();
- if (number.length() > 15) return false;
- if (number[pos] == '0') {
- pos++;
- } else {
- while (pos < length &&
- static_cast<unsigned>(number[pos] - '0') <= ('9' - '0'))
- pos++;
- }
- if (length == pos) return true;
- if (number[pos] != '.') return false;
- pos++;
- bool invalid_last_digit = true;
- while (pos < length) {
- uint8_t digit = number[pos] - '0';
- if (digit > '9' - '0') return false;
- invalid_last_digit = (digit == 0);
- pos++;
- }
- return !invalid_last_digit;
-}
-
uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
// for strings (except that it's seeded by the length and representation).
diff --git a/deps/v8/src/parsing/duplicate-finder.h b/deps/v8/src/parsing/duplicate-finder.h
index a3858e7c74..c11c477036 100644
--- a/deps/v8/src/parsing/duplicate-finder.h
+++ b/deps/v8/src/parsing/duplicate-finder.h
@@ -11,25 +11,16 @@
namespace v8 {
namespace internal {
-class UnicodeCache;
-
// DuplicateFinder discovers duplicate symbols.
class DuplicateFinder {
public:
- explicit DuplicateFinder(UnicodeCache* constants)
- : unicode_constants_(constants), backing_store_(16), map_(&Match) {}
+ DuplicateFinder() : backing_store_(16), map_(&Match) {}
- int AddOneByteSymbol(Vector<const uint8_t> key, int value);
- int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
- // Add a a number literal by converting it (if necessary)
- // to the string that ToString(ToNumber(literal)) would generate.
- // and then adding that string with AddOneByteSymbol.
- // This string is the actual value used as key in an object literal,
- // and the one that must be different from the other keys.
- int AddNumber(Vector<const uint8_t> key, int value);
+ bool AddOneByteSymbol(Vector<const uint8_t> key);
+ bool AddTwoByteSymbol(Vector<const uint16_t> key);
private:
- int AddSymbol(Vector<const uint8_t> key, bool is_one_byte, int value);
+ bool AddSymbol(Vector<const uint8_t> key, bool is_one_byte);
// Backs up the key and its length in the backing store.
// The backup is stored with a base 127 encoding of the
// length (plus a bit saying whether the string is one byte),
@@ -40,22 +31,13 @@ class DuplicateFinder {
// for having the same base-127 encoded lengths and representation.
// and then having the same 'length' bytes following.
static bool Match(void* first, void* second);
+
// Creates a hash from a sequence of bytes.
static uint32_t Hash(Vector<const uint8_t> key, bool is_one_byte);
- // Checks whether a string containing a JS number is its canonical
- // form.
- static bool IsNumberCanonical(Vector<const uint8_t> key);
-
- // Size of buffer. Sufficient for using it to call DoubleToCString in
- // from conversions.h.
- static const int kBufferSize = 100;
- UnicodeCache* unicode_constants_;
// Backing store used to store strings used as hashmap keys.
SequenceCollector<unsigned char> backing_store_;
base::CustomMatcherHashMap map_;
- // Buffer used for string->number->canonical string conversions.
- char number_buffer_[kBufferSize];
};
} // namespace internal
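
With AddNumber/IsNumberCanonical gone and the per-symbol int values dropped, DuplicateFinder reduces to a set-membership test: AddSymbol answers whether the key was seen before, which is exactly how Scanner::FindSymbol is consumed in parser-base.h below. A minimal sketch of that contract, assuming a hypothetical ToyDuplicateFinder over std::unordered_set (V8's real class additionally distinguishes one-byte and two-byte keys and owns the backed-up key bytes):

#include <iostream>
#include <string>
#include <unordered_set>

class ToyDuplicateFinder {
 public:
  // Returns true if |key| was present before this call, i.e. a duplicate.
  bool AddSymbol(const std::string& key) { return !seen_.insert(key).second; }

 private:
  std::unordered_set<std::string> seen_;
};

int main() {
  ToyDuplicateFinder finder;
  std::cout << finder.AddSymbol("a") << "\n";  // 0: first occurrence
  std::cout << finder.AddSymbol("b") << "\n";  // 0: first occurrence
  std::cout << finder.AddSymbol("a") << "\n";  // 1: duplicate detected
}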
diff --git a/deps/v8/src/parsing/func-name-inferrer.cc b/deps/v8/src/parsing/func-name-inferrer.cc
index a86e1c299f..28dbe49965 100644
--- a/deps/v8/src/parsing/func-name-inferrer.cc
+++ b/deps/v8/src/parsing/func-name-inferrer.cc
@@ -4,9 +4,10 @@
#include "src/parsing/func-name-inferrer.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-value-factory.h"
+#include "src/ast/ast.h"
#include "src/list-inl.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -46,8 +47,8 @@ void FuncNameInferrer::PushVariableName(const AstRawString* name) {
void FuncNameInferrer::RemoveAsyncKeywordFromEnd() {
if (IsOpen()) {
- DCHECK(names_stack_.length() > 0);
- DCHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
+ CHECK(names_stack_.length() > 0);
+ CHECK(names_stack_.last().name->IsOneByteEqualTo("async"));
names_stack_.RemoveLast();
}
}
diff --git a/deps/v8/src/parsing/parameter-initializer-rewriter.cc b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
index 73224a23d1..7a1bc1ef9e 100644
--- a/deps/v8/src/parsing/parameter-initializer-rewriter.cc
+++ b/deps/v8/src/parsing/parameter-initializer-rewriter.cc
@@ -4,9 +4,10 @@
#include "src/parsing/parameter-initializer-rewriter.h"
-#include "src/ast/ast.h"
#include "src/ast/ast-traversal-visitor.h"
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 4fbfb1948d..b703d3e924 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -19,12 +20,15 @@ ParseInfo::ParseInfo(Zone* zone)
extension_(nullptr),
compile_options_(ScriptCompiler::kNoCompileOptions),
script_scope_(nullptr),
+ asm_function_scope_(nullptr),
unicode_cache_(nullptr),
stack_limit_(0),
hash_seed_(0),
compiler_hints_(0),
start_position_(0),
end_position_(0),
+ function_literal_id_(FunctionLiteral::kIdTypeInvalid),
+ max_function_literal_id_(FunctionLiteral::kIdTypeInvalid),
isolate_(nullptr),
cached_data_(nullptr),
ast_value_factory_(nullptr),
@@ -43,10 +47,12 @@ ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
set_compiler_hints(shared->compiler_hints());
set_start_position(shared->start_position());
set_end_position(shared->end_position());
+ function_literal_id_ = shared->function_literal_id();
set_stack_limit(isolate_->stack_guard()->real_climit());
set_unicode_cache(isolate_->unicode_cache());
set_language_mode(shared->language_mode());
set_shared_info(shared);
+ set_module(shared->kind() == FunctionKind::kModule);
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
@@ -63,8 +69,7 @@ ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
isolate_ = script->GetIsolate();
- set_allow_lazy_parsing(String::cast(script->source())->length() >
- FLAG_min_preparse_length);
+ set_allow_lazy_parsing();
set_toplevel();
set_hash_seed(isolate_->heap()->HashSeed());
set_stack_limit(isolate_->stack_guard()->real_climit());
@@ -89,15 +94,6 @@ bool ParseInfo::is_declaration() const {
return (compiler_hints_ & (1 << SharedFunctionInfo::kIsDeclaration)) != 0;
}
-bool ParseInfo::requires_class_field_init() const {
- return (compiler_hints_ &
- (1 << SharedFunctionInfo::kRequiresClassFieldInit)) != 0;
-}
-bool ParseInfo::is_class_field_initializer() const {
- return (compiler_hints_ &
- (1 << SharedFunctionInfo::kIsClassFieldInitializer)) != 0;
-}
-
FunctionKind ParseInfo::function_kind() const {
return SharedFunctionInfo::FunctionKindBits::decode(compiler_hints_);
}
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index 24188d95e2..87052a5a7e 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -8,6 +8,7 @@
#include "include/v8.h"
#include "src/globals.h"
#include "src/handles.h"
+#include "src/objects/scope-info.h"
namespace v8 {
@@ -97,9 +98,6 @@ class V8_EXPORT_PRIVATE ParseInfo {
return compile_options_;
}
void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
- if (compile_options == ScriptCompiler::kConsumeParserCache) {
- set_allow_lazy_parsing();
- }
compile_options_ = compile_options;
}
@@ -108,6 +106,11 @@ class V8_EXPORT_PRIVATE ParseInfo {
script_scope_ = script_scope;
}
+ DeclarationScope* asm_function_scope() const { return asm_function_scope_; }
+ void set_asm_function_scope(DeclarationScope* scope) {
+ asm_function_scope_ = scope;
+ }
+
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
void set_ast_value_factory(AstValueFactory* ast_value_factory) {
ast_value_factory_ = ast_value_factory;
@@ -147,10 +150,18 @@ class V8_EXPORT_PRIVATE ParseInfo {
int end_position() const { return end_position_; }
void set_end_position(int end_position) { end_position_ = end_position; }
+ int function_literal_id() const { return function_literal_id_; }
+ void set_function_literal_id(int function_literal_id) {
+ function_literal_id_ = function_literal_id;
+ }
+
+ int max_function_literal_id() const { return max_function_literal_id_; }
+ void set_max_function_literal_id(int max_function_literal_id) {
+ max_function_literal_id_ = max_function_literal_id;
+ }
+
// Getters for individual compiler hints.
bool is_declaration() const;
- bool requires_class_field_init() const;
- bool is_class_field_initializer() const;
FunctionKind function_kind() const;
//--------------------------------------------------------------------------
@@ -221,12 +232,15 @@ class V8_EXPORT_PRIVATE ParseInfo {
v8::Extension* extension_;
ScriptCompiler::CompileOptions compile_options_;
DeclarationScope* script_scope_;
+ DeclarationScope* asm_function_scope_;
UnicodeCache* unicode_cache_;
uintptr_t stack_limit_;
uint32_t hash_seed_;
int compiler_hints_;
int start_position_;
int end_position_;
+ int function_literal_id_;
+ int max_function_literal_id_;
// TODO(titzer): Move handles and isolate out of ParseInfo.
Isolate* isolate_;
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 9195aec990..f7fdb26d22 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -9,6 +9,7 @@
#include "src/ast/scopes.h"
#include "src/bailout-reason.h"
#include "src/base/hashmap.h"
+#include "src/counters.h"
#include "src/globals.h"
#include "src/messages.h"
#include "src/parsing/expression-classifier.h"
@@ -192,7 +193,8 @@ class ParserBase {
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
- RuntimeCallStats* runtime_call_stats)
+ RuntimeCallStats* runtime_call_stats,
+ bool parsing_on_main_thread = true)
: scope_state_(nullptr),
function_state_(nullptr),
extension_(extension),
@@ -200,6 +202,7 @@ class ParserBase {
ast_value_factory_(ast_value_factory),
ast_node_factory_(ast_value_factory),
runtime_call_stats_(runtime_call_stats),
+ parsing_on_main_thread_(parsing_on_main_thread),
parsing_module_(false),
stack_limit_(stack_limit),
zone_(zone),
@@ -207,29 +210,28 @@ class ParserBase {
scanner_(scanner),
stack_overflow_(false),
default_eager_compile_hint_(FunctionLiteral::kShouldLazyCompile),
- allow_lazy_(false),
+ function_literal_id_(0),
allow_natives_(false),
allow_tailcalls_(false),
allow_harmony_do_expressions_(false),
allow_harmony_function_sent_(false),
- allow_harmony_async_await_(false),
allow_harmony_restrictive_generators_(false),
allow_harmony_trailing_commas_(false),
- allow_harmony_class_fields_(false) {}
+ allow_harmony_class_fields_(false),
+ allow_harmony_object_spread_(false) {}
#define ALLOW_ACCESSORS(name) \
bool allow_##name() const { return allow_##name##_; } \
void set_allow_##name(bool allow) { allow_##name##_ = allow; }
- ALLOW_ACCESSORS(lazy);
ALLOW_ACCESSORS(natives);
ALLOW_ACCESSORS(tailcalls);
ALLOW_ACCESSORS(harmony_do_expressions);
ALLOW_ACCESSORS(harmony_function_sent);
- ALLOW_ACCESSORS(harmony_async_await);
ALLOW_ACCESSORS(harmony_restrictive_generators);
ALLOW_ACCESSORS(harmony_trailing_commas);
ALLOW_ACCESSORS(harmony_class_fields);
+ ALLOW_ACCESSORS(harmony_object_spread);
#undef ALLOW_ACCESSORS
@@ -246,6 +248,13 @@ class ParserBase {
return default_eager_compile_hint_;
}
+ int GetNextFunctionLiteralId() { return ++function_literal_id_; }
+ int GetLastFunctionLiteralId() const { return function_literal_id_; }
+
+ void SkipFunctionLiterals(int delta) { function_literal_id_ += delta; }
+
+ void ResetFunctionLiteralId() { function_literal_id_ = 0; }
+
Zone* zone() const { return zone_; }
protected:
@@ -411,8 +420,9 @@ class ParserBase {
FunctionState* outer() const { return outer_function_state_; }
void set_generator_object_variable(typename Types::Variable* variable) {
- DCHECK(variable != NULL);
+ DCHECK_NOT_NULL(variable);
DCHECK(IsResumableFunction(kind()));
+ DCHECK(scope()->has_forced_context_allocation());
generator_object_variable_ = variable;
}
typename Types::Variable* generator_object_variable() const {
@@ -458,16 +468,16 @@ class ParserBase {
return &non_patterns_to_rewrite_;
}
- bool next_function_is_parenthesized() const {
- return next_function_is_parenthesized_;
+ bool next_function_is_likely_called() const {
+ return next_function_is_likely_called_;
}
- void set_next_function_is_parenthesized(bool parenthesized) {
- next_function_is_parenthesized_ = parenthesized;
+ bool previous_function_was_likely_called() const {
+ return previous_function_was_likely_called_;
}
- bool this_function_is_parenthesized() const {
- return this_function_is_parenthesized_;
+ void set_next_function_is_likely_called() {
+ next_function_is_likely_called_ = true;
}
private:
@@ -508,13 +518,13 @@ class ParserBase {
ZoneList<typename ExpressionClassifier::Error> reported_errors_;
- // If true, the next (and immediately following) function literal is
- // preceded by a parenthesis.
- bool next_function_is_parenthesized_;
-
- // The value of the parents' next_function_is_parenthesized_, as it applies
- // to this function. Filled in by constructor.
- bool this_function_is_parenthesized_;
+ // Record whether the next (=== immediately following) function literal is
+ // preceded by a parenthesis / exclamation mark. Also record the previous
+ // state.
+ // These are managed by the FunctionState constructor; the caller may only
+ // call set_next_function_is_likely_called.
+ bool next_function_is_likely_called_;
+ bool previous_function_was_likely_called_;
friend Impl;
friend class Checkpoint;
@@ -633,7 +643,6 @@ class ParserBase {
scope(nullptr),
init_block(parser->impl()->NullBlock()),
inner_block(parser->impl()->NullBlock()),
- for_promise_reject(false),
bound_names(1, parser->zone()),
tail_call_expressions(parser->zone()) {}
IdentifierT name;
@@ -642,7 +651,6 @@ class ParserBase {
Scope* scope;
BlockT init_block;
BlockT inner_block;
- bool for_promise_reject;
ZoneList<const AstRawString*> bound_names;
TailCallExpressionList tail_call_expressions;
};
@@ -666,17 +674,17 @@ class ParserBase {
: proxy(nullptr),
extends(parser->impl()->EmptyExpression()),
properties(parser->impl()->NewClassPropertyList(4)),
- instance_field_initializers(parser->impl()->NewExpressionList(0)),
constructor(parser->impl()->EmptyFunctionLiteral()),
has_seen_constructor(false),
- static_initializer_var(nullptr) {}
+ has_name_static_property(false),
+ has_static_computed_names(false) {}
VariableProxy* proxy;
ExpressionT extends;
typename Types::ClassPropertyList properties;
- ExpressionListT instance_field_initializers;
FunctionLiteralT constructor;
bool has_seen_constructor;
- Variable* static_initializer_var;
+ bool has_name_static_property;
+ bool has_static_computed_names;
};
DeclarationScope* NewScriptScope() const {
@@ -712,10 +720,15 @@ class ParserBase {
return new (zone()) Scope(zone(), parent, scope_type);
}
- DeclarationScope* NewFunctionScope(FunctionKind kind) const {
+  // Creates a function scope whose internal allocations always use zone().
+  // The scope object itself is allocated in zone(), or in target_zone if one
+  // is passed in.
+ DeclarationScope* NewFunctionScope(FunctionKind kind,
+ Zone* target_zone = nullptr) const {
DCHECK(ast_value_factory());
- DeclarationScope* result =
- new (zone()) DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
+ if (target_zone == nullptr) target_zone = zone();
+ DeclarationScope* result = new (target_zone)
+ DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
// TODO(verwaest): Move into the DeclarationScope constructor.
if (!IsArrowFunction(kind)) {
result->DeclareDefaultFunctionVariables(ast_value_factory());
@@ -856,35 +869,29 @@ class ParserBase {
// Checks whether an octal literal was last seen between beg_pos and end_pos.
// If so, reports an error. Only called for strict mode and template strings.
- void CheckOctalLiteral(int beg_pos, int end_pos,
- MessageTemplate::Template message, bool* ok) {
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool is_template, bool* ok) {
Scanner::Location octal = scanner()->octal_position();
if (octal.IsValid() && beg_pos <= octal.beg_pos &&
octal.end_pos <= end_pos) {
+ MessageTemplate::Template message =
+ is_template ? MessageTemplate::kTemplateOctalLiteral
+ : scanner()->octal_message();
+ DCHECK_NE(message, MessageTemplate::kNone);
impl()->ReportMessageAt(octal, message);
scanner()->clear_octal_position();
+ if (message == MessageTemplate::kStrictDecimalWithLeadingZero) {
+ impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
+ }
*ok = false;
}
}
- // for now, this check just collects statistics.
- void CheckDecimalLiteralWithLeadingZero(int beg_pos, int end_pos) {
- Scanner::Location token_location =
- scanner()->decimal_with_leading_zero_position();
- if (token_location.IsValid() && beg_pos <= token_location.beg_pos &&
- token_location.end_pos <= end_pos) {
- scanner()->clear_decimal_with_leading_zero_position();
- impl()->CountUsage(v8::Isolate::kDecimalWithLeadingZeroInStrictMode);
- }
- }
inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kStrictOctalLiteral,
- ok);
+ CheckOctalLiteral(beg_pos, end_pos, false, ok);
}
inline void CheckTemplateOctalLiteral(int beg_pos, int end_pos, bool* ok) {
- CheckOctalLiteral(beg_pos, end_pos, MessageTemplate::kTemplateOctalLiteral,
- ok);
+ CheckOctalLiteral(beg_pos, end_pos, true, ok);
}
void CheckDestructuringElement(ExpressionT element, int beg_pos, int end_pos);
@@ -1143,6 +1150,7 @@ class ParserBase {
kShorthandProperty,
kMethodProperty,
kClassField,
+ kSpreadProperty,
kNotSet
};
@@ -1154,7 +1162,8 @@ class ParserBase {
ExpressionT ParseObjectLiteral(bool* ok);
ClassLiteralPropertyT ParseClassPropertyDefinition(
ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
- bool* has_seen_constructor, bool* ok);
+ bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+ bool* is_static, bool* has_name_static_property, bool* ok);
FunctionLiteralT ParseClassFieldForInitializer(bool has_initializer,
bool* ok);
ObjectLiteralPropertyT ParseObjectPropertyDefinition(
@@ -1423,6 +1432,7 @@ class ParserBase {
AstValueFactory* ast_value_factory_; // Not owned.
typename Types::Factory ast_node_factory_;
RuntimeCallStats* runtime_call_stats_;
+ bool parsing_on_main_thread_;
bool parsing_module_;
uintptr_t stack_limit_;
@@ -1437,15 +1447,16 @@ class ParserBase {
FunctionLiteral::EagerCompileHint default_eager_compile_hint_;
- bool allow_lazy_;
+ int function_literal_id_;
+
bool allow_natives_;
bool allow_tailcalls_;
bool allow_harmony_do_expressions_;
bool allow_harmony_function_sent_;
- bool allow_harmony_async_await_;
bool allow_harmony_restrictive_generators_;
bool allow_harmony_trailing_commas_;
bool allow_harmony_class_fields_;
+ bool allow_harmony_object_spread_;
friend class DiscardableZoneScope;
};
@@ -1466,13 +1477,13 @@ ParserBase<Impl>::FunctionState::FunctionState(
return_expr_context_(ReturnExprContext::kInsideValidBlock),
non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
- next_function_is_parenthesized_(false),
- this_function_is_parenthesized_(false) {
+ next_function_is_likely_called_(false),
+ previous_function_was_likely_called_(false) {
*function_state_stack = this;
if (outer_function_state_) {
- this_function_is_parenthesized_ =
- outer_function_state_->next_function_is_parenthesized_;
- outer_function_state_->next_function_is_parenthesized_ = false;
+ outer_function_state_->previous_function_was_likely_called_ =
+ outer_function_state_->next_function_is_likely_called_;
+ outer_function_state_->next_function_is_likely_called_ = false;
}
}
@@ -1594,7 +1605,7 @@ ParserBase<Impl>::ParseAndClassifyIdentifier(bool* ok) {
}
if (classifier()->duplicate_finder() != nullptr &&
- scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+ scanner()->FindSymbol(classifier()->duplicate_finder())) {
classifier()->RecordDuplicateFormalParameterError(scanner()->location());
}
return name;
@@ -1728,8 +1739,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
return impl()->ExpressionFromLiteral(Next(), beg_pos);
case Token::ASYNC:
- if (allow_harmony_async_await() &&
- !scanner()->HasAnyLineTerminatorAfterNext() &&
+ if (!scanner()->HasAnyLineTerminatorAfterNext() &&
PeekAhead() == Token::FUNCTION) {
Consume(Token::ASYNC);
return ParseAsyncFunctionLiteral(CHECK_OK);
@@ -1789,8 +1799,9 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePrimaryExpression(
}
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
- function_state_->set_next_function_is_parenthesized(peek() ==
- Token::FUNCTION);
+ if (peek() == Token::FUNCTION) {
+ function_state_->set_next_function_is_likely_called();
+ }
ExpressionT expr = ParseExpressionCoverGrammar(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
return expr;
@@ -1902,6 +1913,13 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
// a trailing comma is allowed at the end of an arrow parameter list
break;
}
+
+ // Pass on the 'set_next_function_is_likely_called' flag if we have
+ // several function literals separated by comma.
+ if (peek() == Token::FUNCTION &&
+ function_state_->previous_function_was_likely_called()) {
+ function_state_->set_next_function_is_likely_called();
+ }
}
return result;
@@ -2025,7 +2043,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
Token::Value token = peek();
int pos = peek_position();
- if (allow_harmony_async_await() && !*is_generator && token == Token::ASYNC &&
+ if (!*is_generator && token == Token::ASYNC &&
!scanner()->HasAnyLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
token = peek();
@@ -2091,6 +2109,22 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
break;
}
+ case Token::ELLIPSIS:
+ if (allow_harmony_object_spread()) {
+ // TODO(gsathya): Implement destructuring/rest
+ classifier()->RecordPatternError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken);
+
+ *name = impl()->EmptyIdentifier();
+ Consume(Token::ELLIPSIS);
+ ExpressionClassifier spread_classifier(this);
+ expression = ParseAssignmentExpression(true, CHECK_OK);
+ impl()->RewriteNonPattern(CHECK_OK);
+ impl()->AccumulateFormalParameterContainmentErrors();
+ *kind = PropertyKind::kSpreadProperty;
+ return expression;
+ }
+
default:
*name = ParseIdentifierName(CHECK_OK);
break;
@@ -2114,17 +2148,18 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePropertyName(
template <typename Impl>
typename ParserBase<Impl>::ClassLiteralPropertyT
-ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
- bool has_extends,
- bool* is_computed_name,
- bool* has_seen_constructor,
- bool* ok) {
- DCHECK(has_seen_constructor != nullptr);
+ParserBase<Impl>::ParseClassPropertyDefinition(
+ ClassLiteralChecker* checker, bool has_extends, bool* is_computed_name,
+ bool* has_seen_constructor, ClassLiteralProperty::Kind* property_kind,
+ bool* is_static, bool* has_name_static_property, bool* ok) {
+ DCHECK_NOT_NULL(has_seen_constructor);
+ DCHECK_NOT_NULL(has_name_static_property);
bool is_get = false;
bool is_set = false;
bool is_generator = false;
bool is_async = false;
- bool is_static = false;
+ *is_static = false;
+ *property_kind = ClassLiteralProperty::METHOD;
PropertyKind kind = PropertyKind::kNotSet;
Token::Value name_token = peek();
@@ -2142,7 +2177,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
name_expression = factory()->NewStringLiteral(name, position());
} else {
- is_static = true;
+ *is_static = true;
name_expression = ParsePropertyName(
&name, &kind, &is_generator, &is_get, &is_set, &is_async,
is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
@@ -2153,6 +2188,10 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
is_computed_name, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
}
+ if (!*has_name_static_property && *is_static && impl()->IsName(name)) {
+ *has_name_static_property = true;
+ }
+
switch (kind) {
case PropertyKind::kClassField:
case PropertyKind::kNotSet: // This case is a name followed by a name or
@@ -2169,9 +2208,10 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
ExpressionT function_literal = ParseClassFieldForInitializer(
has_initializer, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
ExpectSemicolon(CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ *property_kind = ClassLiteralProperty::FIELD;
return factory()->NewClassLiteralProperty(
- name_expression, function_literal, ClassLiteralProperty::FIELD,
- is_static, *is_computed_name);
+ name_expression, function_literal, *property_kind, *is_static,
+ *is_computed_name);
} else {
ReportUnexpectedToken(Next());
*ok = false;
@@ -2188,7 +2228,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
if (!*is_computed_name) {
checker->CheckClassMethodName(
name_token, PropertyKind::kMethodProperty, is_generator, is_async,
- is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ *is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
}
FunctionKind kind = is_generator
@@ -2196,9 +2236,9 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
: is_async ? FunctionKind::kAsyncConciseMethod
: FunctionKind::kConciseMethod;
- if (!is_static && impl()->IsConstructor(name)) {
+ if (!*is_static && impl()->IsConstructor(name)) {
*has_seen_constructor = true;
- kind = has_extends ? FunctionKind::kSubclassConstructor
+ kind = has_extends ? FunctionKind::kDerivedConstructor
: FunctionKind::kBaseConstructor;
}
@@ -2207,9 +2247,10 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
kNoSourcePosition, FunctionLiteral::kAccessorOrMethod,
language_mode(), CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ *property_kind = ClassLiteralProperty::METHOD;
return factory()->NewClassLiteralProperty(name_expression, value,
- ClassLiteralProperty::METHOD,
- is_static, *is_computed_name);
+ *property_kind, *is_static,
+ *is_computed_name);
}
case PropertyKind::kAccessorProperty: {
@@ -2218,7 +2259,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
if (!*is_computed_name) {
checker->CheckClassMethodName(
name_token, PropertyKind::kAccessorProperty, false, false,
- is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
+ *is_static, CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
// Make sure the name expression is a string since we need a Name for
// Runtime_DefineAccessorPropertyUnchecked and since we can determine
// this statically we can skip the extra runtime check.
@@ -2238,11 +2279,14 @@ ParserBase<Impl>::ParseClassPropertyDefinition(ClassLiteralChecker* checker,
impl()->AddAccessorPrefixToFunctionName(is_get, value, name);
}
- return factory()->NewClassLiteralProperty(
- name_expression, value,
- is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER,
- is_static, *is_computed_name);
+ *property_kind =
+ is_get ? ClassLiteralProperty::GETTER : ClassLiteralProperty::SETTER;
+ return factory()->NewClassLiteralProperty(name_expression, value,
+ *property_kind, *is_static,
+ *is_computed_name);
}
+ case PropertyKind::kSpreadProperty:
+ UNREACHABLE();
}
UNREACHABLE();
return impl()->EmptyClassLiteralProperty();
@@ -2279,8 +2323,7 @@ ParserBase<Impl>::ParseClassFieldForInitializer(bool has_initializer,
initializer_state.expected_property_count(), 0, 0,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, default_eager_compile_hint_,
- initializer_scope->start_position(), true);
- function_literal->set_is_class_field_initializer(true);
+ initializer_scope->start_position(), true, GetNextFunctionLiteralId());
return function_literal;
}
@@ -2305,6 +2348,18 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
is_computed_name, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
switch (kind) {
+ case PropertyKind::kSpreadProperty:
+ DCHECK(allow_harmony_object_spread());
+ DCHECK(!is_get && !is_set && !is_generator && !is_async &&
+ !*is_computed_name);
+ DCHECK(name_token == Token::ELLIPSIS);
+
+ *is_computed_name = true;
+
+ return factory()->NewObjectLiteralProperty(
+ impl()->GetLiteralTheHole(kNoSourcePosition), name_expression,
+ ObjectLiteralProperty::SPREAD, true);
+
case PropertyKind::kValueProperty: {
DCHECK(!is_get && !is_set && !is_generator && !is_async);
@@ -2347,7 +2402,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ObjectLiteralChecker* checker,
DCHECK(!*is_computed_name);
if (classifier()->duplicate_finder() != nullptr &&
- scanner()->FindSymbol(classifier()->duplicate_finder(), 1) != 0) {
+ scanner()->FindSymbol(classifier()->duplicate_finder())) {
classifier()->RecordDuplicateFormalParameterError(
scanner()->location());
}
@@ -2612,7 +2667,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
Scope::Snapshot scope_snapshot(scope());
- bool is_async = allow_harmony_async_await() && peek() == Token::ASYNC &&
+ bool is_async = peek() == Token::ASYNC &&
!scanner()->HasAnyLineTerminatorAfterNext() &&
IsValidArrowFormalParametersStart(PeekAhead());
@@ -2732,7 +2787,7 @@ ParserBase<Impl>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
MessageTemplate::kInvalidLhsInAssignment, CHECK_OK);
}
- expression = impl()->MarkExpressionAsAssigned(expression);
+ impl()->MarkExpressionAsAssigned(expression);
Token::Value op = Next(); // Get assignment operator.
if (op != Token::ASSIGN) {
@@ -2944,6 +2999,12 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
op = Next();
int pos = position();
+
+ // Assume "! function ..." indicates the function is likely to be called.
+ if (op == Token::NOT && peek() == Token::FUNCTION) {
+ function_state_->set_next_function_is_likely_called();
+ }
+
ExpressionT expression = ParseUnaryExpression(CHECK_OK);
impl()->RewriteNonPattern(CHECK_OK);
@@ -2973,7 +3034,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseUnaryExpression(
expression = CheckAndRewriteReferenceExpression(
expression, beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPrefixOp, CHECK_OK);
- expression = impl()->MarkExpressionAsAssigned(expression);
+ impl()->MarkExpressionAsAssigned(expression);
impl()->RewriteNonPattern(CHECK_OK);
return factory()->NewCountOperation(op,
@@ -3013,7 +3074,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParsePostfixExpression(
expression = CheckAndRewriteReferenceExpression(
expression, lhs_beg_pos, scanner()->location().end_pos,
MessageTemplate::kInvalidLhsInPostfixOp, CHECK_OK);
- expression = impl()->MarkExpressionAsAssigned(expression);
+ impl()->MarkExpressionAsAssigned(expression);
impl()->RewriteNonPattern(CHECK_OK);
Token::Value next = Next();
@@ -3119,7 +3180,6 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
bool is_super_call = result->IsSuperCallReference();
if (spread_pos.IsValid()) {
- args = impl()->PrepareSpreadArguments(args);
result = impl()->SpreadCall(result, args, pos);
} else {
result = factory()->NewCall(result, args, pos, is_possibly_eval);
@@ -3128,7 +3188,6 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// Explicit calls to the super constructor using super() perform an
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
- result = impl()->RewriteSuperCall(result);
ExpressionT this_expr = impl()->ThisExpression(pos);
result =
factory()->NewAssignment(Token::INIT, this_expr, result, pos);
@@ -3211,7 +3270,6 @@ ParserBase<Impl>::ParseMemberWithNewPrefixesExpression(bool* is_async,
ExpressionListT args = ParseArguments(&spread_pos, CHECK_OK);
if (spread_pos.IsValid()) {
- args = impl()->PrepareSpreadArguments(args);
result = impl()->SpreadCallNew(result, args, new_pos);
} else {
result = factory()->NewCallNew(result, args, new_pos);
@@ -3310,7 +3368,7 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseSuperExpression(
}
// new super() is never allowed.
// super() is only allowed in derived constructor
- if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
+ if (!is_new && peek() == Token::LPAREN && IsDerivedConstructor(kind)) {
// TODO(rossberg): This might not be the correct FunctionState for the
// method here.
return impl()->NewSuperCallReference(pos);
@@ -3501,10 +3559,7 @@ void ParserBase<Impl>::ParseFormalParameterList(FormalParametersT* parameters,
}
}
- for (int i = 0; i < parameters->arity; ++i) {
- auto parameter = parameters->at(i);
- impl()->DeclareFormalParameter(parameters->scope, parameter);
- }
+ impl()->DeclareFormalParameters(parameters->scope, parameters->params);
}
template <typename Impl>
@@ -3725,8 +3780,23 @@ ParserBase<Impl>::ParseHoistableDeclaration(
pos, FunctionLiteral::kDeclaration, language_mode(),
CHECK_OK_CUSTOM(NullStatement));
- return impl()->DeclareFunction(variable_name, function, pos, is_generator,
- is_async, names, ok);
+ // In ES6, a function behaves as a lexical binding, except in
+ // a script scope, or the initial scope of eval or another function.
+ VariableMode mode =
+ (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
+ : VAR;
+ // Async functions don't undergo sloppy mode block scoped hoisting, and don't
+ // allow duplicates in a block. Both are represented by the
+ // sloppy_block_function_map. Don't add them to the map for async functions.
+ // Generators are also supposed to be prohibited; currently doing this behind
+ // a flag and UseCounting violations to assess web compatibility.
+ bool is_sloppy_block_function =
+ is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
+ !is_async && !(allow_harmony_restrictive_generators() && is_generator);
+
+ return impl()->DeclareFunction(variable_name, function, mode, pos,
+ is_generator, is_async,
+ is_sloppy_block_function, names, ok);
}
template <typename Impl>
@@ -3890,10 +3960,14 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT
ParserBase<Impl>::ParseArrowFunctionLiteral(
bool accept_IN, const FormalParametersT& formal_parameters, bool* ok) {
+ const RuntimeCallStats::CounterId counters[2][2] = {
+ {&RuntimeCallStats::ParseBackgroundArrowFunctionLiteral,
+ &RuntimeCallStats::ParseArrowFunctionLiteral},
+ {&RuntimeCallStats::PreParseBackgroundArrowFunctionLiteral,
+ &RuntimeCallStats::PreParseArrowFunctionLiteral}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
- Impl::IsPreParser() ? &RuntimeCallStats::ParseArrowFunctionLiteral
- : &RuntimeCallStats::PreParseArrowFunctionLiteral);
+ counters[Impl::IsPreParser()][parsing_on_main_thread_]);
if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
// ASI inserts `;` after arrow parameters if a line terminator is found.
@@ -3907,6 +3981,7 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
StatementListT body = impl()->NullStatementList();
int materialized_literal_count = -1;
int expected_property_count = -1;
+ int function_literal_id = GetNextFunctionLiteralId();
FunctionKind kind = formal_parameters.scope->function_kind();
FunctionLiteral::EagerCompileHint eager_compile_hint =
@@ -4044,7 +4119,8 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
formal_parameters.num_parameters(), formal_parameters.function_length,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, eager_compile_hint,
- formal_parameters.scope->start_position(), has_braces);
+ formal_parameters.scope->start_position(), has_braces,
+ function_literal_id);
function_literal->set_function_token_position(
formal_parameters.scope->start_position());
@@ -4102,14 +4178,26 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
FuncNameInferrer::State fni_state(fni_);
bool is_computed_name = false; // Classes do not care about computed
// property names here.
+ bool is_static;
+ ClassLiteralProperty::Kind property_kind;
ExpressionClassifier property_classifier(this);
+ // If we haven't seen the constructor yet, it potentially is the next
+ // property.
+ bool is_constructor = !class_info.has_seen_constructor;
ClassLiteralPropertyT property = ParseClassPropertyDefinition(
&checker, has_extends, &is_computed_name,
- &class_info.has_seen_constructor, CHECK_OK);
+ &class_info.has_seen_constructor, &property_kind, &is_static,
+ &class_info.has_name_static_property, CHECK_OK);
+ if (!class_info.has_static_computed_names && is_static &&
+ is_computed_name) {
+ class_info.has_static_computed_names = true;
+ }
+ is_constructor &= class_info.has_seen_constructor;
impl()->RewriteNonPattern(CHECK_OK);
impl()->AccumulateFormalParameterContainmentErrors();
- impl()->DeclareClassProperty(name, property, &class_info, CHECK_OK);
+ impl()->DeclareClassProperty(name, property, property_kind, is_static,
+ is_constructor, &class_info, CHECK_OK);
impl()->InferFunctionName();
}
@@ -4123,8 +4211,6 @@ void ParserBase<Impl>::ParseAsyncFunctionBody(Scope* scope, StatementListT body,
FunctionBodyType body_type,
bool accept_IN, int pos,
bool* ok) {
- scope->ForceContextAllocation();
-
impl()->PrepareAsyncFunctionBody(body, kind, pos);
BlockT block = factory()->NewBlock(nullptr, 8, true, kNoSourcePosition);
@@ -4291,7 +4377,12 @@ ParserBase<Impl>::CheckAndRewriteReferenceExpression(
}
if (expression->IsCall()) {
// If it is a call, make it a runtime error for legacy web compatibility.
+ // Bug: https://bugs.chromium.org/p/v8/issues/detail?id=4480
// Rewrite `expr' to `expr[throw ReferenceError]'.
+ impl()->CountUsage(
+ is_strict(language_mode())
+ ? v8::Isolate::kAssigmentExpressionLHSIsCallInStrict
+ : v8::Isolate::kAssigmentExpressionLHSIsCallInSloppy);
ExpressionT error = impl()->NewThrowReferenceError(message, beg_pos);
return factory()->NewProperty(expression, error, beg_pos);
}
@@ -4473,7 +4564,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStatementListItem(
}
break;
case Token::ASYNC:
- if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ if (PeekAhead() == Token::FUNCTION &&
!scanner()->HasAnyLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
return ParseAsyncFunctionDeclaration(nullptr, false, ok);
@@ -4866,13 +4957,13 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseReturnStatement(
ExpressionT return_value = impl()->EmptyExpression();
if (scanner()->HasAnyLineTerminatorBeforeNext() || tok == Token::SEMICOLON ||
tok == Token::RBRACE || tok == Token::EOS) {
- if (IsSubclassConstructor(function_state_->kind())) {
+ if (IsDerivedConstructor(function_state_->kind())) {
return_value = impl()->ThisExpression(loc.beg_pos);
} else {
return_value = impl()->GetLiteralUndefined(position());
}
} else {
- if (IsSubclassConstructor(function_state_->kind())) {
+ if (IsDerivedConstructor(function_state_->kind())) {
// Because of the return code rewriting that happens in case of a subclass
// constructor we don't want to accept tail calls, therefore we don't set
// ReturnExprScope to kInsideValidReturnStatement here.
@@ -5073,7 +5164,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
}
CatchInfo catch_info(this);
- catch_info.for_promise_reject = allow_natives() && Check(Token::MOD);
if (peek() != Token::CATCH && peek() != Token::FINALLY) {
ReportMessage(MessageTemplate::kNoCatchOrFinally);
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 3ed7af267e..bcafd78853 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -8,6 +8,7 @@
#include "src/api.h"
#include "src/ast/ast-expression-rewriter.h"
+#include "src/ast/ast-function-literal-id-reindexer.h"
#include "src/ast/ast-literal-reindexer.h"
#include "src/ast/ast-traversal-visitor.h"
#include "src/ast/ast.h"
@@ -15,6 +16,7 @@
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/parsing/duplicate-finder.h"
#include "src/parsing/parameter-initializer-rewriter.h"
#include "src/parsing/parse-info.h"
@@ -112,8 +114,13 @@ class DiscardableZoneScope {
fni_(parser->ast_value_factory_, temp_zone),
parser_(parser),
prev_fni_(parser->fni_),
- prev_zone_(parser->zone_) {
+ prev_zone_(parser->zone_),
+ prev_allow_lazy_(parser->allow_lazy_),
+ prev_temp_zoned_(parser->temp_zoned_) {
if (use_temp_zone) {
+ DCHECK(!parser_->temp_zoned_);
+ parser_->allow_lazy_ = false;
+ parser_->temp_zoned_ = true;
parser_->fni_ = &fni_;
parser_->zone_ = temp_zone;
if (parser_->reusable_preparser_ != nullptr) {
@@ -125,6 +132,8 @@ class DiscardableZoneScope {
void Reset() {
parser_->fni_ = prev_fni_;
parser_->zone_ = prev_zone_;
+ parser_->allow_lazy_ = prev_allow_lazy_;
+ parser_->temp_zoned_ = prev_temp_zoned_;
if (parser_->reusable_preparser_ != nullptr) {
parser_->reusable_preparser_->zone_ = prev_zone_;
parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
@@ -139,6 +148,8 @@ class DiscardableZoneScope {
Parser* parser_;
FuncNameInferrer* prev_fni_;
Zone* prev_zone_;
+ bool prev_allow_lazy_;
+ bool prev_temp_zoned_;
DISALLOW_COPY_AND_ASSIGN(DiscardableZoneScope);
};
@@ -146,82 +157,26 @@ class DiscardableZoneScope {
void Parser::SetCachedData(ParseInfo* info) {
DCHECK_NULL(cached_parse_data_);
if (consume_cached_parse_data()) {
- cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
- if (cached_parse_data_ == nullptr) {
- compile_options_ = ScriptCompiler::kNoCompileOptions;
+ if (allow_lazy_) {
+ cached_parse_data_ = ParseData::FromCachedData(*info->cached_data());
+ if (cached_parse_data_ != nullptr) return;
}
+ compile_options_ = ScriptCompiler::kNoCompileOptions;
}
}
-Expression* Parser::CallClassFieldInitializer(Scope* scope,
- Expression* this_expr) {
- // This produces the expression
- // `.class_field_intializer(this_expr)`, where '.class_field_intializer' is
- // the name
- // of a synthetic variable.
- // 'this_expr' will be 'this' in a base constructor and the result of calling
- // 'super' in a derived one.
- const AstRawString* init_fn_name =
- ast_value_factory()->dot_class_field_init_string();
- VariableProxy* init_fn_proxy = scope->NewUnresolved(factory(), init_fn_name);
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(init_fn_proxy, zone());
- args->Add(this_expr, zone());
- return factory()->NewCallRuntime(Runtime::kInlineCall, args,
- kNoSourcePosition);
-}
-
-Expression* Parser::RewriteSuperCall(Expression* super_call) {
- // TODO(bakkot) find a way to avoid this for classes without fields.
- if (!allow_harmony_class_fields()) {
- return super_call;
- }
- // This turns a super call `super()` into a do expression of the form
- // do {
- // tmp x = super();
- // if (.class-field-init)
- // .class-field-init(x)
- // x; // This isn't actually present; our do-expression representation
- // allows specifying that the expression returns x directly.
- // }
- Variable* var_tmp =
- scope()->NewTemporary(ast_value_factory()->empty_string());
- Block* block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(var_tmp), super_call,
- kNoSourcePosition);
- block->statements()->Add(
- factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
- const AstRawString* init_fn_name =
- ast_value_factory()->dot_class_field_init_string();
- VariableProxy* init_fn_proxy =
- scope()->NewUnresolved(factory(), init_fn_name);
- Expression* condition = init_fn_proxy;
- Statement* initialize = factory()->NewExpressionStatement(
- CallClassFieldInitializer(scope(), factory()->NewVariableProxy(var_tmp)),
- kNoSourcePosition);
- IfStatement* if_statement = factory()->NewIfStatement(
- condition, initialize, factory()->NewEmptyStatement(kNoSourcePosition),
- kNoSourcePosition);
- block->statements()->Add(if_statement, zone());
- return factory()->NewDoExpression(block, var_tmp, kNoSourcePosition);
-}
-
FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
- bool call_super,
- bool requires_class_field_init,
- int pos, int end_pos,
- LanguageMode language_mode) {
+ bool call_super, int pos,
+ int end_pos) {
int materialized_literal_count = -1;
int expected_property_count = -1;
const int parameter_count = 0;
if (name == nullptr) name = ast_value_factory()->empty_string();
- FunctionKind kind = call_super ? FunctionKind::kDefaultSubclassConstructor
+ FunctionKind kind = call_super ? FunctionKind::kDefaultDerivedConstructor
: FunctionKind::kDefaultBaseConstructor;
DeclarationScope* function_scope = NewFunctionScope(kind);
- SetLanguageMode(function_scope,
- static_cast<LanguageMode>(language_mode | STRICT));
+ SetLanguageMode(function_scope, STRICT);
// Set start and end position to the same value
function_scope->set_start_position(pos);
function_scope->set_end_position(pos);
@@ -233,9 +188,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
if (call_super) {
- // $super_constructor = %_GetSuperConstructor(<this-function>)
- // %reflect_construct(
- // $super_constructor, InternalArray(...args), new.target)
+ // Create a SuperCallReference and handle it in the BytecodeGenerator.
auto constructor_args_name = ast_value_factory()->empty_string();
bool is_duplicate;
bool is_rest = true;
@@ -245,29 +198,13 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
ast_value_factory());
ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(2, zone());
- VariableProxy* this_function_proxy =
- NewUnresolved(ast_value_factory()->this_function_string(), pos);
- ZoneList<Expression*>* tmp =
new (zone()) ZoneList<Expression*>(1, zone());
- tmp->Add(this_function_proxy, zone());
- Expression* super_constructor = factory()->NewCallRuntime(
- Runtime::kInlineGetSuperConstructor, tmp, pos);
- args->Add(super_constructor, zone());
Spread* spread_args = factory()->NewSpread(
factory()->NewVariableProxy(constructor_args), pos, pos);
- ZoneList<Expression*>* spread_args_expr =
- new (zone()) ZoneList<Expression*>(1, zone());
- spread_args_expr->Add(spread_args, zone());
- args->AddAll(*PrepareSpreadArguments(spread_args_expr), zone());
- VariableProxy* new_target_proxy =
- NewUnresolved(ast_value_factory()->new_target_string(), pos);
- args->Add(new_target_proxy, zone());
- Expression* call = factory()->NewCallRuntime(
- Context::REFLECT_CONSTRUCT_INDEX, args, pos);
- if (requires_class_field_init) {
- call = CallClassFieldInitializer(scope(), call);
- }
+
+ args->Add(spread_args, zone());
+ Expression* super_call_ref = NewSuperCallReference(pos);
+ Expression* call = factory()->NewCall(super_call_ref, args, pos);
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
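
After this change the synthesized default derived constructor behaves as if written `constructor(...args) { super(...args); }`, with the super call left to the bytecode generator instead of the old %reflect_construct desugaring. A sketch of the observable behavior:

```js
class Base { constructor(x) { this.x = x; } }
class Derived extends Base {}    // default constructor synthesized as above
console.log(new Derived(42).x);  // 42 -- arguments are forwarded via the spread
```
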
@@ -280,9 +217,7 @@ FunctionLiteral* Parser::DefaultConstructor(const AstRawString* name,
expected_property_count, parameter_count, parameter_count,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kAnonymousExpression, default_eager_compile_hint(), pos,
- true);
-
- function_literal->set_requires_class_field_init(requires_class_field_init);
+ true, GetNextFunctionLiteralId());
return function_literal;
}
@@ -511,15 +446,6 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
return NULL;
}
-Expression* Parser::GetIterator(Expression* iterable, int pos) {
- Expression* iterator_symbol_literal =
- factory()->NewSymbolLiteral("iterator_symbol", kNoSourcePosition);
- Expression* prop =
- factory()->NewProperty(iterable, iterator_symbol_literal, pos);
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(0, zone());
- return factory()->NewCall(prop, args, pos);
-}
-
void Parser::MarkTailPosition(Expression* expression) {
expression->MarkTail();
}
@@ -582,7 +508,8 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->ast_value_factory(),
- info->isolate()->counters()->runtime_call_stats()),
+ info->isolate()->counters()->runtime_call_stats(),
+ true),
scanner_(info->unicode_cache()),
reusable_preparser_(nullptr),
original_scope_(nullptr),
@@ -591,7 +518,7 @@ Parser::Parser(ParseInfo* info)
compile_options_(info->compile_options()),
cached_parse_data_(nullptr),
total_preparse_skipped_(0),
- parsing_on_main_thread_(true),
+ temp_zoned_(false),
log_(nullptr) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
@@ -615,25 +542,25 @@ Parser::Parser(ParseInfo* info)
set_default_eager_compile_hint(can_compile_lazily
? FunctionLiteral::kShouldLazyCompile
: FunctionLiteral::kShouldEagerCompile);
- set_allow_lazy(FLAG_lazy && info->allow_lazy_parsing() &&
- !info->is_native() && info->extension() == nullptr &&
- can_compile_lazily);
+ allow_lazy_ = FLAG_lazy && info->allow_lazy_parsing() && !info->is_native() &&
+ info->extension() == nullptr && can_compile_lazily;
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
set_allow_tailcalls(FLAG_harmony_tailcalls && !info->is_native() &&
info->isolate()->is_tail_call_elimination_enabled());
set_allow_harmony_do_expressions(FLAG_harmony_do_expressions);
set_allow_harmony_function_sent(FLAG_harmony_function_sent);
- set_allow_harmony_async_await(FLAG_harmony_async_await);
set_allow_harmony_restrictive_generators(FLAG_harmony_restrictive_generators);
set_allow_harmony_trailing_commas(FLAG_harmony_trailing_commas);
set_allow_harmony_class_fields(FLAG_harmony_class_fields);
+ set_allow_harmony_object_spread(FLAG_harmony_object_spread);
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
use_counts_[feature] = 0;
}
if (info->ast_value_factory() == NULL) {
// info takes ownership of AstValueFactory.
- info->set_ast_value_factory(new AstValueFactory(zone(), info->hash_seed()));
+ info->set_ast_value_factory(new AstValueFactory(
+ zone(), info->isolate()->ast_string_constants(), info->hash_seed()));
info->set_ast_value_factory_owned();
ast_value_factory_ = info->ast_value_factory();
ast_node_factory_.set_ast_value_factory(ast_value_factory_);
@@ -665,7 +592,6 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
-
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_, info->is_eval() ? &RuntimeCallStats::ParseEval
: &RuntimeCallStats::ParseProgram);
@@ -682,7 +608,11 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
ParserLogger logger;
if (produce_cached_parse_data()) {
- log_ = &logger;
+ if (allow_lazy_) {
+ log_ = &logger;
+ } else {
+ compile_options_ = ScriptCompiler::kNoCompileOptions;
+ }
} else if (consume_cached_parse_data()) {
cached_parse_data_->Initialize();
}
@@ -730,7 +660,10 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK_NULL(scope_state_);
DCHECK_NULL(target_stack_);
- ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
+ ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
+ ResetFunctionLiteralId();
+ DCHECK(info->function_literal_id() == FunctionLiteral::kIdTypeTopLevel ||
+ info->function_literal_id() == FunctionLiteral::kIdTypeInvalid);
FunctionLiteral* result = NULL;
{
@@ -764,7 +697,7 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
DCHECK(!is_duplicate);
var->AllocateTo(VariableLocation::PARAMETER, 0);
- PrepareGeneratorVariables(&function_state);
+ PrepareGeneratorVariables();
Expression* initial_yield =
BuildInitialYield(kNoSourcePosition, kGeneratorFunction);
body->Add(
@@ -788,8 +721,6 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
- CheckDecimalLiteralWithLeadingZero(beg_pos,
- scanner()->location().end_pos);
}
if (ok && is_sloppy(language_mode())) {
// TODO(littledan): Function bindings on the global object that modify
@@ -821,6 +752,8 @@ FunctionLiteral* Parser::DoParseProgram(ParseInfo* info) {
}
}
+ info->set_max_function_literal_id(GetLastFunctionLiteralId());
+
// Make sure the target stack is empty.
DCHECK(target_stack_ == NULL);
@@ -842,6 +775,12 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info) {
}
Handle<SharedFunctionInfo> shared_info = info->shared_info();
DeserializeScopeChain(info, info->maybe_outer_scope_info());
+ if (info->asm_function_scope()) {
+ original_scope_ = info->asm_function_scope();
+ factory()->set_zone(info->zone());
+ } else {
+ DCHECK_EQ(factory()->zone(), info->zone());
+ }
// Initialize parser state.
source = String::Flatten(source);
@@ -891,6 +830,10 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
fni_->PushEnclosingName(raw_name);
+ ResetFunctionLiteralId();
+ DCHECK_LT(0, info->function_literal_id());
+ SkipFunctionLiterals(info->function_literal_id() - 1);
+
ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
// Place holder for the result.
@@ -911,7 +854,7 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
bool ok = true;
if (IsArrowFunction(kind)) {
- if (allow_harmony_async_await() && IsAsyncFunction(kind)) {
+ if (IsAsyncFunction(kind)) {
DCHECK(!scanner()->HasAnyLineTerminatorAfterNext());
if (!Check(Token::ASYNC)) {
CHECK(stack_overflow());
@@ -951,12 +894,27 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
} else {
// BindingIdentifier
ParseFormalParameter(&formals, &ok);
- if (ok) DeclareFormalParameter(formals.scope, formals.at(0));
+ if (ok) DeclareFormalParameters(formals.scope, formals.params);
}
}
if (ok) {
checkpoint.Restore(&formals.materialized_literals_count);
+ if (GetLastFunctionLiteralId() != info->function_literal_id() - 1) {
+ // If there were FunctionLiterals in the parameters, renumber them so
+ // that the next function literal id handed out is the one requested
+ // for the arrow function itself.
+ AstFunctionLiteralIdReindexer reindexer(
+ stack_limit_,
+ (info->function_literal_id() - 1) - GetLastFunctionLiteralId());
+ for (auto p : formals.params) {
+ if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
+ if (p->initializer != nullptr) reindexer.Reindex(p->initializer);
+ }
+ ResetFunctionLiteralId();
+ SkipFunctionLiterals(info->function_literal_id() - 1);
+ }
+
// Pass `accept_IN=true` to ParseArrowFunctionLiteral --- This should
// not be observable, or else the preparser would have failed.
Expression* expression = ParseArrowFunctionLiteral(true, formals, &ok);
@@ -979,29 +937,12 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
}
} else if (IsDefaultConstructor(kind)) {
DCHECK_EQ(scope(), outer);
- bool is_subclass_constructor = IsSubclassConstructor(kind);
- result = DefaultConstructor(
- raw_name, is_subclass_constructor, info->requires_class_field_init(),
- info->start_position(), info->end_position(), info->language_mode());
- if (!is_subclass_constructor && info->requires_class_field_init()) {
- result = InsertClassFieldInitializer(result);
- }
- } else if (info->is_class_field_initializer()) {
- Handle<SharedFunctionInfo> shared_info = info->shared_info();
- DCHECK(!shared_info.is_null());
- if (shared_info->length() == 0) {
- result = ParseClassFieldForInitializer(
- info->start_position() != info->end_position(), &ok);
- } else {
- result = SynthesizeClassFieldInitializer(shared_info->length());
- }
+ result = DefaultConstructor(raw_name, IsDerivedConstructor(kind),
+ info->start_position(), info->end_position());
} else {
result = ParseFunctionLiteral(
raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
kNoSourcePosition, function_type, info->language_mode(), &ok);
- if (info->requires_class_field_init()) {
- result = InsertClassFieldInitializer(result);
- }
}
// Make sure the results agree.
DCHECK(ok == (result != nullptr));
@@ -1009,6 +950,8 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
// Make sure the target stack is empty.
DCHECK_NULL(target_stack_);
+ DCHECK_IMPLIES(result,
+ info->function_literal_id() == result->function_literal_id());
return result;
}
@@ -1290,7 +1233,7 @@ Statement* Parser::ParseExportDefault(bool* ok) {
break;
case Token::ASYNC:
- if (allow_harmony_async_await() && PeekAhead() == Token::FUNCTION &&
+ if (PeekAhead() == Token::FUNCTION &&
!scanner()->HasAnyLineTerminatorAfterNext()) {
Consume(Token::ASYNC);
result = ParseAsyncFunctionDeclaration(&local_names, true, CHECK_OK);
@@ -1423,14 +1366,11 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
break;
case Token::ASYNC:
- if (allow_harmony_async_await()) {
- // TODO(neis): Why don't we have the same check here as in
- // ParseStatementListItem?
- Consume(Token::ASYNC);
- result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
- break;
- }
- /* falls through */
+ // TODO(neis): Why don't we have the same check here as in
+ // ParseStatementListItem?
+ Consume(Token::ASYNC);
+ result = ParseAsyncFunctionDeclaration(&names, false, CHECK_OK);
+ break;
default:
*ok = false;
@@ -1533,15 +1473,11 @@ void Parser::DeclareAndInitializeVariables(
}
Statement* Parser::DeclareFunction(const AstRawString* variable_name,
- FunctionLiteral* function, int pos,
- bool is_generator, bool is_async,
+ FunctionLiteral* function, VariableMode mode,
+ int pos, bool is_generator, bool is_async,
+ bool is_sloppy_block_function,
ZoneList<const AstRawString*>* names,
bool* ok) {
- // In ES6, a function behaves as a lexical binding, except in
- // a script scope, or the initial scope of eval or another function.
- VariableMode mode =
- (!scope()->is_declaration_scope() || scope()->is_module_scope()) ? LET
- : VAR;
VariableProxy* proxy =
factory()->NewVariableProxy(variable_name, NORMAL_VARIABLE);
Declaration* declaration =
@@ -1549,18 +1485,12 @@ Statement* Parser::DeclareFunction(const AstRawString* variable_name,
Declare(declaration, DeclarationDescriptor::NORMAL, mode, kCreatedInitialized,
CHECK_OK);
if (names) names->Add(variable_name, zone());
- // Async functions don't undergo sloppy mode block scoped hoisting, and don't
- // allow duplicates in a block. Both are represented by the
- // sloppy_block_function_map. Don't add them to the map for async functions.
- // Generators are also supposed to be prohibited; currently doing this behind
- // a flag and UseCounting violations to assess web compatibility.
- if (is_sloppy(language_mode()) && !scope()->is_declaration_scope() &&
- !is_async && !(allow_harmony_restrictive_generators() && is_generator)) {
- SloppyBlockFunctionStatement* delegate =
- factory()->NewSloppyBlockFunctionStatement(scope());
+ if (is_sloppy_block_function) {
+ SloppyBlockFunctionStatement* statement =
+ factory()->NewSloppyBlockFunctionStatement();
DeclarationScope* target_scope = GetDeclarationScope();
- target_scope->DeclareSloppyBlockFunction(variable_name, delegate);
- return delegate;
+ target_scope->DeclareSloppyBlockFunction(variable_name, scope(), statement);
+ return statement;
}
return factory()->NewEmptyStatement(kNoSourcePosition);
}
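
A sketch of the sloppy-mode hoisting that `DeclareSloppyBlockFunction` records (Annex B semantics; in strict mode the declaration stays block-scoped):

```js
// Sloppy mode script:
{
  function g() { return 1; }
}
console.log(g());  // 1 -- g is also hoisted to the enclosing function/script scope
```
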
@@ -1601,6 +1531,7 @@ Statement* Parser::DeclareNative(const AstRawString* name, int pos, bool* ok) {
ZoneList<const AstRawString*>* Parser::DeclareLabel(
ZoneList<const AstRawString*>* labels, VariableProxy* var, bool* ok) {
+ DCHECK(IsIdentifier(var));
const AstRawString* label = var->raw_name();
// TODO(1240780): We don't check for redeclaration of labels
// during preparsing since keeping track of the set of active
@@ -1635,7 +1566,7 @@ bool Parser::ContainsLabel(ZoneList<const AstRawString*>* labels,
}
Expression* Parser::RewriteReturn(Expression* return_value, int pos) {
- if (IsSubclassConstructor(function_state_->kind())) {
+ if (IsDerivedConstructor(function_state_->kind())) {
// For subclass constructors we need to return `this` in case of an
// undefined return value, and a Smi (transformed into an exception in
// the ConstructStub) for a non-object return value.
@@ -1742,8 +1673,7 @@ void Parser::RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
DCHECK_NOT_NULL(catch_info->pattern);
catch_info->name = ast_value_factory()->dot_catch_string();
}
- catch_info->variable = catch_info->scope->DeclareLocal(
- catch_info->name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+ catch_info->variable = catch_info->scope->DeclareLocal(catch_info->name, VAR);
if (catch_info->pattern != nullptr) {
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::NORMAL;
@@ -1802,15 +1732,9 @@ Statement* Parser::RewriteTryStatement(Block* try_block, Block* catch_block,
DCHECK_NOT_NULL(catch_info.scope);
DCHECK_NOT_NULL(catch_info.variable);
TryCatchStatement* statement;
- if (catch_info.for_promise_reject) {
- statement = factory()->NewTryCatchStatementForPromiseReject(
- try_block, catch_info.scope, catch_info.variable, catch_block,
- kNoSourcePosition);
- } else {
- statement = factory()->NewTryCatchStatement(
- try_block, catch_info.scope, catch_info.variable, catch_block,
- kNoSourcePosition);
- }
+ statement = factory()->NewTryCatchStatement(try_block, catch_info.scope,
+ catch_info.variable,
+ catch_block, kNoSourcePosition);
try_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
try_block->statements()->Add(statement, zone());
@@ -1897,6 +1821,7 @@ Statement* Parser::InitializeForEachStatement(ForEachStatement* stmt,
body = block;
each = factory()->NewVariableProxy(temp);
}
+ MarkExpressionAsAssigned(each);
stmt->AsForInStatement()->Initialize(each, subject, body);
}
return stmt;
@@ -1958,6 +1883,7 @@ void Parser::DesugarBindingInForEachStatement(ForInfo* for_info,
Block** body_block,
Expression** each_variable,
bool* ok) {
+ DCHECK(for_info->parsing_result.declarations.length() == 1);
DeclarationParsingResult::Declaration& decl =
for_info->parsing_result.declarations[0];
Variable* temp = NewTemporary(ast_value_factory()->dot_for_string());
@@ -2046,16 +1972,17 @@ Statement* Parser::InitializeForOfStatement(ForOfStatement* for_of,
const int nopos = kNoSourcePosition;
auto avfactory = ast_value_factory();
- Variable* iterator = NewTemporary(ast_value_factory()->dot_iterator_string());
- Variable* result = NewTemporary(ast_value_factory()->dot_result_string());
+ Variable* iterator = NewTemporary(avfactory->dot_iterator_string());
+ Variable* result = NewTemporary(avfactory->dot_result_string());
Variable* completion = NewTemporary(avfactory->empty_string());
- // iterator = iterable[Symbol.iterator]()
+ // iterator = GetIterator(iterable)
Expression* assign_iterator;
{
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
- GetIterator(iterable, iterable->position()), iterable->position());
+ factory()->NewGetIterator(iterable, iterable->position()),
+ iterable->position());
}
// !%_IsJSReceiver(result = iterator.next()) &&
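
The desugaring comment changes from `iterable[Symbol.iterator]()` to the new GetIterator AST node; both evaluate the same thing, as this sketch shows:

```js
const iterable = [10, 20];
const iterator = iterable[Symbol.iterator]();  // what GetIterator performs
console.log(iterator.next());                  // { value: 10, done: false }
```
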
@@ -2467,15 +2394,11 @@ void Parser::DeclareArrowFunctionFormalParameters(
if (!parameters->is_simple) {
this->classifier()->RecordNonSimpleParameter();
}
- for (int i = 0; i < parameters->arity; ++i) {
- auto parameter = parameters->at(i);
- DeclareFormalParameter(parameters->scope, parameter);
- if (!this->classifier()
- ->is_valid_formal_parameter_list_without_duplicates() &&
- !duplicate_loc->IsValid()) {
- *duplicate_loc =
- this->classifier()->duplicate_formal_parameter_error().location;
- }
+ DeclareFormalParameters(parameters->scope, parameters->params);
+ if (!this->classifier()
+ ->is_valid_formal_parameter_list_without_duplicates()) {
+ *duplicate_loc =
+ this->classifier()->duplicate_formal_parameter_error().location;
}
DCHECK_EQ(parameters->is_simple, parameters->scope->has_simple_parameters());
}
@@ -2484,28 +2407,28 @@ void Parser::ReindexLiterals(const ParserFormalParameters& parameters) {
if (function_state_->materialized_literal_count() > 0) {
AstLiteralReindexer reindexer;
- for (const auto p : parameters.params) {
- if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
- if (p.initializer != nullptr) reindexer.Reindex(p.initializer);
+ for (auto p : parameters.params) {
+ if (p->pattern != nullptr) reindexer.Reindex(p->pattern);
+ if (p->initializer != nullptr) reindexer.Reindex(p->initializer);
}
DCHECK(reindexer.count() <= function_state_->materialized_literal_count());
}
}
-void Parser::PrepareGeneratorVariables(FunctionState* function_state) {
- // For generators, allocating variables in contexts is currently a win
- // because it minimizes the work needed to suspend and resume an
- // activation. The machine code produced for generators (by full-codegen)
- // relies on this forced context allocation, but not in an essential way.
- scope()->ForceContextAllocation();
+void Parser::PrepareGeneratorVariables() {
+ // For generators, allocating variables in contexts is currently a win because
+ // it minimizes the work needed to suspend and resume an activation. The
+ // code produced for generators relies on this forced context allocation (it
+ // does not restore the frame's parameters upon resume).
+ function_state_->scope()->ForceContextAllocation();
// Calling a generator returns a generator object. That object is stored
// in a temporary variable, a definition that is used by "yield"
// expressions.
Variable* temp =
NewTemporary(ast_value_factory()->dot_generator_object_string());
- function_state->set_generator_object_variable(temp);
+ function_state_->set_generator_object_variable(temp);
}
FunctionLiteral* Parser::ParseFunctionLiteral(
@@ -2536,7 +2459,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
}
FunctionLiteral::EagerCompileHint eager_compile_hint =
- function_state_->next_function_is_parenthesized()
+ function_state_->next_function_is_likely_called()
? FunctionLiteral::kShouldEagerCompile
: default_eager_compile_hint();
@@ -2573,7 +2496,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// immediately). bar can be parsed lazily, but we need to parse it in a mode
// that tracks unresolved variables.
DCHECK_IMPLIES(parse_lazily(), FLAG_lazy);
- DCHECK_IMPLIES(parse_lazily(), allow_lazy());
+ DCHECK_IMPLIES(parse_lazily(), allow_lazy_);
DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
bool can_preparse = parse_lazily() &&
@@ -2582,8 +2505,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
bool is_lazy_top_level_function =
can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
- RuntimeCallTimerScope runtime_timer(runtime_call_stats_,
- &RuntimeCallStats::ParseFunctionLiteral);
+ RuntimeCallTimerScope runtime_timer(
+ runtime_call_stats_,
+ parsing_on_main_thread_
+ ? &RuntimeCallStats::ParseFunctionLiteral
+ : &RuntimeCallStats::ParseBackgroundFunctionLiteral);
// Determine whether we can still lazy parse the inner function.
// The preconditions are:
@@ -2598,29 +2524,26 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// FunctionExpression; even without enclosing parentheses it might be
// immediately invoked.
// - The function literal shouldn't be hinted to eagerly compile.
- // - For asm.js functions the body needs to be available when module
- // validation is active, because we examine the entire module at once.
// Inner functions will be parsed using a temporary Zone. After parsing, we
// will migrate unresolved variable into a Scope in the main Zone.
// TODO(marja): Refactor parsing modes: simplify this.
bool use_temp_zone =
- (FLAG_lazy_inner_functions
+ (FLAG_aggressive_lazy_inner_functions
? can_preparse
: (is_lazy_top_level_function ||
- (allow_lazy() && function_type == FunctionLiteral::kDeclaration &&
- eager_compile_hint == FunctionLiteral::kShouldLazyCompile))) &&
- !(FLAG_validate_asm && scope()->IsAsmModule());
+ (parse_lazily() &&
+ function_type == FunctionLiteral::kDeclaration &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile)));
+
+ DCHECK_IMPLIES(
+ (is_lazy_top_level_function ||
+ (parse_lazily() && function_type == FunctionLiteral::kDeclaration &&
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile)),
+ can_preparse);
bool is_lazy_inner_function =
use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
- // This Scope lives in the main zone. We'll migrate data into that zone later.
- DeclarationScope* scope = NewFunctionScope(kind);
- SetLanguageMode(scope, language_mode);
-#ifdef DEBUG
- scope->SetScopeName(function_name);
-#endif
-
ZoneList<Statement*>* body = nullptr;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -2628,9 +2551,10 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
int num_parameters = -1;
int function_length = -1;
bool has_duplicate_parameters = false;
+ int function_literal_id = GetNextFunctionLiteralId();
- Expect(Token::LPAREN, CHECK_OK);
- scope->set_start_position(scanner()->location().beg_pos);
+ Zone* outer_zone = zone();
+ DeclarationScope* scope;
{
// Temporary zones can nest. When we migrate free variables (see below), we
@@ -2645,10 +2569,19 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// information when the function is parsed.
Zone temp_zone(zone()->allocator(), ZONE_NAME);
DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
+
+ // This Scope lives in the main zone. We'll migrate data into that zone
+ // later.
+ scope = NewFunctionScope(kind, outer_zone);
+ SetLanguageMode(scope, language_mode);
#ifdef DEBUG
+ scope->SetScopeName(function_name);
if (use_temp_zone) scope->set_needs_migration();
#endif
+ Expect(Token::LPAREN, CHECK_OK);
+ scope->set_start_position(scanner()->location().beg_pos);
+
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
// lazily. We'll call SkipFunction, which may decide to
// abort lazy parsing if it suspects that wasn't a good idea. If so (in
@@ -2695,19 +2628,28 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
scope->AnalyzePartially(&previous_zone_ast_node_factory);
}
+ DCHECK_IMPLIES(use_temp_zone, temp_zoned_);
if (FLAG_trace_preparse) {
PrintF(" [%s]: %i-%i %.*s\n",
is_lazy_top_level_function
? "Preparse no-resolution"
- : (use_temp_zone ? "Preparse resolution" : "Full parse"),
+ : (temp_zoned_ ? "Preparse resolution" : "Full parse"),
scope->start_position(), scope->end_position(),
function_name->byte_length(), function_name->raw_data());
+ }
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
if (is_lazy_top_level_function) {
- CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
- PreParseNoVariableResolution);
- } else if (use_temp_zone) {
- CHANGE_CURRENT_RUNTIME_COUNTER(runtime_call_stats_,
- PreParseWithVariableResolution);
+ RuntimeCallStats::CorrectCurrentCounterId(
+ runtime_call_stats_,
+ parsing_on_main_thread_
+ ? &RuntimeCallStats::PreParseNoVariableResolution
+ : &RuntimeCallStats::PreParseBackgroundNoVariableResolution);
+ } else if (temp_zoned_) {
+ RuntimeCallStats::CorrectCurrentCounterId(
+ runtime_call_stats_,
+ parsing_on_main_thread_
+ ? &RuntimeCallStats::PreParseWithVariableResolution
+ : &RuntimeCallStats::PreParseBackgroundWithVariableResolution);
}
}
@@ -2720,8 +2662,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
- CheckDecimalLiteralWithLeadingZero(scope->start_position(),
- scope->end_position());
}
CheckConflictingVarDeclarations(scope, CHECK_OK);
} // DiscardableZoneScope goes out of scope.
@@ -2734,7 +2674,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
function_name, scope, body, materialized_literal_count,
expected_property_count, num_parameters, function_length,
- duplicate_parameters, function_type, eager_compile_hint, pos, true);
+ duplicate_parameters, function_type, eager_compile_hint, pos, true,
+ function_literal_id);
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
@@ -2782,6 +2723,7 @@ Parser::LazyParsingResult Parser::SkipFunction(
if (entry.uses_super_property())
function_scope->RecordSuperPropertyUsage();
if (entry.calls_eval()) function_scope->RecordEvalCall();
+ SkipFunctionLiterals(entry.num_inner_functions());
return kLazyParsingComplete;
}
cached_parse_data_->Reject();
@@ -2792,17 +2734,16 @@ Parser::LazyParsingResult Parser::SkipFunction(
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.PreParse");
if (reusable_preparser_ == NULL) {
- reusable_preparser_ = new PreParser(zone(), &scanner_, ast_value_factory(),
- &pending_error_handler_,
- runtime_call_stats_, stack_limit_);
- reusable_preparser_->set_allow_lazy(true);
+ reusable_preparser_ = new PreParser(
+ zone(), &scanner_, stack_limit_, ast_value_factory(),
+ &pending_error_handler_, runtime_call_stats_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(harmony_do_expressions);
SET_ALLOW(harmony_function_sent);
- SET_ALLOW(harmony_async_await);
SET_ALLOW(harmony_trailing_commas);
SET_ALLOW(harmony_class_fields);
+ SET_ALLOW(harmony_object_spread);
#undef SET_ALLOW
}
// Aborting inner function preparsing would leave scopes in an inconsistent
@@ -2835,13 +2776,15 @@ Parser::LazyParsingResult Parser::SkipFunction(
*has_duplicate_parameters = logger->has_duplicate_parameters();
*materialized_literal_count = logger->literals();
*expected_property_count = logger->properties();
+ SkipFunctionLiterals(logger->num_inner_functions());
if (!is_inner_function && produce_cached_parse_data()) {
DCHECK(log_);
log_->LogFunction(
function_scope->start_position(), function_scope->end_position(),
*num_parameters, *function_length, *has_duplicate_parameters,
*materialized_literal_count, *expected_property_count, language_mode(),
- function_scope->uses_super_property(), function_scope->calls_eval());
+ function_scope->uses_super_property(), function_scope->calls_eval(),
+ logger->num_inner_functions());
}
return kLazyParsingComplete;
}
@@ -2890,6 +2833,7 @@ class InitializerRewriter final
if (to_rewrite->is_rewritten()) return;
Parser::PatternRewriter::RewriteDestructuringAssignment(parser_, to_rewrite,
scope_);
+ AstTraversalVisitor::VisitRewritableExpression(to_rewrite);
}
// Code in function literals does not need to be eagerly rewritten, it will be
@@ -2912,44 +2856,44 @@ Block* Parser::BuildParameterInitializationBlock(
DCHECK(!parameters.is_simple);
DCHECK(scope()->is_function_scope());
Block* init_block = factory()->NewBlock(NULL, 1, true, kNoSourcePosition);
- for (int i = 0; i < parameters.params.length(); ++i) {
- auto parameter = parameters.params[i];
- if (parameter.is_rest && parameter.pattern->IsVariableProxy()) break;
+ int index = 0;
+ for (auto parameter : parameters.params) {
+ if (parameter->is_rest && parameter->pattern->IsVariableProxy()) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.scope = scope();
descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
- descriptor.declaration_pos = parameter.pattern->position();
+ descriptor.declaration_pos = parameter->pattern->position();
// The position that will be used by the AssignmentExpression
// which copies from the temp parameter to the pattern.
//
// TODO(adamk): Should this be kNoSourcePosition, since
// it's just copying from a temp var to the real param var?
- descriptor.initialization_pos = parameter.pattern->position();
+ descriptor.initialization_pos = parameter->pattern->position();
Expression* initial_value =
- factory()->NewVariableProxy(parameters.scope->parameter(i));
- if (parameter.initializer != nullptr) {
+ factory()->NewVariableProxy(parameters.scope->parameter(index));
+ if (parameter->initializer != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
// Ensure initializer is rewritten
- RewriteParameterInitializer(parameter.initializer, scope());
+ RewriteParameterInitializer(parameter->initializer, scope());
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
- factory()->NewVariableProxy(parameters.scope->parameter(i)),
+ factory()->NewVariableProxy(parameters.scope->parameter(index)),
factory()->NewUndefinedLiteral(kNoSourcePosition), kNoSourcePosition);
initial_value = factory()->NewConditional(
- condition, parameter.initializer, initial_value, kNoSourcePosition);
- descriptor.initialization_pos = parameter.initializer->position();
+ condition, parameter->initializer, initial_value, kNoSourcePosition);
+ descriptor.initialization_pos = parameter->initializer->position();
}
Scope* param_scope = scope();
Block* param_block = init_block;
- if (!parameter.is_simple() && scope()->calls_sloppy_eval()) {
+ if (!parameter->is_simple() && scope()->calls_sloppy_eval()) {
param_scope = NewVarblockScope();
param_scope->set_start_position(descriptor.initialization_pos);
- param_scope->set_end_position(parameter.initializer_end_position);
+ param_scope->set_end_position(parameter->initializer_end_position);
param_scope->RecordEvalCall();
param_block = factory()->NewBlock(NULL, 8, true, kNoSourcePosition);
param_block->set_scope(param_scope);
@@ -2964,7 +2908,7 @@ Block* Parser::BuildParameterInitializationBlock(
BlockState block_state(&scope_state_, param_scope);
DeclarationParsingResult::Declaration decl(
- parameter.pattern, parameter.initializer_end_position, initial_value);
+ parameter->pattern, parameter->initializer_end_position, initial_value);
PatternRewriter::DeclareAndInitializeVariables(
this, param_block, &descriptor, &decl, nullptr, CHECK_OK);
@@ -2975,6 +2919,7 @@ Block* Parser::BuildParameterInitializationBlock(
}
init_block->statements()->Add(param_block, zone());
}
+ ++index;
}
return init_block;
}
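
A sketch of the per-parameter conditional that `BuildParameterInitializationBlock` emits for defaulted parameters (`IS_UNDEFINED($param) ? initializer : $param`):

```js
function f(x = 10) { return x; }  // roughly: let x = ($x === undefined) ? 10 : $x;
console.log(f(undefined));        // 10 -- initializer used
console.log(f(5));                // 5  -- argument passed through
```
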
@@ -3009,8 +2954,7 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
Scope* catch_scope = NewScope(CATCH_SCOPE);
catch_scope->set_is_hidden();
Variable* catch_variable =
- catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, NORMAL_VARIABLE);
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
Block* catch_block = factory()->NewBlock(nullptr, 1, true, kNoSourcePosition);
Expression* promise_reject = BuildRejectPromise(
@@ -3049,15 +2993,20 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block, bool* ok) {
return result;
}
-Expression* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+Assignment* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
+ // .generator = %CreateJSGeneratorObject(...);
DCHECK_NOT_NULL(function_state_->generator_object_variable());
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
args->Add(factory()->NewThisFunction(pos), zone());
args->Add(IsArrowFunction(kind) ? GetLiteralUndefined(pos)
: ThisExpression(kNoSourcePosition),
zone());
- return factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args,
- pos);
+ Expression* allocation =
+ factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args, pos);
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ return factory()->NewAssignment(Token::INIT, proxy, allocation,
+ kNoSourcePosition);
}
Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
@@ -3100,17 +3049,13 @@ Variable* Parser::PromiseVariable() {
}
Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
- Expression* allocation = BuildCreateJSGeneratorObject(pos, kind);
- VariableProxy* init_proxy =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, init_proxy, allocation, kNoSourcePosition);
- VariableProxy* get_proxy =
+ Assignment* assignment = BuildCreateJSGeneratorObject(pos, kind);
+ VariableProxy* generator =
factory()->NewVariableProxy(function_state_->generator_object_variable());
// The position of the yield is important for reporting the exception
// caused by calling the .throw method on a generator suspended at the
// initial yield (i.e. right after generator instantiation).
- return factory()->NewYield(get_proxy, assignment, scope()->start_position(),
+ return factory()->NewYield(generator, assignment, scope()->start_position(),
Yield::kOnExceptionThrow);
}
@@ -3120,12 +3065,14 @@ ZoneList<Statement*>* Parser::ParseFunction(
DeclarationScope* function_scope, int* num_parameters, int* function_length,
bool* has_duplicate_parameters, int* materialized_literal_count,
int* expected_property_count, bool* ok) {
+ ParsingModeScope mode(this, allow_lazy_ ? PARSE_LAZILY : PARSE_EAGERLY);
+
FunctionState function_state(&function_state_, &scope_state_, function_scope);
- DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
- if (IsGeneratorFunction(kind)) PrepareGeneratorVariables(&function_state);
+ if (IsResumableFunction(kind)) PrepareGeneratorVariables();
ParserFormalParameters formals(function_scope);
ParseFormalParameterList(&formals, CHECK_OK);
@@ -3164,7 +3111,6 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
const AstRawString* function_name, int pos,
const ParserFormalParameters& parameters, FunctionKind kind,
FunctionLiteral::FunctionType function_type, bool* ok) {
- ParsingModeScope mode(this, allow_lazy() ? PARSE_LAZILY : PARSE_EAGERLY);
ZoneList<Statement*>* result = new(zone()) ZoneList<Statement*>(8, zone());
static const int kFunctionNameAssignmentIndex = 0;
@@ -3241,7 +3187,7 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
ParseStatementList(body, Token::RBRACE, CHECK_OK);
}
- if (IsSubclassConstructor(kind)) {
+ if (IsDerivedConstructor(kind)) {
body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
kNoSourcePosition),
zone());
@@ -3316,130 +3262,6 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
return result;
}
-Expression* Parser::InstallHomeObject(Expression* function_literal,
- Expression* home_object) {
- Block* do_block = factory()->NewBlock(nullptr, 1, false, kNoSourcePosition);
- Variable* result_var =
- scope()->NewTemporary(ast_value_factory()->empty_string());
- DoExpression* do_expr =
- factory()->NewDoExpression(do_block, result_var, kNoSourcePosition);
- Assignment* init = factory()->NewAssignment(
- Token::ASSIGN, factory()->NewVariableProxy(result_var), function_literal,
- kNoSourcePosition);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(init, kNoSourcePosition), zone());
- Property* home_object_property = factory()->NewProperty(
- factory()->NewVariableProxy(result_var),
- factory()->NewSymbolLiteral("home_object_symbol", kNoSourcePosition),
- kNoSourcePosition);
- Assignment* assignment = factory()->NewAssignment(
- Token::ASSIGN, home_object_property, home_object, kNoSourcePosition);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(assignment, kNoSourcePosition), zone());
- return do_expr;
-}
-
-const AstRawString* ClassFieldVariableName(bool is_name,
- AstValueFactory* ast_value_factory,
- int index) {
- std::string name =
- ".class-field-" + std::to_string(index) + (is_name ? "-name" : "-func");
- return ast_value_factory->GetOneByteString(name.c_str());
-}
-
-FunctionLiteral* Parser::SynthesizeClassFieldInitializer(int count) {
- DCHECK(count > 0);
- // Makes a function which reads the names and initializers for each class
- // field out of deterministically named local variables and sets each property
- // to the result of evaluating its corresponding initializer in turn.
-
- // This produces a function which looks like
- // function () {
- // this[.class-field-0-name] = .class-field-0-func();
- // this[.class-field-1-name] = .class-field-1-func();
- // [...]
- // this[.class-field-n-name] = .class-field-n-func();
- // return this;
- // }
- // except that it performs defineProperty, so that instead of '=' it has
- // %DefineDataPropertyInLiteral(this, .class-field-0-name,
- // .class-field-0-func(),
- // DONT_ENUM, false)
-
- RaiseLanguageMode(STRICT);
- FunctionKind kind = FunctionKind::kConciseMethod;
- DeclarationScope* initializer_scope = NewFunctionScope(kind);
- SetLanguageMode(initializer_scope, language_mode());
- initializer_scope->set_start_position(scanner()->location().end_pos);
- initializer_scope->set_end_position(scanner()->location().end_pos);
- FunctionState initializer_state(&function_state_, &scope_state_,
- initializer_scope);
- ZoneList<Statement*>* body = new (zone()) ZoneList<Statement*>(count, zone());
- for (int i = 0; i < count; ++i) {
- const AstRawString* name =
- ClassFieldVariableName(true, ast_value_factory(), i);
- VariableProxy* name_proxy = scope()->NewUnresolved(factory(), name);
- const AstRawString* function_name =
- ClassFieldVariableName(false, ast_value_factory(), i);
- VariableProxy* function_proxy =
- scope()->NewUnresolved(factory(), function_name);
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(function_proxy, zone());
- args->Add(ThisExpression(kNoSourcePosition), zone());
- Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
- kNoSourcePosition);
- ZoneList<Expression*>* define_property_args =
- new (zone()) ZoneList<Expression*>(5, zone());
- define_property_args->Add(ThisExpression(kNoSourcePosition), zone());
- define_property_args->Add(name_proxy, zone());
- define_property_args->Add(call, zone());
- define_property_args->Add(
- factory()->NewNumberLiteral(DONT_ENUM, kNoSourcePosition), zone());
- define_property_args->Add(
- factory()->NewNumberLiteral(
- false, // TODO(bakkot) function name inference a la class { x =
- // function(){}; static y = function(){}; }
- kNoSourcePosition),
- zone());
- body->Add(factory()->NewExpressionStatement(
- factory()->NewCallRuntime(
- Runtime::kDefineDataProperty,
- define_property_args, // TODO(bakkot) verify that this is
- // the same as object_define_property
- kNoSourcePosition),
- kNoSourcePosition),
- zone());
- }
- body->Add(factory()->NewReturnStatement(ThisExpression(kNoSourcePosition),
- kNoSourcePosition),
- zone());
- FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
- ast_value_factory()->empty_string(), initializer_scope, body,
- initializer_state.materialized_literal_count(),
- initializer_state.expected_property_count(), 0, count,
- FunctionLiteral::kNoDuplicateParameters,
- FunctionLiteral::kAnonymousExpression,
- FunctionLiteral::kShouldLazyCompile, initializer_scope->start_position(),
- true);
- function_literal->set_is_class_field_initializer(true);
- return function_literal;
-}
-
-FunctionLiteral* Parser::InsertClassFieldInitializer(
- FunctionLiteral* constructor) {
- Statement* call_initializer = factory()->NewExpressionStatement(
- CallClassFieldInitializer(
- constructor->scope(),
- constructor->scope()->NewUnresolved(
- factory(), ast_value_factory()->this_string(), kNoSourcePosition,
- THIS_VARIABLE)),
- kNoSourcePosition);
- constructor->body()->InsertAt(0, call_initializer, zone());
- return constructor;
-}
-
-// If a class name is specified, this method declares the class variable
-// and sets class_info->proxy to point to that name.
void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
ClassInfo* class_info, int class_token_pos,
bool* ok) {
@@ -3459,13 +3281,14 @@ void Parser::DeclareClassVariable(const AstRawString* name, Scope* block_scope,
// This method declares a property of the given class. It updates the
// following fields of class_info, as appropriate:
// - constructor
-// - static_initializer_var
-// - instance_field_initializers
// - properties
void Parser::DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ ClassLiteralProperty::Kind kind,
+ bool is_static, bool is_constructor,
ClassInfo* class_info, bool* ok) {
- if (class_info->has_seen_constructor && class_info->constructor == nullptr) {
+ if (is_constructor) {
+ DCHECK(!class_info->constructor);
class_info->constructor = GetPropertyValue(property)->AsFunctionLiteral();
DCHECK_NOT_NULL(class_info->constructor);
class_info->constructor->set_raw_name(
@@ -3476,47 +3299,7 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
if (property->kind() == ClassLiteralProperty::FIELD) {
DCHECK(allow_harmony_class_fields());
- if (property->is_static()) {
- if (class_info->static_initializer_var == nullptr) {
- class_info->static_initializer_var =
- NewTemporary(ast_value_factory()->empty_string());
- }
- // TODO(bakkot) only do this conditionally
- Expression* function = InstallHomeObject(
- property->value(),
- factory()->NewVariableProxy(class_info->static_initializer_var));
- ZoneList<Expression*>* args =
- new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(function, zone());
- args->Add(factory()->NewVariableProxy(class_info->static_initializer_var),
- zone());
- Expression* call = factory()->NewCallRuntime(Runtime::kInlineCall, args,
- kNoSourcePosition);
- property->set_value(call);
- } else {
- // if (is_computed_name) { // TODO(bakkot) figure out why this is
- // necessary for non-computed names in full-codegen
- ZoneList<Expression*>* to_name_args =
- new (zone()) ZoneList<Expression*>(1, zone());
- to_name_args->Add(property->key(), zone());
- property->set_key(factory()->NewCallRuntime(
- Runtime::kToName, to_name_args, kNoSourcePosition));
- //}
- const AstRawString* name = ClassFieldVariableName(
- true, ast_value_factory(),
- class_info->instance_field_initializers->length());
- VariableProxy* name_proxy =
- factory()->NewVariableProxy(name, NORMAL_VARIABLE);
- Declaration* name_declaration = factory()->NewVariableDeclaration(
- name_proxy, scope(), kNoSourcePosition);
- Variable* name_var =
- Declare(name_declaration, DeclarationDescriptor::NORMAL, CONST,
- kNeedsInitialization, ok, scope());
- DCHECK(*ok);
- if (!*ok) return;
- class_info->instance_field_initializers->Add(property->value(), zone());
- property->set_value(factory()->NewVariableProxy(name_var));
- }
+ // TODO(littledan): Implement class fields
}
class_info->properties->Add(property, zone());
}
@@ -3526,9 +3309,9 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
// - constructor (if missing, it updates it with a default constructor)
// - proxy
// - extends
-// - static_initializer_var
-// - instance_field_initializers
// - properties
+// - has_name_static_property
+// - has_static_computed_names
Expression* Parser::RewriteClassLiteral(const AstRawString* name,
ClassInfo* class_info, int pos,
bool* ok) {
@@ -3538,22 +3321,12 @@ Expression* Parser::RewriteClassLiteral(const AstRawString* name,
DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
bool has_extends = class_info->extends != nullptr;
- bool has_instance_fields =
- class_info->instance_field_initializers->length() > 0;
- DCHECK(!has_instance_fields || allow_harmony_class_fields());
bool has_default_constructor = class_info->constructor == nullptr;
if (has_default_constructor) {
class_info->constructor =
- DefaultConstructor(name, has_extends, has_instance_fields, pos, end_pos,
- scope()->language_mode());
+ DefaultConstructor(name, has_extends, pos, end_pos);
}
- if (has_instance_fields && !has_extends) {
- class_info->constructor =
- InsertClassFieldInitializer(class_info->constructor);
- class_info->constructor->set_requires_class_field_init(true);
- } // The derived case is handled by rewriting super calls.
-
scope()->set_end_position(end_pos);
if (name != nullptr) {
@@ -3563,12 +3336,9 @@ Expression* Parser::RewriteClassLiteral(const AstRawString* name,
ClassLiteral* class_literal = factory()->NewClassLiteral(
class_info->proxy, class_info->extends, class_info->constructor,
- class_info->properties, pos, end_pos);
-
- if (class_info->static_initializer_var != nullptr) {
- class_literal->set_static_initializer_proxy(
- factory()->NewVariableProxy(class_info->static_initializer_var));
- }
+ class_info->properties, pos, end_pos,
+ class_info->has_name_static_property,
+ class_info->has_static_computed_names);
do_block->statements()->Add(
factory()->NewExpressionStatement(
@@ -3577,53 +3347,6 @@ Expression* Parser::RewriteClassLiteral(const AstRawString* name,
class_literal, kNoSourcePosition),
pos),
zone());
- if (allow_harmony_class_fields() &&
- (has_instance_fields || (has_extends && !has_default_constructor))) {
- // Default constructors for derived classes without fields will not try to
- // read this variable, so there's no need to create it.
- const AstRawString* init_fn_name =
- ast_value_factory()->dot_class_field_init_string();
- Variable* init_fn_var = scope()->DeclareLocal(
- init_fn_name, CONST, kCreatedInitialized, NORMAL_VARIABLE);
- Expression* initializer =
- has_instance_fields
- ? static_cast<Expression*>(SynthesizeClassFieldInitializer(
- class_info->instance_field_initializers->length()))
- : factory()->NewBooleanLiteral(false, kNoSourcePosition);
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(init_fn_var), initializer,
- kNoSourcePosition);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(assignment, kNoSourcePosition),
- zone());
- }
- for (int i = 0; i < class_info->instance_field_initializers->length(); ++i) {
- const AstRawString* function_name =
- ClassFieldVariableName(false, ast_value_factory(), i);
- VariableProxy* function_proxy =
- factory()->NewVariableProxy(function_name, NORMAL_VARIABLE);
- Declaration* function_declaration = factory()->NewVariableDeclaration(
- function_proxy, scope(), kNoSourcePosition);
- Variable* function_var =
- Declare(function_declaration, DeclarationDescriptor::NORMAL, CONST,
- kNeedsInitialization, ok, scope());
- if (!*ok) return nullptr;
- Property* prototype_property = factory()->NewProperty(
- factory()->NewVariableProxy(result_var),
- factory()->NewStringLiteral(ast_value_factory()->prototype_string(),
- kNoSourcePosition),
- kNoSourcePosition);
- Expression* function_value = InstallHomeObject(
- class_info->instance_field_initializers->at(i),
- prototype_property); // TODO(bakkot) ideally this would be conditional,
- // especially in trivial cases
- Assignment* function_assignment = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(function_var), function_value,
- kNoSourcePosition);
- do_block->statements()->Add(factory()->NewExpressionStatement(
- function_assignment, kNoSourcePosition),
- zone());
- }
do_block->set_scope(scope()->FinalizeBlockScope());
do_expr->set_represented_function(class_info->constructor);
AddFunctionForNameInference(class_info->constructor);
@@ -3775,47 +3498,10 @@ void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
// Copy over the counters from the background thread to the main counters on
// the isolate.
- // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
- // background parsing.
isolate->counters()->runtime_call_stats()->Add(runtime_call_stats_);
}
}
-
-// ----------------------------------------------------------------------------
-// The Parser interface.
-
-
-bool Parser::ParseStatic(ParseInfo* info) {
- Parser parser(info);
- if (parser.Parse(info)) {
- info->set_language_mode(info->literal()->language_mode());
- return true;
- }
- return false;
-}
-
-
-bool Parser::Parse(ParseInfo* info) {
- DCHECK(info->literal() == NULL);
- FunctionLiteral* result = NULL;
- // Ok to use Isolate here; this function is only called in the main thread.
- DCHECK(parsing_on_main_thread_);
- Isolate* isolate = info->isolate();
-
- if (info->is_toplevel()) {
- SetCachedData(info);
- result = ParseProgram(isolate, info);
- } else {
- result = ParseFunction(isolate, info);
- }
- info->set_literal(result);
-
- Internalize(isolate, info->script(), result == NULL);
- return (result != NULL);
-}
-
-
void Parser::ParseOnBackground(ParseInfo* info) {
parsing_on_main_thread_ = false;
@@ -3823,7 +3509,13 @@ void Parser::ParseOnBackground(ParseInfo* info) {
FunctionLiteral* result = NULL;
ParserLogger logger;
- if (produce_cached_parse_data()) log_ = &logger;
+ if (produce_cached_parse_data()) {
+ if (allow_lazy_) {
+ log_ = &logger;
+ } else {
+ compile_options_ = ScriptCompiler::kNoCompileOptions;
+ }
+ }
if (FLAG_runtime_stats) {
// Create separate runtime stats for background parsing.
runtime_call_stats_ = new (zone()) RuntimeCallStats();
@@ -3837,7 +3529,8 @@ void Parser::ParseOnBackground(ParseInfo* info) {
} else {
DCHECK(info->character_stream() == nullptr);
stream.reset(ScannerStream::For(info->source_stream(),
- info->source_stream_encoding()));
+ info->source_stream_encoding(),
+ runtime_call_stats_));
stream_ptr = stream.get();
}
DCHECK(info->maybe_outer_scope_info().is_null());
@@ -3867,9 +3560,13 @@ void Parser::ParseOnBackground(ParseInfo* info) {
if (result != NULL) *info->cached_data() = logger.GetScriptData();
log_ = NULL;
}
- if (FLAG_runtime_stats) {
- // TODO(cbruni,lpy): properly attach the runtime stats to the trace for
- // background parsing.
+ if (FLAG_runtime_stats &
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+ auto value = v8::tracing::TracedValue::Create();
+ runtime_call_stats_->Dump(value.get());
+ TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"),
+ "V8.RuntimeStats", TRACE_EVENT_SCOPE_THREAD,
+ "runtime-call-stats", std::move(value));
}
}
@@ -4057,6 +3754,23 @@ Expression* Parser::SpreadCall(Expression* function,
// Super calls
// $super_constructor = %_GetSuperConstructor(<this-function>)
// %reflect_construct($super_constructor, args, new.target)
+
+ bool only_last_arg_is_spread = false;
+ for (int i = 0; i < args->length(); i++) {
+ if (args->at(i)->IsSpread()) {
+ if (i == args->length() - 1) {
+ only_last_arg_is_spread = true;
+ }
+ break;
+ }
+ }
+
+ if (only_last_arg_is_spread) {
+ // Handle in BytecodeGenerator.
+ Expression* super_call_ref = NewSuperCallReference(pos);
+ return factory()->NewCall(super_call_ref, args, pos);
+ }
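
Annotation: the loop above is a first-spread scan; it stops at the first spread argument and reports true only when that spread is also the last argument. A minimal standalone sketch of the same predicate, using std::vector in place of ZoneList<Expression*>:

    #include <vector>

    // Sketch of the check above: true iff the first spread argument found
    // is also the final argument; any earlier spread disables the fast path.
    static bool OnlyLastArgIsSpread(const std::vector<bool>& is_spread) {
      for (size_t i = 0; i < is_spread.size(); ++i) {
        if (is_spread[i]) return i + 1 == is_spread.size();
      }
      return false;
    }
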
+ args = PrepareSpreadArguments(args);
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
Expression* super_constructor = factory()->NewCallRuntime(
@@ -4066,6 +3780,7 @@ Expression* Parser::SpreadCall(Expression* function,
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
pos);
} else {
+ args = PrepareSpreadArguments(args);
if (function->IsProperty()) {
// Method calls
if (function->AsProperty()->IsSuperAccess()) {
@@ -4096,6 +3811,7 @@ Expression* Parser::SpreadCall(Expression* function,
Expression* Parser::SpreadCallNew(Expression* function,
ZoneList<Expression*>* args, int pos) {
+ args = PrepareSpreadArguments(args);
args->InsertAt(0, function, zone());
return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
@@ -4143,23 +3859,13 @@ Expression* Parser::ExpressionListToExpression(ZoneList<Expression*>* args) {
// when desugaring the body of async_function.
void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
FunctionKind kind, int pos) {
- // function async_function() {
- // .generator_object = %CreateGeneratorObject();
- // BuildRejectPromiseOnException({
- // ... block ...
- // return %ResolvePromise(.promise, expr), .promise;
- // })
- // }
-
- Variable* temp =
- NewTemporary(ast_value_factory()->dot_generator_object_string());
- function_state_->set_generator_object_variable(temp);
-
- Expression* init_generator_variable = factory()->NewAssignment(
- Token::INIT, factory()->NewVariableProxy(temp),
- BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition);
- body->Add(factory()->NewExpressionStatement(init_generator_variable,
- kNoSourcePosition),
+ // When parsing an async arrow function, we get here without having called
+ // PrepareGeneratorVariables yet, so do it now.
+ if (function_state_->generator_object_variable() == nullptr) {
+ PrepareGeneratorVariables();
+ }
+ body->Add(factory()->NewExpressionStatement(
+ BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition),
zone());
}
@@ -4167,7 +3873,7 @@ void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
void Parser::RewriteAsyncFunctionBody(ZoneList<Statement*>* body, Block* block,
Expression* return_value, bool* ok) {
// function async_function() {
- // .generator_object = %CreateGeneratorObject();
+ // .generator_object = %CreateJSGeneratorObject();
// BuildRejectPromiseOnException({
// ... block ...
// return %ResolvePromise(.promise, expr), .promise;
@@ -4204,10 +3910,7 @@ Expression* Parser::RewriteAwaitExpression(Expression* value, int await_pos) {
// TODO(littledan): investigate why this ordering is needed in more detail.
Variable* generator_object_variable =
function_state_->generator_object_variable();
-
- // If generator_object_variable is null,
- // TODO(littledan): Is this necessary?
- if (!generator_object_variable) return value;
+ DCHECK_NOT_NULL(generator_object_variable);
const int nopos = kNoSourcePosition;
@@ -4539,8 +4242,7 @@ void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
// let mode = kNext;
// let output = undefined;
//
-// let iterator = iterable[Symbol.iterator]();
-// if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
+// let iterator = GetIterator(iterable);
//
// while (true) {
// // From the generator to the iterator:
@@ -4643,41 +4345,17 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
initialize_output = factory()->NewExpressionStatement(assignment, nopos);
}
- // let iterator = iterable[Symbol.iterator];
+ // let iterator = GetIterator(iterable);
Variable* var_iterator = NewTemporary(ast_value_factory()->empty_string());
Statement* get_iterator;
{
- Expression* iterator = GetIterator(iterable, nopos);
+ Expression* iterator = factory()->NewGetIterator(iterable, nopos);
Expression* iterator_proxy = factory()->NewVariableProxy(var_iterator);
Expression* assignment = factory()->NewAssignment(
Token::ASSIGN, iterator_proxy, iterator, nopos);
get_iterator = factory()->NewExpressionStatement(assignment, nopos);
}
- // if (!IS_RECEIVER(iterator)) throw MakeTypeError(kSymbolIteratorInvalid);
- Statement* validate_iterator;
- {
- Expression* is_receiver_call;
- {
- auto args = new (zone()) ZoneList<Expression*>(1, zone());
- args->Add(factory()->NewVariableProxy(var_iterator), zone());
- is_receiver_call =
- factory()->NewCallRuntime(Runtime::kInlineIsJSReceiver, args, nopos);
- }
-
- Statement* throw_call;
- {
- Expression* call =
- NewThrowTypeError(MessageTemplate::kSymbolIteratorInvalid,
- ast_value_factory()->empty_string(), nopos);
- throw_call = factory()->NewExpressionStatement(call, nopos);
- }
-
- validate_iterator = factory()->NewIfStatement(
- is_receiver_call, factory()->NewEmptyStatement(nopos), throw_call,
- nopos);
- }
-
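
Annotation: the deleted validate_iterator block is not lost behavior. The new GetIterator AST node is expected to carry the receiver check and the kSymbolIteratorInvalid TypeError itself, so later pipeline stages emit it instead of the parser desugaring it. A rough standalone analogue of the subsumed check (illustrative types, not V8's):

    #include <stdexcept>

    struct Value { bool is_receiver = false; };

    // Call the @@iterator method, then require the result to be an object;
    // GetIterator performs the equivalent of this internally.
    Value GetIteratorChecked(Value (*call_symbol_iterator)()) {
      Value iterator = call_symbol_iterator();
      if (!iterator.is_receiver)
        throw std::runtime_error("Result of Symbol.iterator is not an object");
      return iterator;
    }
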
// output = iterator.next(input);
Statement* call_next;
{
@@ -4910,8 +4588,7 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
Scope* catch_scope = NewScope(CATCH_SCOPE);
catch_scope->set_is_hidden();
const AstRawString* name = ast_value_factory()->dot_catch_string();
- Variable* catch_variable = catch_scope->DeclareLocal(
- name, VAR, kCreatedInitialized, NORMAL_VARIABLE);
+ Variable* catch_variable = catch_scope->DeclareLocal(name, VAR);
try_catch = factory()->NewTryCatchStatementForDesugaring(
try_block, catch_scope, catch_variable, catch_block, nopos);
@@ -4982,12 +4659,11 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
// The rewriter needs to process the get_value statement only, hence we
// put the preceding statements into an init block.
- Block* do_block_ = factory()->NewBlock(nullptr, 7, true, nopos);
+ Block* do_block_ = factory()->NewBlock(nullptr, 6, true, nopos);
do_block_->statements()->Add(initialize_input, zone());
do_block_->statements()->Add(initialize_mode, zone());
do_block_->statements()->Add(initialize_output, zone());
do_block_->statements()->Add(get_iterator, zone());
- do_block_->statements()->Add(validate_iterator, zone());
do_block_->statements()->Add(loop, zone());
do_block_->statements()->Add(maybe_return_value, zone());
@@ -5198,8 +4874,7 @@ void Parser::FinalizeIteratorUse(Scope* use_scope, Variable* completion,
{
Scope* catch_scope = NewScopeWithParent(use_scope, CATCH_SCOPE);
Variable* catch_variable =
- catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, NORMAL_VARIABLE);
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
catch_scope->set_is_hidden();
Statement* rethrow;
@@ -5305,8 +4980,7 @@ void Parser::BuildIteratorCloseForCompletion(Scope* scope,
Scope* catch_scope = NewScopeWithParent(scope, CATCH_SCOPE);
Variable* catch_variable =
- catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR,
- kCreatedInitialized, NORMAL_VARIABLE);
+ catch_scope->DeclareLocal(ast_value_factory()->dot_catch_string(), VAR);
catch_scope->set_is_hidden();
try_call_return = factory()->NewTryCatchStatement(
@@ -5410,7 +5084,7 @@ Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
// %ReThrow(e);
// }
// } finally {
- // if (!(completion === kNormalCompletion || IS_UNDEFINED(#iterator))) {
+ // if (!(completion === kNormalCompletion)) {
// #BuildIteratorCloseForCompletion(#iterator, completion)
// }
// }
@@ -5421,18 +5095,13 @@ Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
const int nopos = kNoSourcePosition;
- // !(completion === kNormalCompletion || IS_UNDEFINED(#iterator))
+ // !(completion === kNormalCompletion)
Expression* closing_condition;
{
- Expression* lhs = factory()->NewCompareOperation(
+ Expression* cmp = factory()->NewCompareOperation(
Token::EQ_STRICT, factory()->NewVariableProxy(var_completion),
factory()->NewSmiLiteral(Parser::kNormalCompletion, nopos), nopos);
- Expression* rhs = factory()->NewCompareOperation(
- Token::EQ_STRICT, factory()->NewVariableProxy(loop->iterator()),
- factory()->NewUndefinedLiteral(nopos), nopos);
- closing_condition = factory()->NewUnaryOperation(
- Token::NOT, factory()->NewBinaryOperation(Token::OR, lhs, rhs, nopos),
- nopos);
+ closing_condition = factory()->NewUnaryOperation(Token::NOT, cmp, nopos);
}
Block* final_loop = factory()->NewBlock(nullptr, 2, false, nopos);
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 736419daf2..a898511b23 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -10,10 +10,12 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/parsing/parser-base.h"
+#include "src/parsing/parsing.h"
#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
#include "src/parsing/preparser.h"
#include "src/pending-compilation-error-handler.h"
+#include "src/utils.h"
namespace v8 {
@@ -36,6 +38,7 @@ class FunctionEntry BASE_EMBEDDED {
kLiteralCountIndex,
kPropertyCountIndex,
kFlagsIndex,
+ kNumInnerFunctionsIndex,
kSize
};
@@ -79,6 +82,7 @@ class FunctionEntry BASE_EMBEDDED {
bool has_duplicate_parameters() const {
return HasDuplicateParametersField::decode(backing_[kFlagsIndex]);
}
+ int num_inner_functions() const { return backing_[kNumInnerFunctionsIndex]; }
bool is_valid() const { return !backing_.is_empty(); }
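
Annotation: each serialized function entry gains one int slot for the inner-function count, which is why kCurrentVersion is bumped from 13 to 14 in preparse-data-format.h further down. A sketch of the tail of the widened record, with slot order taken from the index enum above (earlier slots elided here, layout illustrative):

    // Trailing slots of one cached function entry (version 14).
    struct FunctionEntryTail {
      int literal_count;        // kLiteralCountIndex
      int property_count;       // kPropertyCountIndex
      int flags;                // kFlagsIndex
      int num_inner_functions;  // kNumInnerFunctionsIndex (new slot)
    };
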
@@ -135,7 +139,7 @@ class Parser;
struct ParserFormalParameters : FormalParametersBase {
- struct Parameter {
+ struct Parameter : public ZoneObject {
Parameter(const AstRawString* name, Expression* pattern,
Expression* initializer, int initializer_end_position,
bool is_rest)
@@ -149,16 +153,17 @@ struct ParserFormalParameters : FormalParametersBase {
Expression* initializer;
int initializer_end_position;
bool is_rest;
+ Parameter* next_parameter = nullptr;
bool is_simple() const {
return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
}
+ Parameter** next() { return &next_parameter; }
+ Parameter* const* next() const { return &next_parameter; }
};
explicit ParserFormalParameters(DeclarationScope* scope)
- : FormalParametersBase(scope), params(4, scope->zone()) {}
- ZoneList<Parameter> params;
-
- const Parameter& at(int i) const { return params[i]; }
+ : FormalParametersBase(scope) {}
+ ThreadedList<Parameter> params;
};
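
Annotation: ThreadedList (from src/utils.h, newly included above) is an intrusive singly linked list; the next pointer lives inside each element, which is why Parameter now carries next_parameter and exposes next(). A simplified sketch of the idea, under the assumption that elements zero-initialize their next slot as Parameter does:

    // Minimal intrusive list: T must expose T** next() returning the
    // address of its embedded next pointer (initialized to nullptr).
    template <typename T>
    class ThreadedListSketch {
     public:
      ThreadedListSketch() : head_(nullptr), tail_(&head_) {}
      // O(1) append without per-node allocation: link through the tail
      // slot, then advance the tail to the new element's next slot.
      void Add(T* element) {
        *tail_ = element;
        tail_ = element->next();
      }
      T* first() const { return head_; }
     private:
      T* head_;
      T** tail_;
    };

The real class also supports the range-for iteration used by DeclareFormalParameters later in this file.
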
template <>
@@ -203,11 +208,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
static bool const IsPreParser() { return false; }
- // Parses the source code represented by the compilation info and sets its
- // function literal. Returns false (and deallocates any allocated AST
- // nodes) if parsing failed.
- static bool ParseStatic(ParseInfo* info);
- bool Parse(ParseInfo* info);
void ParseOnBackground(ParseInfo* info);
// Deserialize the scope chain prior to parsing in which the script is going
@@ -229,6 +229,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
private:
friend class ParserBase<Parser>;
friend class v8::internal::ExpressionClassifier<ParserTypes<Parser>>;
+ friend bool v8::internal::parsing::ParseProgram(ParseInfo*);
+ friend bool v8::internal::parsing::ParseFunction(ParseInfo*);
bool AllowsLazyParsingWithoutUnresolvedVariables() const {
return scope()->AllowsLazyParsingWithoutUnresolvedVariables(
@@ -262,7 +264,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return scope()->NewTemporary(name);
}
- void PrepareGeneratorVariables(FunctionState* function_state);
+ void PrepareGeneratorVariables();
// Limit the allowed number of local variables in a function. The hard limit
// is that offsets computed by FullCodeGenerator::StackOperand and similar
@@ -290,12 +292,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return compile_options_;
}
bool consume_cached_parse_data() const {
- return allow_lazy() &&
- compile_options_ == ScriptCompiler::kConsumeParserCache;
+ return compile_options_ == ScriptCompiler::kConsumeParserCache;
}
bool produce_cached_parse_data() const {
- return allow_lazy() &&
- compile_options_ == ScriptCompiler::kProduceParserCache;
+ return compile_options_ == ScriptCompiler::kProduceParserCache;
}
void ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
@@ -341,8 +341,9 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const CatchInfo& catch_info, int pos);
Statement* DeclareFunction(const AstRawString* variable_name,
- FunctionLiteral* function, int pos,
- bool is_generator, bool is_async,
+ FunctionLiteral* function, VariableMode mode,
+ int pos, bool is_generator, bool is_async,
+ bool is_sloppy_block_function,
ZoneList<const AstRawString*>* names, bool* ok);
V8_INLINE Statement* DeclareClass(const AstRawString* variable_name,
Expression* value,
@@ -353,6 +354,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int class_token_pos, bool* ok);
V8_INLINE void DeclareClassProperty(const AstRawString* class_name,
ClassLiteralProperty* property,
+ ClassLiteralProperty::Kind kind,
+ bool is_static, bool is_constructor,
ClassInfo* class_info, bool* ok);
V8_INLINE Expression* RewriteClassLiteral(const AstRawString* name,
ClassInfo* class_info, int pos,
@@ -448,8 +451,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
Expression* BuildIteratorNextResult(Expression* iterator, Variable* result,
int pos);
- Expression* GetIterator(Expression* iterable, int pos);
-
// Initialize the components of a for-in / for-of statement.
Statement* InitializeForEachStatement(ForEachStatement* stmt,
Expression* each, Expression* subject,
@@ -476,11 +477,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int function_token_position, FunctionLiteral::FunctionType type,
LanguageMode language_mode, bool* ok);
- Expression* InstallHomeObject(Expression* function_literal,
- Expression* home_object);
- FunctionLiteral* SynthesizeClassFieldInitializer(int count);
- FunctionLiteral* InsertClassFieldInitializer(FunctionLiteral* constructor);
-
// Get odd-ball literals.
Literal* GetLiteralUndefined(int position);
@@ -523,8 +519,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Factory methods.
FunctionLiteral* DefaultConstructor(const AstRawString* name, bool call_super,
- bool requires_class_field_init, int pos,
- int end_pos, LanguageMode language_mode);
+ int pos, int end_pos);
// Skip over a lazy function, either using cached data if we have it, or
// by parsing the function with PreParser. Consumes the ending }.
@@ -599,7 +594,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
int pos);
Expression* SpreadCallNew(Expression* function, ZoneList<Expression*>* args,
int pos);
- Expression* CallClassFieldInitializer(Scope* scope, Expression* this_expr);
Expression* RewriteSuperCall(Expression* call_expression);
void SetLanguageMode(Scope* scope, LanguageMode mode);
@@ -630,7 +624,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
void RewriteParameterInitializer(Expression* expr, Scope* scope);
Expression* BuildInitialYield(int pos, FunctionKind kind);
- Expression* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
+ Assignment* BuildCreateJSGeneratorObject(int pos, FunctionKind kind);
Expression* BuildResolvePromise(Expression* value, int pos);
Expression* BuildRejectPromise(Expression* value, int pos);
Variable* PromiseVariable();
@@ -686,10 +680,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return identifier == ast_value_factory()->undefined_string();
}
- V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const {
- return scanner()->IdentifierIsFutureStrictReserved(identifier);
- }
-
// Returns true if the expression is of type "this.foo".
V8_INLINE static bool IsThisProperty(Expression* expression) {
DCHECK(expression != NULL);
@@ -724,10 +714,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return identifier == ast_value_factory()->constructor_string();
}
- V8_INLINE bool IsDirectEvalCall(Expression* expression) const {
- if (!expression->IsCall()) return false;
- expression = expression->AsCall()->expression();
- return IsIdentifier(expression) && IsEval(AsIdentifier(expression));
+ V8_INLINE bool IsName(const AstRawString* identifier) const {
+ return identifier == ast_value_factory()->name_string();
}
V8_INLINE static bool IsBoilerplateProperty(
@@ -826,12 +814,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// Determine if the expression is a variable proxy and mark it as being used
// in an assignment or with an increment/decrement operator.
- V8_INLINE static Expression* MarkExpressionAsAssigned(
- Expression* expression) {
- VariableProxy* proxy =
- expression != NULL ? expression->AsVariableProxy() : NULL;
- if (proxy != NULL) proxy->set_is_assigned();
- return expression;
+ V8_INLINE static void MarkExpressionAsAssigned(Expression* expression) {
+ DCHECK_NOT_NULL(expression);
+ if (expression->IsVariableProxy()) {
+ expression->AsVariableProxy()->set_is_assigned();
+ }
}
// Returns true if we have a binary expression between two numeric
@@ -1054,34 +1041,32 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
const AstRawString* name = is_simple
? pattern->AsVariableProxy()->raw_name()
: ast_value_factory()->empty_string();
- parameters->params.Add(
- ParserFormalParameters::Parameter(name, pattern, initializer,
- initializer_end_position, is_rest),
- parameters->scope->zone());
+ auto parameter =
+ new (parameters->scope->zone()) ParserFormalParameters::Parameter(
+ name, pattern, initializer, initializer_end_position, is_rest);
+
+ parameters->params.Add(parameter);
}
- V8_INLINE void DeclareFormalParameter(
+ V8_INLINE void DeclareFormalParameters(
DeclarationScope* scope,
- const ParserFormalParameters::Parameter& parameter) {
- bool is_duplicate = false;
- bool is_simple = classifier()->is_simple_parameter_list();
- auto name = is_simple || parameter.is_rest
- ? parameter.name
- : ast_value_factory()->empty_string();
- auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
- if (!is_simple) scope->SetHasNonSimpleParameters();
- bool is_optional = parameter.initializer != nullptr;
- Variable* var =
- scope->DeclareParameter(name, mode, is_optional, parameter.is_rest,
- &is_duplicate, ast_value_factory());
- if (is_duplicate) {
- classifier()->RecordDuplicateFormalParameterError(scanner()->location());
- }
- if (is_sloppy(scope->language_mode())) {
- // TODO(sigurds) Mark every parameter as maybe assigned. This is a
- // conservative approximation necessary to account for parameters
- // that are assigned via the arguments array.
- var->set_maybe_assigned();
+ const ThreadedList<ParserFormalParameters::Parameter>& parameters) {
+ for (auto parameter : parameters) {
+ bool is_duplicate = false;
+ bool is_simple = classifier()->is_simple_parameter_list();
+ auto name = is_simple || parameter->is_rest
+ ? parameter->name
+ : ast_value_factory()->empty_string();
+ auto mode = is_simple || parameter->is_rest ? VAR : TEMPORARY;
+ if (!is_simple) scope->SetHasNonSimpleParameters();
+ bool is_optional = parameter->initializer != nullptr;
+ scope->DeclareParameter(name, mode, is_optional, parameter->is_rest,
+ &is_duplicate, ast_value_factory());
+ if (is_duplicate &&
+ classifier()->is_valid_formal_parameter_list_without_duplicates()) {
+ classifier()->RecordDuplicateFormalParameterError(
+ scanner()->location());
+ }
}
}
@@ -1147,7 +1132,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// parsing.
int use_counts_[v8::Isolate::kUseCounterFeatureCount];
int total_preparse_skipped_;
- bool parsing_on_main_thread_;
+ bool allow_lazy_;
+ bool temp_zoned_;
ParserLogger* log_;
};
diff --git a/deps/v8/src/parsing/parsing.cc b/deps/v8/src/parsing/parsing.cc
new file mode 100644
index 0000000000..db07bde7c7
--- /dev/null
+++ b/deps/v8/src/parsing/parsing.cc
@@ -0,0 +1,62 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/parsing/parsing.h"
+
+#include <memory>
+
+#include "src/ast/ast.h"
+#include "src/objects-inl.h"
+#include "src/parsing/parse-info.h"
+#include "src/parsing/parser.h"
+
+namespace v8 {
+namespace internal {
+namespace parsing {
+
+bool ParseProgram(ParseInfo* info) {
+ DCHECK(info->is_toplevel());
+ DCHECK_NULL(info->literal());
+
+ Parser parser(info);
+
+ FunctionLiteral* result = nullptr;
+  // OK to use Isolate here; this function is only called on the main thread.
+ DCHECK(parser.parsing_on_main_thread_);
+ Isolate* isolate = info->isolate();
+
+ parser.SetCachedData(info);
+ result = parser.ParseProgram(isolate, info);
+ info->set_literal(result);
+ parser.Internalize(isolate, info->script(), result == nullptr);
+ if (result != nullptr) {
+ info->set_language_mode(info->literal()->language_mode());
+ }
+ return (result != nullptr);
+}
+
+bool ParseFunction(ParseInfo* info) {
+ DCHECK(!info->is_toplevel());
+ DCHECK_NULL(info->literal());
+
+ Parser parser(info);
+
+ FunctionLiteral* result = nullptr;
+  // OK to use Isolate here; this function is only called on the main thread.
+ DCHECK(parser.parsing_on_main_thread_);
+ Isolate* isolate = info->isolate();
+
+ result = parser.ParseFunction(isolate, info);
+ info->set_literal(result);
+ parser.Internalize(isolate, info->script(), result == nullptr);
+ return (result != nullptr);
+}
+
+bool ParseAny(ParseInfo* info) {
+ return info->is_toplevel() ? ParseProgram(info) : ParseFunction(info);
+}
+
+} // namespace parsing
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/parsing/parsing.h b/deps/v8/src/parsing/parsing.h
new file mode 100644
index 0000000000..1f92c51838
--- /dev/null
+++ b/deps/v8/src/parsing/parsing.h
@@ -0,0 +1,34 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PARSING_PARSING_H_
+#define V8_PARSING_PARSING_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class ParseInfo;
+
+namespace parsing {
+
+// Parses the top-level source code represented by the parse info and sets its
+// function literal. Returns false (and deallocates any allocated AST
+// nodes) if parsing failed.
+V8_EXPORT_PRIVATE bool ParseProgram(ParseInfo* info);
+
+// Like ParseProgram but for an individual function.
+V8_EXPORT_PRIVATE bool ParseFunction(ParseInfo* info);
+
+// If it is unknown whether info->is_toplevel() holds, use this function to
+// dispatch to one of the two functions above. Prefer the specific functions
+// whenever possible.
+V8_EXPORT_PRIVATE bool ParseAny(ParseInfo* info);
+
+} // namespace parsing
+} // namespace internal
+} // namespace v8
+
+#endif // V8_PARSING_PARSING_H_
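
Annotation: with Parser::ParseStatic and Parser::Parse removed from the public surface (see the parser.h hunk above), callers go through these free functions instead. A hedged usage sketch, assuming a ParseInfo that has already been set up elsewhere:

    #include "src/parsing/parsing.h"

    // Replacement for the removed Parser::ParseStatic(info): ParseProgram
    // also sets the language mode on the ParseInfo when parsing succeeds.
    bool CompileTopLevel(v8::internal::ParseInfo* info) {
      return v8::internal::parsing::ParseProgram(info);
    }
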
diff --git a/deps/v8/src/parsing/pattern-rewriter.cc b/deps/v8/src/parsing/pattern-rewriter.cc
index f3d9bb02a3..9eb3f0665b 100644
--- a/deps/v8/src/parsing/pattern-rewriter.cc
+++ b/deps/v8/src/parsing/pattern-rewriter.cc
@@ -4,6 +4,7 @@
#include "src/ast/ast.h"
#include "src/messages.h"
+#include "src/objects-inl.h"
#include "src/parsing/parameter-initializer-rewriter.h"
#include "src/parsing/parser.h"
@@ -219,9 +220,18 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// But for var declarations we need to do a new lookup.
if (descriptor_->mode == VAR) {
proxy = var_init_scope->NewUnresolved(factory(), name);
+ // TODO(neis): Set is_assigned on proxy.
} else {
DCHECK_NOT_NULL(proxy);
DCHECK_NOT_NULL(proxy->var());
+ if (var_init_scope->is_script_scope() ||
+ var_init_scope->is_module_scope()) {
+ // We have to pessimistically assume that top-level variables will be
+ // assigned. This is because there may be lazily parsed top-level
+ // functions, which, for efficiency, we preparse without variable
+ // tracking.
+ proxy->set_is_assigned();
+ }
}
// Add break location for destructured sub-pattern.
int pos = IsSubPattern() ? pattern->position() : value->position();
@@ -307,7 +317,7 @@ void Parser::PatternRewriter::VisitRewritableExpression(
block_->statements()->Add(factory()->NewExpressionStatement(expr, pos),
zone());
}
- return set_context(old_context);
+ set_context(old_context);
}
// When an extra declaration scope needs to be inserted to account for
@@ -359,7 +369,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node,
DCHECK(block_->ignore_completion_value());
auto temp = *temp_var = CreateTempVar(current_value_);
- auto iterator = CreateTempVar(parser_->GetIterator(
+ auto iterator = CreateTempVar(factory()->NewGetIterator(
factory()->NewVariableProxy(temp), kNoSourcePosition));
auto done =
CreateTempVar(factory()->NewBooleanLiteral(false, kNoSourcePosition));
@@ -673,6 +683,7 @@ NOT_A_PATTERN(ForOfStatement)
NOT_A_PATTERN(ForStatement)
NOT_A_PATTERN(FunctionDeclaration)
NOT_A_PATTERN(FunctionLiteral)
+NOT_A_PATTERN(GetIterator)
NOT_A_PATTERN(IfStatement)
NOT_A_PATTERN(Literal)
NOT_A_PATTERN(NativeFunctionLiteral)
diff --git a/deps/v8/src/parsing/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
index 30d1d75a4f..32e9a23709 100644
--- a/deps/v8/src/parsing/preparse-data-format.h
+++ b/deps/v8/src/parsing/preparse-data-format.h
@@ -14,7 +14,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 13;
+ static const unsigned kCurrentVersion = 14;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index e9a4e8f4b5..b7c76a4216 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -6,6 +6,7 @@
#include "src/base/hashmap.h"
#include "src/base/logging.h"
#include "src/globals.h"
+#include "src/objects-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/preparse-data-format.h"
@@ -16,7 +17,8 @@ void ParserLogger::LogFunction(int start, int end, int num_parameters,
int function_length,
bool has_duplicate_parameters, int literals,
int properties, LanguageMode language_mode,
- bool uses_super_property, bool calls_eval) {
+ bool uses_super_property, bool calls_eval,
+ int num_inner_functions) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(num_parameters);
@@ -26,6 +28,7 @@ void ParserLogger::LogFunction(int start, int end, int num_parameters,
function_store_.Add(
FunctionEntry::EncodeFlags(language_mode, uses_super_property, calls_eval,
has_duplicate_parameters));
+ function_store_.Add(num_inner_functions);
}
ParserLogger::ParserLogger() {
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 767484ad7f..ca70f8a45f 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -52,17 +52,19 @@ class PreParserLogger final {
: end_(-1),
num_parameters_(-1),
function_length_(-1),
- has_duplicate_parameters_(false) {}
+ has_duplicate_parameters_(false),
+ num_inner_functions_(-1) {}
void LogFunction(int end, int num_parameters, int function_length,
- bool has_duplicate_parameters, int literals,
- int properties) {
+ bool has_duplicate_parameters, int literals, int properties,
+ int num_inner_functions) {
end_ = end;
num_parameters_ = num_parameters;
function_length_ = function_length;
has_duplicate_parameters_ = has_duplicate_parameters;
literals_ = literals;
properties_ = properties;
+ num_inner_functions_ = num_inner_functions;
}
int end() const { return end_; }
@@ -81,6 +83,7 @@ class PreParserLogger final {
int properties() const {
return properties_;
}
+ int num_inner_functions() const { return num_inner_functions_; }
private:
int end_;
@@ -90,6 +93,7 @@ class PreParserLogger final {
bool has_duplicate_parameters_;
int literals_;
int properties_;
+ int num_inner_functions_;
};
class ParserLogger final {
@@ -99,7 +103,7 @@ class ParserLogger final {
void LogFunction(int start, int end, int num_parameters, int function_length,
bool has_duplicate_parameters, int literals, int properties,
LanguageMode language_mode, bool uses_super_property,
- bool calls_eval);
+ bool calls_eval, int num_inner_functions);
ScriptData* GetScriptData();
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 1b21c3dc1e..1dae5e9b66 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -67,6 +67,8 @@ PreParserIdentifier GetSymbolHelper(Scanner* scanner) {
return PreParserIdentifier::Prototype();
if (scanner->LiteralMatches("constructor", 11))
return PreParserIdentifier::Constructor();
+ if (scanner->LiteralMatches("name", 4))
+ return PreParserIdentifier::Name();
return PreParserIdentifier::Default();
}
}
@@ -86,16 +88,19 @@ PreParserIdentifier PreParser::GetSymbol() const {
PreParser::PreParseResult PreParser::PreParseFunction(
FunctionKind kind, DeclarationScope* function_scope, bool parsing_module,
bool is_inner_function, bool may_abort, int* use_counts) {
- RuntimeCallTimerScope runtime_timer(
- runtime_call_stats_,
- track_unresolved_variables_
- ? &RuntimeCallStats::PreParseWithVariableResolution
- : &RuntimeCallStats::PreParseNoVariableResolution);
DCHECK_EQ(FUNCTION_SCOPE, function_scope->scope_type());
parsing_module_ = parsing_module;
use_counts_ = use_counts;
DCHECK(!track_unresolved_variables_);
track_unresolved_variables_ = is_inner_function;
+#ifdef DEBUG
+ function_scope->set_is_being_lazily_parsed(true);
+#endif
+
+  // In the preparser, we use the function literal IDs to count how many
+  // FunctionLiterals were encountered. The PreParser doesn't actually persist
+  // FunctionLiterals, so their IDs don't matter.
+ ResetFunctionLiteralId();
// The caller passes the function_scope which is not yet inserted into the
// scope_state_. All scopes above the function_scope are ignored by the
@@ -108,7 +113,7 @@ PreParser::PreParseResult PreParser::PreParseFunction(
PreParserFormalParameters formals(function_scope);
bool has_duplicate_parameters = false;
- DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ DuplicateFinder duplicate_finder;
std::unique_ptr<ExpressionClassifier> formals_classifier;
// Parse non-arrow function parameters. For arrow functions, the parameters
@@ -126,13 +131,26 @@ PreParser::PreParseResult PreParser::PreParseFunction(
formals_end_position, CHECK_OK_VALUE(kPreParseSuccess));
has_duplicate_parameters =
!classifier()->is_valid_formal_parameter_list_without_duplicates();
+
+ if (track_unresolved_variables_) {
+ function_scope->DeclareVariableName(
+ ast_value_factory()->arguments_string(), VAR);
+ function_scope->DeclareVariableName(ast_value_factory()->this_string(),
+ VAR);
+ }
}
Expect(Token::LBRACE, CHECK_OK_VALUE(kPreParseSuccess));
LazyParsingResult result = ParseStatementListAndLogFunction(
&formals, has_duplicate_parameters, may_abort, ok);
+
+ if (is_sloppy(function_scope->language_mode())) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
+
use_counts_ = nullptr;
track_unresolved_variables_ = false;
+
if (result == kLazyParsingAborted) {
return kPreParseAbort;
} else if (stack_overflow()) {
@@ -156,8 +174,6 @@ PreParser::PreParseResult PreParser::PreParseFunction(
if (is_strict(function_scope->language_mode())) {
int end_pos = scanner()->location().end_pos;
CheckStrictOctalLiteral(function_scope->start_position(), end_pos, ok);
- CheckDecimalLiteralWithLeadingZero(function_scope->start_position(),
- end_pos);
}
}
return kPreParseSuccess;
@@ -184,19 +200,23 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
LanguageMode language_mode, bool* ok) {
// Function ::
// '(' FormalParameterList? ')' '{' FunctionBody '}'
+ const RuntimeCallStats::CounterId counters[2][2] = {
+ {&RuntimeCallStats::PreParseBackgroundNoVariableResolution,
+ &RuntimeCallStats::PreParseNoVariableResolution},
+ {&RuntimeCallStats::PreParseBackgroundWithVariableResolution,
+ &RuntimeCallStats::PreParseWithVariableResolution}};
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
- track_unresolved_variables_
- ? &RuntimeCallStats::PreParseWithVariableResolution
- : &RuntimeCallStats::PreParseNoVariableResolution);
+ counters[track_unresolved_variables_][parsing_on_main_thread_]);
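
Annotation: the 2x2 table above selects one of four runtime-call counters by indexing with two bools (which convert to 0/1), replacing the old single ternary; index [0] in each dimension is the background-thread / no-variable-resolution case. A tiny self-contained analogue:

    #include <cstdio>

    int main() {
      // [track_unresolved_variables][parsing_on_main_thread]
      const char* counters[2][2] = {
          {"PreParseBackgroundNoVariableResolution",
           "PreParseNoVariableResolution"},
          {"PreParseBackgroundWithVariableResolution",
           "PreParseWithVariableResolution"}};
      bool track = true, on_main_thread = false;
      // Prints the background-with-variable-resolution counter name.
      std::printf("%s\n", counters[track][on_main_thread]);
      return 0;
    }
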
// Parse function body.
PreParserStatementList body;
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
FunctionState function_state(&function_state_, &scope_state_, function_scope);
- DuplicateFinder duplicate_finder(scanner()->unicode_cache());
+ DuplicateFinder duplicate_finder;
ExpressionClassifier formals_classifier(this, &duplicate_finder);
+ GetNextFunctionLiteralId();
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
@@ -216,6 +236,10 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
// Parsing the body may change the language mode in our scope.
language_mode = function_scope->language_mode();
+ if (is_sloppy(language_mode)) {
+ function_scope->HoistSloppyBlockFunctions(nullptr);
+ }
+
// Validate name and parameter names. We can do this only after parsing the
// function, since the function can declare itself strict.
CheckFunctionName(language_mode, function_name, function_name_validity,
@@ -227,7 +251,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
int end_position = scanner()->location().end_pos;
if (is_strict(language_mode)) {
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
- CheckDecimalLiteralWithLeadingZero(start_position, end_position);
}
function_scope->set_end_position(end_position);
@@ -253,25 +276,26 @@ PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
DCHECK_EQ(Token::RBRACE, scanner()->peek());
int body_end = scanner()->peek_location().end_pos;
DCHECK(this->scope()->is_function_scope());
- log_.LogFunction(body_end, formals->num_parameters(),
- formals->function_length, has_duplicate_parameters,
- function_state_->materialized_literal_count(),
- function_state_->expected_property_count());
+ log_.LogFunction(
+ body_end, formals->num_parameters(), formals->function_length,
+ has_duplicate_parameters, function_state_->materialized_literal_count(),
+ function_state_->expected_property_count(), GetLastFunctionLiteralId());
return kLazyParsingComplete;
}
PreParserExpression PreParser::ExpressionFromIdentifier(
PreParserIdentifier name, int start_position, InferName infer) {
+ VariableProxy* proxy = nullptr;
if (track_unresolved_variables_) {
AstNodeFactory factory(ast_value_factory());
// Setting the Zone is necessary because zone_ might be the temp Zone, and
// AstValueFactory doesn't know about it.
factory.set_zone(zone());
DCHECK_NOT_NULL(name.string_);
- scope()->NewUnresolved(&factory, name.string_, start_position,
- NORMAL_VARIABLE);
+ proxy = scope()->NewUnresolved(&factory, name.string_, start_position,
+ NORMAL_VARIABLE);
}
- return PreParserExpression::FromIdentifier(name, zone());
+ return PreParserExpression::FromIdentifier(name, proxy, zone());
}
void PreParser::DeclareAndInitializeVariables(
@@ -279,23 +303,16 @@ void PreParser::DeclareAndInitializeVariables(
const DeclarationDescriptor* declaration_descriptor,
const DeclarationParsingResult::Declaration* declaration,
ZoneList<const AstRawString*>* names, bool* ok) {
- if (declaration->pattern.identifiers_ != nullptr) {
+ if (declaration->pattern.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
- /* Mimic what Parser does when declaring variables (see
- Parser::PatternRewriter::VisitVariableProxy).
-
- var + no initializer -> RemoveUnresolved
- let / const + no initializer -> RemoveUnresolved
- var + initializer -> RemoveUnresolved followed by NewUnresolved
- let / const + initializer -> RemoveUnresolved
- */
-
- if (declaration->initializer.IsEmpty() ||
- (declaration_descriptor->mode == VariableMode::LET ||
- declaration_descriptor->mode == VariableMode::CONST)) {
- for (auto identifier : *(declaration->pattern.identifiers_)) {
- declaration_descriptor->scope->RemoveUnresolved(identifier);
- }
+ Scope* scope = declaration_descriptor->hoist_scope;
+ if (scope == nullptr) {
+ scope = this->scope();
+ }
+ for (auto variable : *(declaration->pattern.variables_)) {
+ declaration_descriptor->scope->RemoveUnresolved(variable);
+ scope->DeclareVariableName(variable->raw_name(),
+ declaration_descriptor->mode);
}
}
}
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index f4687eb3f7..77fe061f42 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -5,8 +5,10 @@
#ifndef V8_PARSING_PREPARSER_H
#define V8_PARSING_PREPARSER_H
+#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/parsing/parser-base.h"
+#include "src/parsing/preparse-data.h"
namespace v8 {
namespace internal {
@@ -67,6 +69,9 @@ class PreParserIdentifier {
static PreParserIdentifier Async() {
return PreParserIdentifier(kAsyncIdentifier);
}
+ static PreParserIdentifier Name() {
+ return PreParserIdentifier(kNameIdentifier);
+ }
bool IsEmpty() const { return type_ == kEmptyIdentifier; }
bool IsEval() const { return type_ == kEvalIdentifier; }
bool IsArguments() const { return type_ == kArgumentsIdentifier; }
@@ -79,11 +84,7 @@ class PreParserIdentifier {
bool IsConstructor() const { return type_ == kConstructorIdentifier; }
bool IsEnum() const { return type_ == kEnumIdentifier; }
bool IsAwait() const { return type_ == kAwaitIdentifier; }
- bool IsFutureStrictReserved() const {
- return type_ == kFutureStrictReservedIdentifier ||
- type_ == kLetIdentifier || type_ == kStaticIdentifier ||
- type_ == kYieldIdentifier;
- }
+ bool IsName() const { return type_ == kNameIdentifier; }
// Allow identifier->name()[->length()] to work. The preparser
// does not need the actual positions/lengths of the identifiers.
@@ -109,7 +110,8 @@ class PreParserIdentifier {
kConstructorIdentifier,
kEnumIdentifier,
kAwaitIdentifier,
- kAsyncIdentifier
+ kAsyncIdentifier,
+ kNameIdentifier
};
explicit PreParserIdentifier(Type type) : type_(type), string_(nullptr) {}
@@ -125,49 +127,65 @@ class PreParserIdentifier {
class PreParserExpression {
public:
PreParserExpression()
- : code_(TypeField::encode(kEmpty)), identifiers_(nullptr) {}
+ : code_(TypeField::encode(kEmpty)), variables_(nullptr) {}
static PreParserExpression Empty() { return PreParserExpression(); }
static PreParserExpression Default(
- ZoneList<const AstRawString*>* identifiers = nullptr) {
- return PreParserExpression(TypeField::encode(kExpression), identifiers);
+ ZoneList<VariableProxy*>* variables = nullptr) {
+ return PreParserExpression(TypeField::encode(kExpression), variables);
}
static PreParserExpression Spread(PreParserExpression expression) {
return PreParserExpression(TypeField::encode(kSpreadExpression),
- expression.identifiers_);
+ expression.variables_);
}
static PreParserExpression FromIdentifier(PreParserIdentifier id,
+ VariableProxy* variable,
Zone* zone) {
PreParserExpression expression(TypeField::encode(kIdentifierExpression) |
IdentifierTypeField::encode(id.type_));
- expression.AddIdentifier(id.string_, zone);
+ expression.AddVariable(variable, zone);
return expression;
}
static PreParserExpression BinaryOperation(PreParserExpression left,
Token::Value op,
- PreParserExpression right) {
- return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
+ PreParserExpression right,
+ Zone* zone) {
+ if (op == Token::COMMA) {
+ // Possibly an arrow function parameter list.
+ if (left.variables_ == nullptr) {
+ return PreParserExpression(TypeField::encode(kExpression),
+ right.variables_);
+ }
+ if (right.variables_ != nullptr) {
+ for (auto variable : *right.variables_) {
+ left.variables_->Add(variable, zone);
+ }
+ }
+ return PreParserExpression(TypeField::encode(kExpression),
+ left.variables_);
+ }
+ return PreParserExpression(TypeField::encode(kExpression));
}
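
Annotation: only Token::COMMA merges the variable lists because a parenthesized comma expression such as (a, b = c) may later turn out to be an arrow-function parameter list, so the variables gathered on both sides must survive into the combined expression. A toy version of the merge, assuming std::vector in place of ZoneList<VariableProxy*>:

    #include <vector>

    struct VariableProxy;

    // Reuse the left list when it exists, otherwise adopt the right one;
    // mirrors the COMMA branch of BinaryOperation above.
    std::vector<VariableProxy*>* MergeCommaVariables(
        std::vector<VariableProxy*>* left,
        std::vector<VariableProxy*>* right) {
      if (left == nullptr) return right;
      if (right != nullptr) {
        for (VariableProxy* v : *right) left->push_back(v);
      }
      return left;
    }
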
- static PreParserExpression Assignment() {
+ static PreParserExpression Assignment(ZoneList<VariableProxy*>* variables) {
return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kAssignment));
+ ExpressionTypeField::encode(kAssignment),
+ variables);
}
static PreParserExpression ObjectLiteral(
- ZoneList<const AstRawString*>* identifiers = nullptr) {
+ ZoneList<VariableProxy*>* variables) {
return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
- identifiers);
+ variables);
}
- static PreParserExpression ArrayLiteral(
- ZoneList<const AstRawString*>* identifiers = nullptr) {
+ static PreParserExpression ArrayLiteral(ZoneList<VariableProxy*>* variables) {
return PreParserExpression(TypeField::encode(kArrayLiteralExpression),
- identifiers);
+ variables);
}
static PreParserExpression StringLiteral() {
@@ -284,11 +302,6 @@ class PreParserExpression {
ExpressionTypeField::decode(code_) == kCallEvalExpression);
}
- bool IsDirectEvalCall() const {
- return TypeField::decode(code_) == kExpression &&
- ExpressionTypeField::decode(code_) == kCallEvalExpression;
- }
-
bool IsSuperCallReference() const {
return TypeField::decode(code_) == kExpression &&
ExpressionTypeField::decode(code_) == kSuperCallReference;
@@ -313,10 +326,6 @@ class PreParserExpression {
PreParserExpression AsFunctionLiteral() { return *this; }
- bool IsBinaryOperation() const {
- return TypeField::decode(code_) == kBinaryOperationExpression;
- }
-
// Dummy implementation for making expression->somefunc() work in both Parser
// and PreParser.
PreParserExpression* operator->() { return this; }
@@ -329,15 +338,12 @@ class PreParserExpression {
int position() const { return kNoSourcePosition; }
void set_function_token_position(int position) {}
- void set_is_class_field_initializer(bool is_class_field_initializer) {}
-
private:
enum Type {
kEmpty,
kExpression,
kIdentifierExpression,
kStringLiteralExpression,
- kBinaryOperationExpression,
kSpreadExpression,
kObjectLiteralExpression,
kArrayLiteralExpression
@@ -354,19 +360,18 @@ class PreParserExpression {
kAssignment
};
- explicit PreParserExpression(
- uint32_t expression_code,
- ZoneList<const AstRawString*>* identifiers = nullptr)
- : code_(expression_code), identifiers_(identifiers) {}
+ explicit PreParserExpression(uint32_t expression_code,
+ ZoneList<VariableProxy*>* variables = nullptr)
+ : code_(expression_code), variables_(variables) {}
- void AddIdentifier(const AstRawString* identifier, Zone* zone) {
- if (identifier == nullptr) {
+ void AddVariable(VariableProxy* variable, Zone* zone) {
+ if (variable == nullptr) {
return;
}
- if (identifiers_ == nullptr) {
- identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+ if (variables_ == nullptr) {
+ variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
}
- identifiers_->Add(identifier, zone);
+ variables_->Add(variable, zone);
}
// The first three bits are for the Type.
@@ -389,9 +394,9 @@ class PreParserExpression {
typedef BitField<bool, TypeField::kNext, 1> HasCoverInitializedNameField;
uint32_t code_;
- // If the PreParser is used in the identifier tracking mode,
- // PreParserExpression accumulates identifiers in that expression.
- ZoneList<const AstRawString*>* identifiers_;
+ // If the PreParser is used in the variable tracking mode, PreParserExpression
+ // accumulates variables in that expression.
+ ZoneList<VariableProxy*>* variables_;
friend class PreParser;
friend class PreParserFactory;
@@ -401,13 +406,13 @@ class PreParserExpression {
// The pre-parser doesn't need to build lists of expressions, identifiers, or
-// the like. If the PreParser is used in identifier tracking mode, it needs to
-// build lists of identifiers though.
+// the like. If the PreParser is used in variable tracking mode, it needs to
+// build lists of variables though.
template <typename T>
class PreParserList {
public:
// These functions make list->Add(some_expression) work (and do nothing).
- PreParserList() : length_(0), identifiers_(nullptr) {}
+ PreParserList() : length_(0), variables_(nullptr) {}
PreParserList* operator->() { return this; }
void Add(T, Zone* zone);
int length() const { return length_; }
@@ -415,9 +420,9 @@ class PreParserList {
bool IsNull() const { return length_ == -1; }
private:
- explicit PreParserList(int n) : length_(n), identifiers_(nullptr) {}
+ explicit PreParserList(int n) : length_(n), variables_(nullptr) {}
int length_;
- ZoneList<const AstRawString*>* identifiers_;
+ ZoneList<VariableProxy*>* variables_;
friend class PreParser;
friend class PreParserFactory;
@@ -426,14 +431,14 @@ class PreParserList {
template <>
inline void PreParserList<PreParserExpression>::Add(
PreParserExpression expression, Zone* zone) {
- if (expression.identifiers_ != nullptr) {
+ if (expression.variables_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
DCHECK(zone != nullptr);
- if (identifiers_ == nullptr) {
- identifiers_ = new (zone) ZoneList<const AstRawString*>(1, zone);
+ if (variables_ == nullptr) {
+ variables_ = new (zone) ZoneList<VariableProxy*>(1, zone);
}
- for (auto identifier : (*expression.identifiers_)) {
- identifiers_->Add(identifier, zone);
+ for (auto identifier : (*expression.variables_)) {
+ variables_->Add(identifier, zone);
}
}
++length_;
@@ -532,7 +537,8 @@ class PreParserStatement {
class PreParserFactory {
public:
explicit PreParserFactory(AstValueFactory* ast_value_factory)
- : zone_(ast_value_factory->zone()) {}
+ : ast_value_factory_(ast_value_factory),
+ zone_(ast_value_factory->zone()) {}
void set_zone(Zone* zone) { zone_ = zone; }
@@ -541,7 +547,14 @@ class PreParserFactory {
// This is needed for object literal property names. Property names are
// normalized to string literals during object literal parsing.
PreParserExpression expression = PreParserExpression::Default();
- expression.AddIdentifier(identifier.string_, zone_);
+ if (identifier.string_ != nullptr) {
+ DCHECK(FLAG_lazy_inner_functions);
+ AstNodeFactory factory(ast_value_factory_);
+ factory.set_zone(zone_);
+ VariableProxy* variable =
+ factory.NewVariableProxy(identifier.string_, NORMAL_VARIABLE);
+ expression.AddVariable(variable, zone_);
+ }
return expression;
}
PreParserExpression NewNumberLiteral(double number,
@@ -559,7 +572,7 @@ class PreParserFactory {
PreParserExpression NewArrayLiteral(PreParserExpressionList values,
int first_spread_index, int literal_index,
int pos) {
- return PreParserExpression::ArrayLiteral(values.identifiers_);
+ return PreParserExpression::ArrayLiteral(values.variables_);
}
PreParserExpression NewClassLiteralProperty(PreParserExpression key,
PreParserExpression value,
@@ -572,18 +585,18 @@ class PreParserFactory {
PreParserExpression value,
ObjectLiteralProperty::Kind kind,
bool is_computed_name) {
- return PreParserExpression::Default(value.identifiers_);
+ return PreParserExpression::Default(value.variables_);
}
PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
PreParserExpression value,
bool is_computed_name) {
- return PreParserExpression::Default(value.identifiers_);
+ return PreParserExpression::Default(value.variables_);
}
PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
int literal_index,
int boilerplate_properties,
int pos) {
- return PreParserExpression::ObjectLiteral(properties.identifiers_);
+ return PreParserExpression::ObjectLiteral(properties.variables_);
}
PreParserExpression NewVariableProxy(void* variable) {
return PreParserExpression::Default();
@@ -604,7 +617,7 @@ class PreParserFactory {
PreParserExpression NewBinaryOperation(Token::Value op,
PreParserExpression left,
PreParserExpression right, int pos) {
- return PreParserExpression::BinaryOperation(left, op, right);
+ return PreParserExpression::BinaryOperation(left, op, right, zone_);
}
PreParserExpression NewCompareOperation(Token::Value op,
PreParserExpression left,
@@ -618,7 +631,9 @@ class PreParserFactory {
PreParserExpression left,
PreParserExpression right,
int pos) {
- return PreParserExpression::Assignment();
+ // Identifiers need to be tracked since this might be a parameter with a
+ // default value inside an arrow function parameter list.
+ return PreParserExpression::Assignment(left.variables_);
}
PreParserExpression NewYield(PreParserExpression generator_object,
PreParserExpression expression, int pos,
@@ -662,7 +677,7 @@ class PreParserFactory {
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
- bool has_braces) {
+ bool has_braces, int function_literal_id) {
return PreParserExpression::Default();
}
@@ -755,14 +770,23 @@ class PreParserFactory {
}
private:
+ AstValueFactory* ast_value_factory_;
Zone* zone_;
};
struct PreParserFormalParameters : FormalParametersBase {
+ struct Parameter : public ZoneObject {
+ explicit Parameter(PreParserExpression pattern) : pattern(pattern) {}
+ Parameter** next() { return &next_parameter; }
+ Parameter* const* next() const { return &next_parameter; }
+ PreParserExpression pattern;
+ Parameter* next_parameter = nullptr;
+ };
explicit PreParserFormalParameters(DeclarationScope* scope)
: FormalParametersBase(scope) {}
- PreParserIdentifier at(int i) { return PreParserIdentifier(); } // Dummy
+
+ ThreadedList<Parameter> params;
};
@@ -838,11 +862,14 @@ class PreParser : public ParserBase<PreParser> {
kPreParseSuccess
};
- PreParser(Zone* zone, Scanner* scanner, AstValueFactory* ast_value_factory,
+ PreParser(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
+ AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
- RuntimeCallStats* runtime_call_stats, uintptr_t stack_limit)
+ RuntimeCallStats* runtime_call_stats,
+ bool parsing_on_main_thread = true)
: ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
- ast_value_factory, runtime_call_stats),
+ ast_value_factory, runtime_call_stats,
+ parsing_on_main_thread),
use_counts_(nullptr),
track_unresolved_variables_(false),
pending_error_handler_(pending_error_handler) {}
@@ -859,6 +886,9 @@ class PreParser : public ParserBase<PreParser> {
bool is_module = false) {
DCHECK_NULL(scope_state_);
DeclarationScope* scope = NewScriptScope();
+#ifdef DEBUG
+ scope->set_is_being_lazily_parsed(true);
+#endif
// ModuleDeclarationInstantiation for Source Text Module Records creates a
// new Module Environment Record whose outer lexical environment record is
@@ -877,8 +907,6 @@ class PreParser : public ParserBase<PreParser> {
} else if (is_strict(this->scope()->language_mode())) {
CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
&ok);
- CheckDecimalLiteralWithLeadingZero(start_position,
- scanner()->location().end_pos);
}
if (materialized_literals) {
*materialized_literals = function_state_->materialized_literal_count();
@@ -958,11 +986,6 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE void MarkCollectedTailCallExpressions() {}
V8_INLINE void MarkTailPosition(PreParserExpression expression) {}
- V8_INLINE PreParserExpressionList
- PrepareSpreadArguments(PreParserExpressionList list) {
- return list;
- }
-
V8_INLINE PreParserExpression SpreadCall(PreParserExpression function,
PreParserExpressionList args,
int pos);
@@ -970,11 +993,6 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpressionList args,
int pos);
- V8_INLINE PreParserExpression
- RewriteSuperCall(PreParserExpression call_expression) {
- return call_expression;
- }
-
V8_INLINE void RewriteDestructuringAssignments() {}
V8_INLINE PreParserExpression RewriteExponentiation(PreParserExpression left,
@@ -1015,8 +1033,7 @@ class PreParser : public ParserBase<PreParser> {
bool* ok) {
DCHECK(!expr.AsIdentifier().IsEnum());
DCHECK(!parsing_module_ || !expr.AsIdentifier().IsAwait());
- DCHECK(is_sloppy(language_mode()) ||
- !IsFutureStrictReserved(expr.AsIdentifier()));
+ DCHECK(IsIdentifier(expr));
return labels;
}
@@ -1035,7 +1052,22 @@ class PreParser : public ParserBase<PreParser> {
PreParserStatementList cases, Scope* scope) {
return PreParserStatement::Default();
}
- V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {}
+
+ V8_INLINE void RewriteCatchPattern(CatchInfo* catch_info, bool* ok) {
+ if (track_unresolved_variables_) {
+ if (catch_info->name.string_ != nullptr) {
+ // Unlike in the parser, we need to declare the catch variable as LET
+ // variable, so that it won't get hoisted out of the scope.
+ catch_info->scope->DeclareVariableName(catch_info->name.string_, LET);
+ }
+ if (catch_info->pattern.variables_ != nullptr) {
+ for (auto variable : *catch_info->pattern.variables_) {
+ scope()->DeclareVariableName(variable->raw_name(), LET);
+ }
+ }
+ }
+ }
+
V8_INLINE void ValidateCatchBlock(const CatchInfo& catch_info, bool* ok) {}
V8_INLINE PreParserStatement RewriteTryStatement(
PreParserStatement try_block, PreParserStatement catch_block,
@@ -1060,9 +1092,19 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserStatement DeclareFunction(
- PreParserIdentifier variable_name, PreParserExpression function, int pos,
- bool is_generator, bool is_async, ZoneList<const AstRawString*>* names,
+ PreParserIdentifier variable_name, PreParserExpression function,
+ VariableMode mode, int pos, bool is_generator, bool is_async,
+ bool is_sloppy_block_function, ZoneList<const AstRawString*>* names,
bool* ok) {
+ DCHECK_NULL(names);
+ if (variable_name.string_ != nullptr) {
+ DCHECK(track_unresolved_variables_);
+ scope()->DeclareVariableName(variable_name.string_, mode);
+ if (is_sloppy_block_function) {
+ GetDeclarationScope()->DeclareSloppyBlockFunction(variable_name.string_,
+ scope());
+ }
+ }
return Statement::Default();
}
@@ -1070,6 +1112,12 @@ class PreParser : public ParserBase<PreParser> {
DeclareClass(PreParserIdentifier variable_name, PreParserExpression value,
ZoneList<const AstRawString*>* names, int class_token_pos,
int end_pos, bool* ok) {
+ // Preparser shouldn't be used in contexts where we need to track the names.
+ DCHECK_NULL(names);
+ if (variable_name.string_ != nullptr) {
+ DCHECK(track_unresolved_variables_);
+ scope()->DeclareVariableName(variable_name.string_, LET);
+ }
return PreParserStatement::Default();
}
V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
@@ -1077,10 +1125,16 @@ class PreParser : public ParserBase<PreParser> {
int class_token_pos, bool* ok) {}
V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
PreParserExpression property,
- ClassInfo* class_info, bool* ok) {}
+ ClassLiteralProperty::Kind kind,
+ bool is_static, bool is_constructor,
+                                      ClassInfo* class_info, bool* ok) {}
V8_INLINE PreParserExpression RewriteClassLiteral(PreParserIdentifier name,
ClassInfo* class_info,
int pos, bool* ok) {
+ bool has_default_constructor = !class_info->has_seen_constructor;
+ // Account for the default constructor.
+ if (has_default_constructor) GetNextFunctionLiteralId();
return PreParserExpression::Default();
}
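  // Id bookkeeping sketch: for  class C {}  the parser later synthesizes a
  // FunctionLiteral for the implicit default constructor, so the preparser
  // advances the same counter here; otherwise every subsequent literal id
  // would be off by one between the preparse and full-parse passes.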
@@ -1115,10 +1169,6 @@ class PreParser : public ParserBase<PreParser> {
return identifier.IsAwait();
}
- V8_INLINE bool IsFutureStrictReserved(PreParserIdentifier identifier) const {
- return identifier.IsFutureStrictReserved();
- }
-
// Returns true if the expression is of type "this.foo".
V8_INLINE static bool IsThisProperty(PreParserExpression expression) {
return expression.IsThisProperty();
@@ -1146,8 +1196,8 @@ class PreParser : public ParserBase<PreParser> {
return identifier.IsConstructor();
}
- V8_INLINE bool IsDirectEvalCall(PreParserExpression expression) const {
- return expression.IsDirectEvalCall();
+ V8_INLINE bool IsName(PreParserIdentifier identifier) const {
+ return identifier.IsName();
}
V8_INLINE static bool IsBoilerplateProperty(PreParserExpression property) {
@@ -1202,11 +1252,16 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE static void CheckAssigningFunctionLiteralToProperty(
PreParserExpression left, PreParserExpression right) {}
- V8_INLINE static PreParserExpression MarkExpressionAsAssigned(
- PreParserExpression expression) {
+ V8_INLINE void MarkExpressionAsAssigned(PreParserExpression expression) {
// TODO(marja): To be able to produce the same errors, the preparser needs
// to start tracking which expressions are variables and which are assigned.
- return expression;
+ if (expression.variables_ != nullptr) {
+ DCHECK(FLAG_lazy_inner_functions);
+ DCHECK(track_unresolved_variables_);
+ for (auto variable : *expression.variables_) {
+ variable->set_is_assigned();
+ }
+ }
}
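  // Effect sketch (JS input illustrative): after preparsing
  //
  //   let a; (function inner() { a = 1; })();
  //
  // the variable recorded for 'a' carries is_assigned(), so later scope
  // analysis will not treat it as effectively constant.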
V8_INLINE bool ShortcutNumericLiteralBinaryExpression(PreParserExpression* x,
@@ -1229,6 +1284,11 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserStatement
BuildInitializationBlock(DeclarationParsingResult* parsing_result,
ZoneList<const AstRawString*>* names, bool* ok) {
+ for (auto declaration : parsing_result->declarations) {
+ DeclareAndInitializeVariables(PreParserStatement::Default(),
+ &(parsing_result->descriptor), &declaration,
+ names, ok);
+ }
return PreParserStatement::Default();
}
@@ -1236,15 +1296,25 @@ class PreParser : public ParserBase<PreParser> {
InitializeForEachStatement(PreParserStatement stmt, PreParserExpression each,
PreParserExpression subject,
PreParserStatement body, int each_keyword_pos) {
+ MarkExpressionAsAssigned(each);
return stmt;
}
V8_INLINE PreParserStatement RewriteForVarInLegacy(const ForInfo& for_info) {
return PreParserStatement::Null();
}
+
V8_INLINE void DesugarBindingInForEachStatement(
ForInfo* for_info, PreParserStatement* body_block,
- PreParserExpression* each_variable, bool* ok) {}
+ PreParserExpression* each_variable, bool* ok) {
+ if (track_unresolved_variables_) {
+ DCHECK(for_info->parsing_result.declarations.length() == 1);
+ DeclareAndInitializeVariables(
+ PreParserStatement::Default(), &for_info->parsing_result.descriptor,
+ &for_info->parsing_result.declarations[0], nullptr, ok);
+ }
+ }
+
V8_INLINE PreParserStatement CreateForEachStatementTDZ(
PreParserStatement init_block, const ForInfo& for_info, bool* ok) {
return init_block;
@@ -1449,14 +1519,30 @@ class PreParser : public ParserBase<PreParser> {
PreParserExpression initializer,
int initializer_end_position,
bool is_rest) {
+ if (track_unresolved_variables_) {
+ DCHECK(FLAG_lazy_inner_functions);
+ parameters->params.Add(new (zone())
+ PreParserFormalParameters::Parameter(pattern));
+ }
parameters->UpdateArityAndFunctionLength(!initializer.IsEmpty(), is_rest);
}
- V8_INLINE void DeclareFormalParameter(DeclarationScope* scope,
- PreParserIdentifier parameter) {
+ V8_INLINE void DeclareFormalParameters(
+ DeclarationScope* scope,
+ const ThreadedList<PreParserFormalParameters::Parameter>& parameters) {
if (!classifier()->is_simple_parameter_list()) {
scope->SetHasNonSimpleParameters();
}
+ if (track_unresolved_variables_) {
+ DCHECK(FLAG_lazy_inner_functions);
+ for (auto parameter : parameters) {
+ if (parameter->pattern.variables_ != nullptr) {
+ for (auto variable : *parameter->pattern.variables_) {
+ scope->DeclareVariableName(variable->raw_name(), VAR);
+ }
+ }
+ }
+ }
}
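  // Coverage sketch (hypothetical input): with --lazy-inner-functions the
  // parameters of a skipped function, including those bound by patterns as in
  //
  //   function outer(a, { b }) { return a + b; }
  //
  // are still recorded as VAR names in its declaration scope.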
V8_INLINE void DeclareArrowFunctionFormalParameters(
@@ -1465,6 +1551,14 @@ class PreParser : public ParserBase<PreParser> {
bool* ok) {
// TODO(wingo): Detect duplicated identifiers in paramlists. Detect
// parameter lists that are too long.
+ if (track_unresolved_variables_) {
+ DCHECK(FLAG_lazy_inner_functions);
+ if (params.variables_ != nullptr) {
+ for (auto variable : *params.variables_) {
+ parameters->scope->DeclareVariableName(variable->raw_name(), VAR);
+ }
+ }
+ }
}
V8_INLINE void ReindexLiterals(const PreParserFormalParameters& parameters) {}
@@ -1485,7 +1579,7 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression
ExpressionListToExpression(PreParserExpressionList args) {
- return PreParserExpression::Default(args.identifiers_);
+ return PreParserExpression::Default(args.variables_);
}
V8_INLINE void AddAccessorPrefixToFunctionName(bool is_get,
@@ -1535,8 +1629,8 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
FunctionLiteral::FunctionType function_type, bool* ok) {
PreParserStatementList result;
- Scope* inner_scope = scope();
- if (!parameters.is_simple) inner_scope = NewScope(BLOCK_SCOPE);
+ DeclarationScope* inner_scope = scope()->AsDeclarationScope();
+ if (!parameters.is_simple) inner_scope = NewVarblockScope();
{
BlockState block_state(&scope_state_, inner_scope);
@@ -1545,6 +1639,10 @@ PreParserStatementList PreParser::ParseEagerFunctionBody(
}
Expect(Token::RBRACE, ok);
+
+ if (is_sloppy(inner_scope->language_mode())) {
+ inner_scope->HoistSloppyBlockFunctions(nullptr);
+ }
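  // Annex B.3.3 sketch: in sloppy mode a block-level function declaration is
  // also made visible function-wide, so the preparser hoists like the parser:
  //
  //   function f() { { function g() {} } return g(); }  // legal in sloppy code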
return result;
}
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 69ac4171c2..b56457e540 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -6,6 +6,7 @@
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
+#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
@@ -14,8 +15,8 @@ namespace internal {
class Processor final : public AstVisitor<Processor> {
public:
- Processor(Isolate* isolate, DeclarationScope* closure_scope, Variable* result,
- AstValueFactory* ast_value_factory)
+ Processor(uintptr_t stack_limit, DeclarationScope* closure_scope,
+ Variable* result, AstValueFactory* ast_value_factory)
: result_(result),
result_assigned_(false),
replacement_(nullptr),
@@ -25,7 +26,7 @@ class Processor final : public AstVisitor<Processor> {
closure_scope_(closure_scope),
factory_(ast_value_factory) {
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
- InitializeAstVisitor(isolate);
+ InitializeAstVisitor(stack_limit);
}
Processor(Parser* parser, DeclarationScope* closure_scope, Variable* result,
@@ -243,7 +244,6 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
// Only rewrite finally if it could contain 'break' or 'continue'. Always
// rewrite try.
if (breakable_) {
- bool set_after = is_set_;
// Only set result before a 'break' or 'continue'.
is_set_ = true;
Visit(node->finally_block());
@@ -265,7 +265,6 @@ void Processor::VisitTryFinallyStatement(TryFinallyStatement* node) {
0, factory()->NewExpressionStatement(save, kNoSourcePosition), zone());
node->finally_block()->statements()->Add(
factory()->NewExpressionStatement(restore, kNoSourcePosition), zone());
- is_set_ = set_after;
}
Visit(node->try_block());
node->set_try_block(replacement_->AsBlock());
@@ -356,23 +355,36 @@ DECLARATION_NODE_LIST(DEF_VISIT)
// Assumes code has been parsed. Mutates the AST, so the AST should not
// continue to be used in the case of failure.
bool Rewriter::Rewrite(ParseInfo* info) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
+ RuntimeCallTimerScope runtimeTimer(
+ info->isolate(), &RuntimeCallStats::CompileRewriteReturnResult);
+
FunctionLiteral* function = info->literal();
DCHECK_NOT_NULL(function);
Scope* scope = function->scope();
DCHECK_NOT_NULL(scope);
if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
+
DeclarationScope* closure_scope = scope->GetClosureScope();
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
Variable* result = closure_scope->NewTemporary(
info->ast_value_factory()->dot_result_string());
- // The name string must be internalized at this point.
- info->ast_value_factory()->Internalize(info->isolate());
- DCHECK(!result->name().is_null());
- Processor processor(info->isolate(), closure_scope, result,
- info->ast_value_factory());
+ Processor processor(info->isolate()->stack_guard()->real_climit(),
+ closure_scope, result, info->ast_value_factory());
processor.Process(body);
+
+ // TODO(leszeks): Remove this check and releases once internalization is
+ // moved out of parsing/analysis.
+ DCHECK(ThreadId::Current().Equals(info->isolate()->thread_id()));
+ no_deref.Release();
+ no_handles.Release();
+ no_allocation.Release();
+
// Internalize any values created during rewriting.
info->ast_value_factory()->Internalize(info->isolate());
if (processor.HasStackOverflow()) return false;
@@ -392,6 +404,10 @@ bool Rewriter::Rewrite(ParseInfo* info) {
bool Rewriter::Rewrite(Parser* parser, DeclarationScope* closure_scope,
DoExpression* expr, AstValueFactory* factory) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+
Block* block = expr->block();
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
DCHECK(block->scope() == nullptr ||
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index f7c7fd526f..d3162dfbb2 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -5,6 +5,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "include/v8.h"
+#include "src/counters.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/objects-inl.h"
@@ -194,9 +195,11 @@ size_t ExternalOneByteStringUtf16CharacterStream::FillBuffer(size_t from_pos) {
class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
public:
Utf8ExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source_stream)
+ ScriptCompiler::ExternalSourceStream* source_stream,
+ RuntimeCallStats* stats)
: current_({0, {0, 0, unibrow::Utf8::Utf8IncrementalBuffer(0)}}),
- source_stream_(source_stream) {}
+ source_stream_(source_stream),
+ stats_(stats) {}
~Utf8ExternalStreamingStream() override {
for (size_t i = 0; i < chunks_.size(); i++) delete[] chunks_[i].data;
}
@@ -245,6 +248,7 @@ class Utf8ExternalStreamingStream : public BufferedUtf16CharacterStream {
std::vector<Chunk> chunks_;
Position current_;
ScriptCompiler::ExternalSourceStream* source_stream_;
+ RuntimeCallStats* stats_;
};
bool Utf8ExternalStreamingStream::SkipToPosition(size_t position) {
@@ -335,6 +339,7 @@ void Utf8ExternalStreamingStream::FillBufferFromCurrentChunk() {
}
bool Utf8ExternalStreamingStream::FetchChunk() {
+ RuntimeCallTimerScope scope(stats_, &RuntimeCallStats::GetMoreDataCallback);
DCHECK_EQ(current_.chunk_no, chunks_.size());
DCHECK(chunks_.empty() || chunks_.back().length != 0);
@@ -466,20 +471,23 @@ void DeleteChunks(Chunks& chunks) {
// Return the chunk index for the chunk containing position.
// If position is past the end of the stream, the index of the last,
// zero-length chunk is returned.
-size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
- size_t position) {
+size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source,
+ size_t position, RuntimeCallStats* stats) {
size_t end_pos =
chunks.empty() ? 0 : (chunks.back().byte_pos + chunks.back().byte_length);
// Get more data if needed. We usually won't enter the loop body.
bool out_of_data = !chunks.empty() && chunks.back().byte_length == 0;
- while (!out_of_data && end_pos <= position + 1) {
- const uint8_t* chunk = nullptr;
- size_t len = source_->GetMoreData(&chunk);
-
- chunks.push_back({chunk, len, end_pos});
- end_pos += len;
- out_of_data = (len == 0);
+ {
+ RuntimeCallTimerScope scope(stats, &RuntimeCallStats::GetMoreDataCallback);
+ while (!out_of_data && end_pos <= position + 1) {
+ const uint8_t* chunk = nullptr;
+ size_t len = source->GetMoreData(&chunk);
+
+ chunks.push_back({chunk, len, end_pos});
+ end_pos += len;
+ out_of_data = (len == 0);
+ }
}
// Here, we should always have at least one chunk, and we either have the
@@ -520,8 +528,8 @@ size_t FindChunk(Chunks& chunks, ScriptCompiler::ExternalSourceStream* source_,
class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
public:
explicit OneByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source)
- : source_(source) {}
+ ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
+ : source_(source), stats_(stats) {}
~OneByteExternalStreamingStream() override { DeleteChunks(chunks_); }
protected:
@@ -530,10 +538,11 @@ class OneByteExternalStreamingStream : public BufferedUtf16CharacterStream {
private:
Chunks chunks_;
ScriptCompiler::ExternalSourceStream* source_;
+ RuntimeCallStats* stats_;
};
size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
- const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position)];
+ const Chunk& chunk = chunks_[FindChunk(chunks_, source_, position, stats_)];
if (chunk.byte_length == 0) return 0;
size_t start_pos = position - chunk.byte_pos;
@@ -554,7 +563,7 @@ size_t OneByteExternalStreamingStream::FillBuffer(size_t position) {
class TwoByteExternalStreamingStream : public Utf16CharacterStream {
public:
explicit TwoByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source);
+ ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
~TwoByteExternalStreamingStream() override;
protected:
@@ -562,14 +571,16 @@ class TwoByteExternalStreamingStream : public Utf16CharacterStream {
Chunks chunks_;
ScriptCompiler::ExternalSourceStream* source_;
+ RuntimeCallStats* stats_;
uc16 one_char_buffer_;
};
TwoByteExternalStreamingStream::TwoByteExternalStreamingStream(
- ScriptCompiler::ExternalSourceStream* source)
+ ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
: Utf16CharacterStream(&one_char_buffer_, &one_char_buffer_,
&one_char_buffer_, 0),
source_(source),
+ stats_(stats),
one_char_buffer_(0) {}
TwoByteExternalStreamingStream::~TwoByteExternalStreamingStream() {
@@ -581,7 +592,7 @@ bool TwoByteExternalStreamingStream::ReadBlock() {
// We'll search for the 2nd byte of our character, to make sure we
// have enough data for at least one character.
- size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
// Out of data? Return 0.
if (chunks_[chunk_no].byte_length == 0) {
@@ -649,7 +660,7 @@ bool TwoByteExternalStreamingStream::ReadBlock() {
class TwoByteExternalBufferedStream : public Utf16CharacterStream {
public:
explicit TwoByteExternalBufferedStream(
- ScriptCompiler::ExternalSourceStream* source);
+ ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats);
~TwoByteExternalBufferedStream();
protected:
@@ -667,11 +678,14 @@ class TwoByteExternalBufferedStream : public Utf16CharacterStream {
Chunks chunks_;
ScriptCompiler::ExternalSourceStream* source_;
+ RuntimeCallStats* stats_;
};
TwoByteExternalBufferedStream::TwoByteExternalBufferedStream(
- ScriptCompiler::ExternalSourceStream* source)
- : Utf16CharacterStream(buffer_, buffer_, buffer_, 0), source_(source) {}
+ ScriptCompiler::ExternalSourceStream* source, RuntimeCallStats* stats)
+ : Utf16CharacterStream(buffer_, buffer_, buffer_, 0),
+ source_(source),
+ stats_(stats) {}
TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
DeleteChunks(chunks_);
@@ -680,7 +694,7 @@ TwoByteExternalBufferedStream::~TwoByteExternalBufferedStream() {
bool TwoByteExternalBufferedStream::ReadBlock() {
size_t position = pos();
// Find chunk in which the position belongs
- size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ size_t chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
// Out of data? Return 0.
if (chunks_[chunk_no].byte_length == 0) {
@@ -726,7 +740,7 @@ size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
{
size_t new_pos = position / kBufferSize * kBufferSize;
if (new_pos != position) {
- chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1);
+ chunk_no = FindChunk(chunks_, source_, 2 * new_pos + 1, stats_);
buffer_pos_ = new_pos;
buffer_cursor_ = buffer_start_ + (position - buffer_pos_);
position = new_pos;
@@ -768,7 +782,7 @@ size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
totalLength += bytes_to_move;
position = (current->byte_pos + current->byte_length) / 2;
if (position - buffer_pos_ < kBufferSize) {
- chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
current = &chunks_[chunk_no];
odd_start = current->byte_pos % 2;
bytes_to_move = i::Min(2 * kBufferSize - totalLength, current->byte_length);
@@ -781,7 +795,7 @@ size_t TwoByteExternalBufferedStream::FillBuffer(size_t position,
current->data, bytes_to_move);
totalLength += bytes_to_move;
position = (current->byte_pos + current->byte_length) / 2;
- chunk_no = FindChunk(chunks_, source_, 2 * position + 1);
+ chunk_no = FindChunk(chunks_, source_, 2 * position + 1, stats_);
current = &chunks_[chunk_no];
odd_start = current->byte_pos % 2;
bytes_to_move =
@@ -828,18 +842,19 @@ std::unique_ptr<Utf16CharacterStream> ScannerStream::ForTesting(
Utf16CharacterStream* ScannerStream::For(
ScriptCompiler::ExternalSourceStream* source_stream,
- v8::ScriptCompiler::StreamedSource::Encoding encoding) {
+ v8::ScriptCompiler::StreamedSource::Encoding encoding,
+ RuntimeCallStats* stats) {
switch (encoding) {
case v8::ScriptCompiler::StreamedSource::TWO_BYTE:
#if !(V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64)
- return new TwoByteExternalStreamingStream(source_stream);
+ return new TwoByteExternalStreamingStream(source_stream, stats);
#else
- return new TwoByteExternalBufferedStream(source_stream);
+ return new TwoByteExternalBufferedStream(source_stream, stats);
#endif
case v8::ScriptCompiler::StreamedSource::ONE_BYTE:
- return new OneByteExternalStreamingStream(source_stream);
+ return new OneByteExternalStreamingStream(source_stream, stats);
case v8::ScriptCompiler::StreamedSource::UTF8:
- return new Utf8ExternalStreamingStream(source_stream);
+ return new Utf8ExternalStreamingStream(source_stream, stats);
}
UNREACHABLE();
return nullptr;
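// Caller-side sketch (wiring assumed, not shown in this patch): the stats
// pointer is simply threaded down to the GetMoreData timer scopes.
//
//   RuntimeCallStats* stats = isolate->counters()->runtime_call_stats();
//   std::unique_ptr<Utf16CharacterStream> stream(
//       ScannerStream::For(source_stream, encoding, stats));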
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index ac81613ab7..b9c28248dc 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -12,6 +12,7 @@ namespace v8 {
namespace internal {
class Utf16CharacterStream;
+class RuntimeCallStats;
class ScannerStream {
public:
@@ -20,7 +21,8 @@ class ScannerStream {
int end_pos);
static Utf16CharacterStream* For(
ScriptCompiler::ExternalSourceStream* source_stream,
- ScriptCompiler::StreamedSource::Encoding encoding);
+ ScriptCompiler::StreamedSource::Encoding encoding,
+ RuntimeCallStats* stats);
// For testing:
static std::unique_ptr<Utf16CharacterStream> ForTesting(const char* data);
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 363ab7dfe9..bfb5e03d68 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -26,6 +26,68 @@ Handle<String> Scanner::LiteralBuffer::Internalize(Isolate* isolate) const {
return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
}
+int Scanner::LiteralBuffer::NewCapacity(int min_capacity) {
+ int capacity = Max(min_capacity, backing_store_.length());
+ int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
+ return new_capacity;
+}
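// Worked example of the growth policy (assuming kGrowthFactory == 4 and
// kMaxGrowth == 1 * MB, as declared next to the buffer):
//
//   current length 64     -> NewCapacity returns 256      (64 * 4)
//   current length 2 * MB -> NewCapacity returns 3 * MB   (capped at +1 MB)
//
// i.e. geometric growth for small literals, linear growth for huge ones.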
+
+void Scanner::LiteralBuffer::ExpandBuffer() {
+ Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
+ MemCopy(new_store.start(), backing_store_.start(), position_);
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+}
+
+void Scanner::LiteralBuffer::ConvertToTwoByte() {
+ DCHECK(is_one_byte_);
+ Vector<byte> new_store;
+ int new_content_size = position_ * kUC16Size;
+ if (new_content_size >= backing_store_.length()) {
+ // Ensure room for all currently read code units as UC16 as well
+ // as the code unit about to be stored.
+ new_store = Vector<byte>::New(NewCapacity(new_content_size));
+ } else {
+ new_store = backing_store_;
+ }
+ uint8_t* src = backing_store_.start();
+ uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
+ for (int i = position_ - 1; i >= 0; i--) {
+ dst[i] = src[i];
+ }
+ if (new_store.start() != backing_store_.start()) {
+ backing_store_.Dispose();
+ backing_store_ = new_store;
+ }
+ position_ = new_content_size;
+ is_one_byte_ = false;
+}
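// Why the copy above runs backwards: in the in-place case (new_store ==
// backing_store_), dst[i] occupies bytes [2i, 2i+1] of the same buffer that
// holds src[i] at byte i. Since 2i >= i, iterating from position_ - 1 down
// to 0 means every write lands at or beyond the byte it reads, so no unread
// one-byte character is clobbered.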
+
+void Scanner::LiteralBuffer::AddCharSlow(uc32 code_unit) {
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ if (is_one_byte_) {
+ if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+ backing_store_[position_] = static_cast<byte>(code_unit);
+ position_ += kOneByteSize;
+ return;
+ }
+ ConvertToTwoByte();
+ }
+ if (code_unit <=
+ static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
+ position_ += kUC16Size;
+ } else {
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::LeadSurrogate(code_unit);
+ position_ += kUC16Size;
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
+ unibrow::Utf16::TrailSurrogate(code_unit);
+ position_ += kUC16Size;
+ }
+}
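// Supplementary-plane sketch: a code point above
// unibrow::Utf16::kMaxNonSurrogateCharCode (0xFFFF) is stored as two uc16
// units, e.g. U+1F600 becomes 0xD83D (lead) followed by 0xDE00 (trail).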
+
// ----------------------------------------------------------------------------
// Scanner::BookmarkScope
@@ -78,10 +140,8 @@ bool Scanner::BookmarkScope::HasBeenApplied() {
Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
- decimal_with_leading_zero_pos_(Location::invalid()),
- found_html_comment_(false) {
-}
-
+ octal_message_(MessageTemplate::kNone),
+ found_html_comment_(false) {}
void Scanner::Initialize(Utf16CharacterStream* source) {
source_ = source;
@@ -917,6 +977,7 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
// occur before the "use strict" directive.
if (c != '0' || i > 0) {
octal_pos_ = Location(source_pos() - i - 1, source_pos() - 1);
+ octal_message_ = MessageTemplate::kStrictOctalEscape;
}
return x;
}
@@ -1130,6 +1191,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
if (c0_ < '0' || '7' < c0_) {
// Octal literal finished.
octal_pos_ = Location(start_pos, source_pos());
+ octal_message_ = MessageTemplate::kStrictOctalLiteral;
break;
}
AddLiteralCharAdvance();
@@ -1152,13 +1214,16 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
}
if (next_.literal_chars->one_byte_literal().length() <= 10 &&
- value <= Smi::kMaxValue && c0_ != '.' && c0_ != 'e' && c0_ != 'E') {
+ value <= Smi::kMaxValue && c0_ != '.' &&
+ (c0_ == kEndOfInput || !unicode_cache_->IsIdentifierStart(c0_))) {
next_.smi_value_ = static_cast<uint32_t>(value);
literal.Complete();
HandleLeadSurrogate();
- if (kind == DECIMAL_WITH_LEADING_ZERO)
- decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
+ if (kind == DECIMAL_WITH_LEADING_ZERO) {
+ octal_pos_ = Location(start_pos, source_pos());
+ octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
+ }
return Token::SMI;
}
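  // Note on the tightened guard above: IsIdentifierStart subsumes the old
  // explicit 'e' / 'E' exponent checks and also keeps inputs like  3in  off
  // the SMI fast path, so the tail of ScanNumber can reject a literal that
  // is immediately followed by an identifier character.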
HandleLeadSurrogate();
@@ -1198,8 +1263,10 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
literal.Complete();
- if (kind == DECIMAL_WITH_LEADING_ZERO)
- decimal_with_leading_zero_pos_ = Location(start_pos, source_pos());
+ if (kind == DECIMAL_WITH_LEADING_ZERO) {
+ octal_pos_ = Location(start_pos, source_pos());
+ octal_message_ = MessageTemplate::kStrictDecimalWithLeadingZero;
+ }
return Token::NUMBER;
}
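// Reporting sketch (strict-mode JS inputs, assumed mapping): a single
// (octal_pos_, octal_message_) pair now covers all three complaints:
//
//   08      -> kStrictDecimalWithLeadingZero
//   01      -> kStrictOctalLiteral
//   "\07"   -> kStrictOctalEscape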
@@ -1339,19 +1406,6 @@ static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
}
-bool Scanner::IdentifierIsFutureStrictReserved(
- const AstRawString* string) const {
- // Keywords are always 1-byte strings.
- if (!string->is_one_byte()) return false;
- if (string->IsOneByteEqualTo("let") || string->IsOneByteEqualTo("static") ||
- string->IsOneByteEqualTo("yield")) {
- return true;
- }
- return Token::FUTURE_STRICT_RESERVED_WORD ==
- KeywordOrIdentifierToken(string->raw_data(), string->length());
-}
-
-
Token::Value Scanner::ScanIdentifierOrKeyword() {
DCHECK(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
@@ -1612,14 +1666,13 @@ bool Scanner::ContainsDot() {
return std::find(str.begin(), str.end(), '.') != str.end();
}
-
-int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
+bool Scanner::FindSymbol(DuplicateFinder* finder) {
// TODO(vogelheim): Move this logic into the calling class; this can be fully
// implemented using the public interface.
if (is_literal_one_byte()) {
- return finder->AddOneByteSymbol(literal_one_byte_string(), value);
+ return finder->AddOneByteSymbol(literal_one_byte_string());
}
- return finder->AddTwoByteSymbol(literal_two_byte_string(), value);
+ return finder->AddTwoByteSymbol(literal_two_byte_string());
}
void Scanner::SeekNext(size_t position) {
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 6f6fab5543..075b9ca6b2 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -268,20 +268,17 @@ class Scanner {
return false;
}
- int FindSymbol(DuplicateFinder* finder, int value);
+ bool FindSymbol(DuplicateFinder* finder);
UnicodeCache* unicode_cache() { return unicode_cache_; }
// Returns the location of the last seen octal literal.
Location octal_position() const { return octal_pos_; }
- void clear_octal_position() { octal_pos_ = Location::invalid(); }
- // Returns the location of the last seen decimal literal with a leading zero.
- Location decimal_with_leading_zero_position() const {
- return decimal_with_leading_zero_pos_;
- }
- void clear_decimal_with_leading_zero_position() {
- decimal_with_leading_zero_pos_ = Location::invalid();
+ void clear_octal_position() {
+ octal_pos_ = Location::invalid();
+ octal_message_ = MessageTemplate::kNone;
}
+ MessageTemplate::Template octal_message() const { return octal_message_; }
// Returns the value of the last smi that was scanned.
uint32_t smi_value() const { return current_.smi_value_; }
@@ -328,8 +325,6 @@ class Scanner {
return tmp;
}
- bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
-
bool FoundHtmlComment() const { return found_html_comment_; }
private:
@@ -358,36 +353,16 @@ class Scanner {
~LiteralBuffer() { backing_store_.Dispose(); }
INLINE(void AddChar(char code_unit)) {
- if (position_ >= backing_store_.length()) ExpandBuffer();
- DCHECK(is_one_byte_);
DCHECK(IsValidAscii(code_unit));
- backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kOneByteSize;
- return;
+ AddOneByteChar(static_cast<byte>(code_unit));
}
INLINE(void AddChar(uc32 code_unit)) {
- if (position_ >= backing_store_.length()) ExpandBuffer();
- if (is_one_byte_) {
- if (code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
- backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kOneByteSize;
- return;
- }
- ConvertToTwoByte();
- }
- if (code_unit <=
- static_cast<uc32>(unibrow::Utf16::kMaxNonSurrogateCharCode)) {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
- position_ += kUC16Size;
+ if (is_one_byte_ &&
+ code_unit <= static_cast<uc32>(unibrow::Latin1::kMaxChar)) {
+ AddOneByteChar(static_cast<byte>(code_unit));
} else {
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::LeadSurrogate(code_unit);
- position_ += kUC16Size;
- if (position_ >= backing_store_.length()) ExpandBuffer();
- *reinterpret_cast<uint16_t*>(&backing_store_[position_]) =
- unibrow::Utf16::TrailSurrogate(code_unit);
- position_ += kUC16Size;
+ AddCharSlow(code_unit);
}
}
@@ -439,43 +414,18 @@ class Scanner {
return iscntrl(code_unit) || isprint(code_unit);
}
- inline int NewCapacity(int min_capacity) {
- int capacity = Max(min_capacity, backing_store_.length());
- int new_capacity = Min(capacity * kGrowthFactory, capacity + kMaxGrowth);
- return new_capacity;
- }
-
- void ExpandBuffer() {
- Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- MemCopy(new_store.start(), backing_store_.start(), position_);
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
-
- void ConvertToTwoByte() {
+ INLINE(void AddOneByteChar(byte one_byte_char)) {
DCHECK(is_one_byte_);
- Vector<byte> new_store;
- int new_content_size = position_ * kUC16Size;
- if (new_content_size >= backing_store_.length()) {
- // Ensure room for all currently read code units as UC16 as well
- // as the code unit about to be stored.
- new_store = Vector<byte>::New(NewCapacity(new_content_size));
- } else {
- new_store = backing_store_;
- }
- uint8_t* src = backing_store_.start();
- uint16_t* dst = reinterpret_cast<uint16_t*>(new_store.start());
- for (int i = position_ - 1; i >= 0; i--) {
- dst[i] = src[i];
- }
- if (new_store.start() != backing_store_.start()) {
- backing_store_.Dispose();
- backing_store_ = new_store;
- }
- position_ = new_content_size;
- is_one_byte_ = false;
+ if (position_ >= backing_store_.length()) ExpandBuffer();
+ backing_store_[position_] = one_byte_char;
+ position_ += kOneByteSize;
}
+ void AddCharSlow(uc32 code_unit);
+ int NewCapacity(int min_capacity);
+ void ExpandBuffer();
+ void ConvertToTwoByte();
+
bool is_one_byte_;
int position_;
Vector<byte> backing_store_;
@@ -787,7 +737,7 @@ class Scanner {
// Last-seen positions of potentially problematic tokens.
Location octal_pos_;
- Location decimal_with_leading_zero_pos_;
+ MessageTemplate::Template octal_message_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 6641a1259b..907a4cde2c 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -212,7 +212,7 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
// Debug info has to be emitted first.
- if (FLAG_perf_prof_debug_info && shared != nullptr) {
+ if (FLAG_perf_prof && shared != nullptr) {
LogWriteDebugInfo(code, shared);
}
@@ -246,33 +246,47 @@ void PerfJitLogger::LogRecordedBuffer(AbstractCode* abstract_code,
LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
}
-void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
- // Compute the entry count and get the name of the script.
- uint32_t entry_count = 0;
- for (SourcePositionTableIterator iterator(code->source_position_table());
- !iterator.done(); iterator.Advance()) {
- entry_count++;
- }
- if (entry_count == 0) return;
- Handle<Script> script(Script::cast(shared->script()));
- Handle<Object> name_or_url(Script::GetNameOrSourceURL(script));
+namespace {
+std::unique_ptr<char[]> GetScriptName(Handle<Script> script) {
+ Object* name_or_url = script->GetNameOrSourceURL();
int name_length = 0;
std::unique_ptr<char[]> name_string;
if (name_or_url->IsString()) {
- name_string =
- Handle<String>::cast(name_or_url)
- ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
- DCHECK_EQ(0, name_string.get()[name_length]);
+ return String::cast(name_or_url)
+ ->ToCString(DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &name_length);
} else {
const char unknown[] = "<unknown>";
name_length = static_cast<int>(strlen(unknown));
char* buffer = NewArray<char>(name_length + 1);
base::OS::StrNCpy(buffer, name_length + 1, unknown,
static_cast<size_t>(name_length));
buffer[name_length] = '\0';
- name_string = std::unique_ptr<char[]>(buffer);
+ return std::unique_ptr<char[]>(buffer);
+ }
+}
+
+SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
+ Handle<SharedFunctionInfo> function,
+ SourcePosition pos) {
+ if (code->is_turbofanned() || code->is_crankshafted()) {
+ DisallowHeapAllocation disallow;
+ return pos.InliningStack(code)[0];
+ } else {
+ return SourcePositionInfo(pos, function);
+ }
+}
+
+} // namespace
+
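// Why InliningStack()[0] (note on the helper above): in optimized code one
// code offset can map to a whole stack of inlined source positions; the
// innermost frame is the code the sample actually executed, so that is the
// script and line the debug record should name.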
+void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
+ // Compute the entry count and get the name of the script.
+ uint32_t entry_count = 0;
+ for (SourcePositionTableIterator iterator(code->source_position_table());
+ !iterator.done(); iterator.Advance()) {
+ entry_count++;
}
- DCHECK_EQ(name_length, static_cast<int>(strlen(name_string.get())));
+ if (entry_count == 0) return;
+ Handle<Script> script(Script::cast(shared->script()));
PerfJitCodeDebugInfo debug_info;
@@ -284,42 +298,44 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
uint32_t size = sizeof(debug_info);
// Add the sizes of fixed parts of entries.
size += entry_count * sizeof(PerfJitDebugEntry);
- // Add the size of the name after the first entry.
- size += (static_cast<uint32_t>(name_length) + 1) * entry_count;
+ // Add the size of the name after each entry.
- int padding = ((size + 7) & (~7)) - size;
+ Handle<Code> code_handle(code);
+ Handle<SharedFunctionInfo> function_handle(shared);
+ for (SourcePositionTableIterator iterator(code->source_position_table());
+ !iterator.done(); iterator.Advance()) {
+ SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
+ iterator.source_position()));
+ Handle<Script> script(Script::cast(info.function->script()));
+ std::unique_ptr<char[]> name_string = GetScriptName(script);
+ size += (static_cast<uint32_t>(strlen(name_string.get())) + 1);
+ }
+ int padding = ((size + 7) & (~7)) - size;
debug_info.size_ = size + padding;
-
LogWriteBytes(reinterpret_cast<const char*>(&debug_info), sizeof(debug_info));
- int script_line_offset = script->line_offset();
- Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
Address code_start = code->instruction_start();
for (SourcePositionTableIterator iterator(code->source_position_table());
!iterator.done(); iterator.Advance()) {
- int position = iterator.source_position().ScriptOffset();
- int line_number = Script::GetLineNumber(script, position);
- // Compute column.
- int relative_line_number = line_number - script_line_offset;
- int start =
- (relative_line_number == 0)
- ? 0
- : Smi::cast(line_ends->get(relative_line_number - 1))->value() + 1;
- int column_offset = position - start;
- if (relative_line_number == 0) {
- // For the case where the code is on the same line as the script tag.
- column_offset += script->column_offset();
- }
-
+ SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
+ iterator.source_position()));
PerfJitDebugEntry entry;
+ // TODO(danno): There seems to be a bug in the dwarf handling of JIT code in
+ // the perf tool. It seems to erroneously believe that the first instruction
+ // of functions is at offset 0x40 when displayed in "perf report". To
+ // compensate for this, add a magic constant to the position addresses when
+ // writing them out.
entry.address_ =
- reinterpret_cast<uint64_t>(code_start + iterator.code_offset());
- entry.line_number_ = line_number;
- entry.column_ = column_offset;
+ reinterpret_cast<intptr_t>(code_start + iterator.code_offset() + 0x40);
+ entry.line_number_ = info.line + 1;
+ entry.column_ = info.column + 1;
LogWriteBytes(reinterpret_cast<const char*>(&entry), sizeof(entry));
- LogWriteBytes(name_string.get(), name_length + 1);
+ Handle<Script> script(Script::cast(info.function->script()));
+ std::unique_ptr<char[]> name_string = GetScriptName(script);
+ LogWriteBytes(name_string.get(),
+ static_cast<uint32_t>(strlen(name_string.get())) + 1);
}
char padding_bytes[] = "\0\0\0\0\0\0\0\0";
LogWriteBytes(padding_bytes, padding);
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 08a8005ee1..32408f3079 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -66,6 +66,9 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef USE_SIMULATOR
// Probe for additional features at runtime.
base::CPU cpu;
+ if (cpu.part() == base::CPU::PPC_POWER9) {
+ supported_ |= (1u << MODULO);
+ }
#if V8_TARGET_ARCH_PPC64
if (cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << FPR_GPR_MOV);
@@ -79,6 +82,7 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
if (cpu.part() == base::CPU::PPC_POWER7 ||
cpu.part() == base::CPU::PPC_POWER8) {
supported_ |= (1u << ISELECT);
+ supported_ |= (1u << VSX);
}
#if V8_OS_LINUX
if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
@@ -96,6 +100,8 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= (1u << FPU);
supported_ |= (1u << LWSYNC);
supported_ |= (1u << ISELECT);
+ supported_ |= (1u << VSX);
+ supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
supported_ |= (1u << FPR_GPR_MOV);
#endif
@@ -171,14 +177,19 @@ Address RelocInfo::wasm_global_reference() {
return Assembler::target_address_at(pc_, host_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -641,6 +652,14 @@ void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}
+void Assembler::xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
+ DoubleRegister b) {
+ int AX = ((a.code() & 0x20) >> 5) & 0x1;
+ int BX = ((b.code() & 0x20) >> 5) & 0x1;
+ int TX = ((t.code() & 0x20) >> 5) & 0x1;
+  emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
+       (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
+}
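// Encoding sketch: VSX has 64 registers, so register numbers need 6 bits,
// but the XX3 form only has 5-bit T/A/B fields; the sixth (high) bit of
// each number is split off into TX/AX/BX. E.g. VSR 35 = 0b100011 emits
// 0b00011 in the A field with AX = 1.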
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
int maskbit, RCBit r) {
@@ -936,6 +955,13 @@ void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
+void Assembler::modsw(Register rt, Register ra, Register rb) {
+ x_form(EXT2 | MODSW, ra, rt, rb, LeaveRC);
+}
+
+void Assembler::moduw(Register rt, Register ra, Register rb) {
+ x_form(EXT2 | MODUW, ra, rt, rb, LeaveRC);
+}
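// What the new instructions replace (pseudo-expansion, registers
// illustrative): without MODULO support, rt = ra % rb costs three ops,
//
//   divw  scratch, ra, rb
//   mullw scratch, scratch, rb
//   subf  rt, scratch, ra
//
// whereas POWER9 does it in the single  modsw rt, ra, rb.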
void Assembler::addi(Register dst, Register src, const Operand& imm) {
DCHECK(!src.is(r0)); // use li instead to show intent
@@ -1540,6 +1566,14 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
RCBit r) {
xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
+
+void Assembler::modsd(Register rt, Register ra, Register rb) {
+ x_form(EXT2 | MODSD, ra, rt, rb, LeaveRC);
+}
+
+void Assembler::modud(Register rt, Register ra, Register rb) {
+ x_form(EXT2 | MODUD, ra, rt, rb, LeaveRC);
+}
#endif
@@ -2322,6 +2356,24 @@ void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
frc.code() * B6 | rc);
}
+// Support for VSX instructions
+
+void Assembler::xsadddp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb) {
+ xx3_form(EXT6 | XSADDDP, frt, fra, frb);
+}
+void Assembler::xssubdp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb) {
+ xx3_form(EXT6 | XSSUBDP, frt, fra, frb);
+}
+void Assembler::xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb) {
+ xx3_form(EXT6 | XSDIVDP, frt, fra, frb);
+}
+void Assembler::xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb) {
+ xx3_form(EXT6 | XSMULDP, frt, fra, frb);
+}
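// Usage sketch at the assembler level (registers illustrative): each helper
// emits one Power ISA scalar double-precision VSX operation, e.g.
//
//   __ xsadddp(d3, d1, d2);   // d3 = d1 + d2
//   __ xsdivdp(d3, d1, d2);   // d3 = d1 / d2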
// Pseudo instructions.
void Assembler::nop(int type) {
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index f49ac6305e..b385af0321 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -837,6 +837,8 @@ class Assembler : public AssemblerBase {
RCBit r = LeaveRC);
void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void modsw(Register rt, Register ra, Register rb);
+ void moduw(Register rt, Register ra, Register rb);
void addi(Register dst, Register src, const Operand& imm);
void addis(Register dst, Register src, const Operand& imm);
@@ -932,6 +934,8 @@ class Assembler : public AssemblerBase {
RCBit r = LeaveRC);
void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
RCBit r = LeaveRC);
+ void modsd(Register rt, Register ra, Register rb);
+ void modud(Register rt, Register ra, Register rb);
#endif
void rlwinm(Register ra, Register rs, int sh, int mb, int me,
@@ -1104,6 +1108,17 @@ class Assembler : public AssemblerBase {
const DoubleRegister frc, const DoubleRegister frb,
RCBit rc = LeaveRC);
+ // Support for VSX instructions
+
+ void xsadddp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb);
+ void xssubdp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb);
+ void xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
+ const DoubleRegister frb);
+ void xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
+               const DoubleRegister frb);
+
// Pseudo instructions
// Different nop operations are used by the code generator to detect certain
@@ -1188,9 +1203,6 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1409,6 +1421,8 @@ class Assembler : public AssemblerBase {
void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r);
void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
RCBit r);
+ void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
+ DoubleRegister b);
void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
RCBit r);
void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index a48fc06116..c2c27d6c99 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -32,17 +32,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(r3, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -664,8 +653,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
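  // (The Push/Pop of cp above preserves the context register across the
  // builtin call, which may clobber it.)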
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -879,7 +871,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
@@ -2170,45 +2161,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
- Register src, Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ andi(r0, dest, Operand(kPointerAlignmentMask));
- __ Check(eq, kDestinationOfCopyNotAligned, cr0);
- }
-
- // Nothing to do for zero characters.
- Label done;
- if (encoding == String::TWO_BYTE_ENCODING) {
- // double the length
- __ add(count, count, count, LeaveOE, SetRC);
- __ beq(&done, cr0);
- } else {
- __ cmpi(count, Operand::Zero());
- __ beq(&done);
- }
-
- // Copy count bytes from src to dst.
- Label byte_loop;
- __ mtctr(count);
- __ bind(&byte_loop);
- __ lbz(scratch, MemOperand(src));
- __ addi(src, src, Operand(1));
- __ stb(scratch, MemOperand(dest));
- __ addi(dest, dest, Operand(1));
- __ bdnz(&byte_loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2842,84 +2794,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ bne(miss);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register elements,
- Register name, Register scratch1, Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1); // convert smi to int
- __ subi(scratch1, scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following and instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ addi(scratch2, scratch2,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
- __ and_(scratch2, scratch1, scratch2);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ ShiftLeftImm(ip, scratch2, Operand(1));
- __ add(scratch2, scratch2, ip);
-
- // Check if the key is identical to the name.
- __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
- __ add(scratch2, elements, ip);
- __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ cmp(name, ip);
- __ beq(done);
- }
-
- const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
- r5.bit() | r4.bit() | r3.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ mflr(r0);
- __ MultiPush(spill_mask);
- if (name.is(r3)) {
- DCHECK(!elements.is(r4));
- __ mr(r4, name);
- __ mr(r3, elements);
- } else {
- __ mr(r3, elements);
- __ mr(r4, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ cmpi(r3, Operand::Zero());
- __ mr(scratch2, r5);
- __ MultiPop(spill_mask);
- __ mtlr(r0);
-
- __ bne(done);
- __ beq(miss);
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3202,246 +3076,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ LoadP(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ bne(&start_polymorphic);
- // found, now call handler.
- Register handler = feedback;
- __ LoadP(handler,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
- __ beq(miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, length);
- __ add(too_far, feedback, r0);
- __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ bne(&prepare_next);
- __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
- __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
- __ cmp(pointer_reg, too_far);
- __ blt(&next_loop);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ cmp(cached_map, receiver_map);
- __ bne(try_array);
- Register handler = feedback;
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(handler, vector, r0);
- __ LoadP(handler,
- FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
- __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, too_far);
- __ add(too_far, feedback, r0);
- __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ cmp(receiver_map, cached_map);
- __ bne(&prepare_next);
- // Is it a transitioning store?
- __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ bne(&transition_call);
- __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&transition_call);
- __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ mr(feedback, too_far);
-
- __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ cmpl(pointer_reg, too_far);
- __ blt(&next_loop);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r4
- Register key = StoreWithVectorDescriptor::NameRegister(); // r5
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r6
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r7
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r3)); // r3
- Register feedback = r8;
- Register receiver_map = r9;
- Register scratch1 = r10;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- Register scratch2 = r11;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r0, slot);
- __ add(feedback, vector, r0);
- __ LoadP(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -3812,122 +3446,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r4 : target
- // -- r6 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r4);
- __ AssertReceiver(r6);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r5, &new_object);
- __ CompareObjectType(r5, r3, r3, MAP_TYPE);
- __ bne(&new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset));
- __ cmp(r3, r4);
- __ bne(&new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ StoreP(r5, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ addi(r4, r3, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- r3 : result (tagged)
- // -- r4 : result fields (untagged)
- // -- r8 : result end (untagged)
- // -- r5 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
- __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset));
- __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC);
- __ bne(&slack_tracking, cr0);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(r4, r8, r9);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0);
- __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
- __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2));
- __ sub(r7, r8, r7);
- __ InitializeFieldsWithFiller(r4, r7, r9);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r4, r8, r9);
-
- // Check if we can finalize the instance size.
- __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd));
- __ Ret(ne);
-
- // Finalize the instance size.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r5);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r3);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- __ ShiftLeftImm(r7, r7,
- Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
- __ Push(r5, r7);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(r5);
- }
- __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
- __ add(r8, r3, r8);
- __ subi(r8, r8, Operand(kHeapObjectTag));
- __ b(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(r4, r6);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
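// A rough stand-alone model of the slack-tracking arithmetic in the stub
// deleted above, with assumed field positions (the real layout lives in
// Map::ConstructionCounter). Each allocation decrements a small counter
// packed into bit_field3; while it is nonzero, unused in-object slots are
// filled rather than used, and when it reaches the end value the instance
// size is finalized.
#include <cstdint>

constexpr int kCounterShift = 29;       // assumed: counter in the top bits
constexpr uint32_t kCounterMask = 0x7;  // assumed 3-bit field width
constexpr uint32_t kCounterEnd = 1;     // assumed "finalize now" value

inline bool DecrementSlackCounter(uint32_t* bit_field3) {
  *bit_field3 -= 1u << kCounterShift;  // decrease generous allocation count
  uint32_t counter = (*bit_field3 >> kCounterShift) & kCounterMask;
  return counter == kCounterEnd;  // true: caller finalizes the instance size
}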
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r4 : function
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index d394171d89..f873f93679 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -16,15 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
- Register src, Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
@@ -297,10 +288,6 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
Register properties, Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
- Label* done, Register elements,
- Register name, Register r0, Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 07853edc20..bb365b4e63 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -73,304 +73,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = r7;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r11,
- allocation_memento_found);
- }
-
- // Set transitioned map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // lr contains the return address
- Label loop, entry, convert_hole, only_change_map, done;
- Register elements = r7;
- Register length = r8;
- Register array = r9;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = r10;
- Register scratch3 = r11;
- Register scratch4 = r14;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
- scratch2));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map);
-
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(scratch3, length);
- __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
- __ subi(array, array, Operand(kHeapObjectTag));
- // array: destination FixedDoubleArray, not tagged as heap object.
- // elements: source FixedArray.
-
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- // Update receiver's map.
- __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ addi(scratch1, array, Operand(kHeapObjectTag));
- __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
- __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ addi(scratch1, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array_end, length);
- __ add(array_end, scratch2, array_end);
-// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_PPC64
- Register hole_int64 = elements;
- __ mov(hole_int64, Operand(kHoleNanInt64));
-#else
- Register hole_lower = elements;
- Register hole_upper = length;
- __ mov(hole_lower, Operand(kHoleNanLower32));
- __ mov(hole_upper, Operand(kHoleNanUpper32));
-#endif
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32 OR hole_int64
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch2: begin of FixedDoubleArray element fields, not tagged
-
- __ b(&entry);
-
- __ bind(&only_change_map);
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ LoadP(scratch3, MemOperand(scratch1));
- __ addi(scratch1, scratch1, Operand(kPointerSize));
- // scratch3: current element
- __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ ConvertIntToDouble(scratch3, d0);
- __ stfd(d0, MemOperand(scratch2, 0));
- __ addi(scratch2, scratch2, Operand(8));
- __ b(&entry);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
- __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray);
- }
-#if V8_TARGET_ARCH_PPC64
- __ std(hole_int64, MemOperand(scratch2, 0));
-#else
- __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
- __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
-#endif
- __ addi(scratch2, scratch2, Operand(8));
-
- __ bind(&entry);
- __ cmp(scratch2, array_end);
- __ blt(&loop);
-
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // Register lr contains the return address.
- Label loop, convert_hole, gc_required, only_change_map;
- Register elements = r7;
- Register array = r9;
- Register length = r8;
- Register scratch = r10;
- Register scratch3 = r11;
- Register hole_value = r14;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
- scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map);
-
- __ Push(target_map, receiver, key, value);
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedDoubleArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToPtrArrayOffset(r0, length);
- __ add(array_size, array_size, r0);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ StoreP(length, FieldMemOperand(array,
- FixedDoubleArray::kLengthOffset), r0);
- __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ addi(src_elements, elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
-
- Label initialization_loop, loop_done;
- __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
- __ beq(&loop_done, cr0);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- __ mtctr(r0);
- __ addi(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ bind(&initialization_loop);
- __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
- __ bdnz(&initialization_loop);
-
- __ addi(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(dst_end, dst_elements, length);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offsetted addresses in src_elements to fully take advantage of
- // post-indexing.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields,
- // not tagged, +4
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // hole_value: the-hole pointer
- // heap_number_map: heap number map
- __ b(&loop);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(target_map, receiver, key, value);
- __ b(fail);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ StoreP(hole_value, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
- __ cmpl(dst_elements, dst_end);
- __ bge(&loop_done);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
- __ addi(src_elements, src_elements, Operand(kDoubleSize));
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
- __ beq(&convert_hole);
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
- // heap_number: new heap number
-#if V8_TARGET_ARCH_PPC64
- __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
- // subtract tag for std
- __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
- __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
-#else
- __ lwz(scratch2,
- MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
- __ lwz(upper_bits,
- MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
- __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-#endif
- __ mr(scratch2, dst_elements);
- __ StoreP(heap_number, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
- __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ cmpl(dst_elements, dst_end);
- __ blt(&loop);
- __ bind(&loop_done);
-
- __ Pop(target_map, receiver, key, value);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
- __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
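// The deleted loop above detects holes in a FixedDoubleArray from the upper
// 32 bits of each element alone; a stand-alone equivalent (the concrete
// kHoleNanUpper32 pattern is left as a parameter rather than restated here):
#include <cstdint>

inline bool IsHoleNan(uint64_t double_bits, uint32_t hole_nan_upper32) {
  // The hole is a NaN with a payload no ordinary computation produces, so
  // comparing the exponent-and-high-mantissa word is sufficient.
  return static_cast<uint32_t>(double_bits >> 32) == hole_nan_upper32;
}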
// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
@@ -493,31 +195,25 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Code* code = NULL;
- Address target_address =
- Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ Code* code = NULL;
+ Address target_address =
+ Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence,
young_length / Assembler::kInstrSize);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
index 393f039e27..daa52257d6 100644
--- a/deps/v8/src/ppc/constants-ppc.h
+++ b/deps/v8/src/ppc/constants-ppc.h
@@ -145,6 +145,7 @@ enum Opcode {
STFDU = 55 << 26, // Store Floating-Point Double with Update
LD = 58 << 26, // Load Double Word
EXT3 = 59 << 26, // Extended code set 3
+ EXT6 = 60 << 26, // Extended code set 6
STD = 62 << 26, // Store Double Word (optionally with Update)
EXT4 = 63 << 26 // Extended code set 4
};
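// Why the enumerators are written as "n << 26": a PPC instruction is 32 bits
// wide with the 6-bit primary opcode in the highest bits, so shifting by 26
// makes each value directly comparable against a masked instruction word.
// A small stand-alone check (helper names are illustrative):
#include <cstdint>

constexpr int kPrimaryOpcodeShift = 26;
constexpr uint32_t kPrimaryOpcodeMask = 0x3Fu << kPrimaryOpcodeShift;

inline uint32_t PrimaryOpcode(uint32_t instr) {
  return instr & kPrimaryOpcodeMask;  // compare against EXT6, LD, STD, ...
}

static_assert((60u << kPrimaryOpcodeShift) == 0xF0000000u,
              "EXT6 (opcode 60) occupies the top six bits");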
@@ -203,24 +204,27 @@ enum OpcodeExt2 {
STWX = 151 << 1, // store word w/ x-form
MTVSRD = 179 << 1, // Move To VSR Doubleword
STDUX = 181 << 1,
- STWUX = 183 << 1, // store word w/ update x-form
- /*
- MTCRF
- MTMSR
- STWCXx
- SUBFZEX
- */
- ADDZEX = 202 << 1, // Add to Zero Extended
- /*
- MTSR
+ STWUX = 183 << 1, // store word w/ update x-form
+ /*
+ MTCRF
+ MTMSR
+ STWCXx
+ SUBFZEX
*/
+ ADDZEX = 202 << 1, // Add to Zero Extended
+ /*
+ MTSR
+ */
+
MTVSRWA = 211 << 1, // Move To VSR Word Algebraic
STBX = 215 << 1, // store byte w/ x-form
MULLD = 233 << 1, // Multiply Low Double Word
MULLW = 235 << 1, // Multiply Low Word
MTVSRWZ = 243 << 1, // Move To VSR Word And Zero
STBUX = 247 << 1, // store byte w/ update x-form
+ MODUD = 265 << 1, // Modulo Unsigned Dword
ADDX = 266 << 1, // Add
+ MODUW = 267 << 1, // Modulo Unsigned Word
LHZX = 279 << 1, // load half-word zero w/ x-form
LHZUX = 311 << 1, // load half-word zero w/ update x-form
LWAX = 341 << 1, // load word algebraic w/ x-form
@@ -254,6 +258,8 @@ enum OpcodeExt2 {
STFSUX = 695 << 1, // store float-single w/ update x-form
STFDX = 727 << 1, // store float-double w/ x-form
STFDUX = 759 << 1, // store float-double w/ update x-form
+ MODSD = 777 << 1, // Modulo Signed Dword
+ MODSW = 779 << 1, // Modulo Signed Word
LHBRX = 790 << 1, // load half word byte reversed w/ x-form
SRAW = 792 << 1, // Shift Right Algebraic Word
SRAD = 794 << 1, // Shift Right Algebraic Double Word
@@ -314,10 +320,37 @@ enum OpcodeExt5 {
RLDCR = 9 << 1 // Rotate Left Double Word then Clear Right
};
+// Bits 10-3
+#define XX3_OPCODE_LIST(V) \
+ V(xsaddsp, XSADDSP, 0 << 3) /* VSX Scalar Add SP */ \
+ V(xssubsp, XSSUBSP, 8 << 3) /* VSX Scalar Subtract SP */ \
+ V(xsmulsp, XSMULSP, 16 << 3) /* VSX Scalar Multiply SP */ \
+ V(xsdivsp, XSDIVSP, 24 << 3) /* VSX Scalar Divide SP */ \
+ V(xsadddp, XSADDDP, 32 << 3) /* VSX Scalar Add DP */ \
+ V(xssubdp, XSSUBDP, 40 << 3) /* VSX Scalar Subtract DP */ \
+ V(xsmuldp, XSMULDP, 48 << 3) /* VSX Scalar Multiply DP */ \
+ V(xsdivdp, XSDIVDP, 56 << 3) /* VSX Scalar Divide DP */ \
+ V(xsmaxdp, XSMAXDP, 160 << 3) /* VSX Scalar Maximum DP */ \
+ V(xsmindp, XSMINDP, 168 << 3) /* VSX Scalar Minimum DP */
+
+// Bits 10-2
+#define XX2_OPCODE_LIST(V) \
+ V(XSCVDPSP, XSCVDPSP, 265 << 2) /* VSX Scalar Convert DP to SP */ \
+ V(XSCVSPDP, XSCVSPDP, 329 << 2) /* VSX Scalar Convert SP to DP */
+
+enum OpcodeExt6 {
+#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
+ opcode_name = opcode_value,
+ XX3_OPCODE_LIST(DECLARE_OPCODES) XX2_OPCODE_LIST(DECLARE_OPCODES)
+#undef DECLARE_OPCODES
+};
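// How the pre-shifted XX3/XX2 values are matched at decode time: the
// extended opcode sits in bits 10..3 (XX3) or 10..2 (XX2), so the decoder
// extracts that field and re-aligns it before comparing. Stand-alone sketch
// with an illustrative Bits() helper (inclusive range, LSB = bit 0):
#include <cstdint>

inline uint32_t Bits(uint32_t instr, int hi, int lo) {
  return (instr >> lo) & ((1u << (hi - lo + 1)) - 1u);
}

inline bool MatchesXx3(uint32_t instr, uint32_t opcode_value) {
  return (Bits(instr, 10, 3) << 3) == opcode_value;  // e.g. XSADDDP == 32 << 3
}

inline bool MatchesXx2(uint32_t instr, uint32_t opcode_value) {
  return (Bits(instr, 10, 2) << 2) == opcode_value;  // e.g. XSCVDPSP == 265 << 2
}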
+
// Instruction encoding bits and masks.
enum {
// Instruction encoding bit
B1 = 1 << 1,
+ B2 = 1 << 2,
+ B3 = 1 << 3,
B4 = 1 << 4,
B5 = 1 << 5,
B7 = 1 << 7,
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index c0a02a8b9c..5da45f27f0 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -82,6 +82,7 @@ class Decoder {
void DecodeExt3(Instruction* instr);
void DecodeExt4(Instruction* instr);
void DecodeExt5(Instruction* instr);
+ void DecodeExt6(Instruction* instr);
const disasm::NameConverter& converter_;
Vector<char> out_buffer_;
@@ -561,6 +562,24 @@ void Decoder::DecodeExt2(Instruction* instr) {
return;
}
#endif
+ case MODSW: {
+ Format(instr, "modsw 'rt, 'ra, 'rb");
+ return;
+ }
+ case MODUW: {
+ Format(instr, "moduw 'rt, 'ra, 'rb");
+ return;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case MODSD: {
+ Format(instr, "modsd 'rt, 'ra, 'rb");
+ return;
+ }
+ case MODUD: {
+ Format(instr, "modud 'rt, 'ra, 'rb");
+ return;
+ }
+#endif
case SRAWIX: {
Format(instr, "srawi'. 'ra,'rs,'sh");
return;
@@ -1073,6 +1092,28 @@ void Decoder::DecodeExt5(Instruction* instr) {
Unknown(instr); // not used by V8
}
+void Decoder::DecodeExt6(Instruction* instr) {
+ switch (instr->Bits(10, 3) << 3) {
+#define DECODE_XX3_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Da, 'Db"); \
+ return; \
+ }
+ XX3_OPCODE_LIST(DECODE_XX3_INSTRUCTIONS)
+#undef DECODE_XX3_INSTRUCTIONS
+ }
+ switch (instr->Bits(10, 2) << 2) {
+#define DECODE_XX2_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Db"); \
+ return; \
+ }
+ XX2_OPCODE_LIST(DECODE_XX2_INSTRUCTIONS)
+ }
+#undef DECODE_XX2_INSTRUCTIONS
+ Unknown(instr); // not used by V8
+}
+
#undef VERIFIY
// Disassemble the instruction at *instr_ptr into the output buffer.
@@ -1360,6 +1401,10 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
DecodeExt5(instr);
break;
}
+ case EXT6: {
+ DecodeExt6(instr);
+ break;
+ }
#if V8_TARGET_ARCH_PPC64
case LD: {
switch (instr->Bits(1, 0)) {
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 74ad56405f..4ff59bbaf1 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -63,13 +63,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r5};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r4, r6};
+ Register registers[] = {r4, r5, r6};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 6588540035..172971ea0b 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -1403,19 +1403,17 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- mov(r7, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ mov(r7, Operand(debug_hook_active));
LoadByte(r7, MemOperand(r7), r0);
extsb(r7, r7);
- cmpi(r7, Operand(StepIn));
- blt(&skip_flooding);
+ CmpSmiLiteral(r7, Smi::kZero, r0);
+ beq(&skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1431,7 +1429,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Push(new_target);
}
Push(fun, fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -1445,7 +1443,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
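// Shape of the new check above, modeled on the host side (names are
// illustrative): the external reference points at a byte-sized flag, and a
// nonzero value routes the call through Runtime::kDebugOnFunctionCall
// before the function is invoked.
#include <cstdint>

inline bool DebugHookRequested(const int8_t* debug_hook_active_address) {
  return *debug_hook_active_address != 0;
}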
@@ -1459,8 +1457,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r4));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -1949,103 +1947,6 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
addi(result, result, Operand(kHeapObjectTag));
}
-
-void MacroAssembler::AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
- addi(scratch1, scratch1,
- Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
- mov(r0, Operand(~kObjectAlignmentMask));
- and_(scratch1, scratch1, r0);
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- addi(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- li(r0, Operand(~kObjectAlignmentMask));
- and_(scratch1, scratch1, r0);
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
- scratch2);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::CompareObjectType(Register object, Register map,
Register type_reg, InstanceType type) {
const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
@@ -2070,60 +1971,6 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
cmp(obj, r0);
}
-void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- ble(fail);
- cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
- cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- bgt(fail);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register value_reg, Register key_reg, Register elements_reg,
- Register scratch1, DoubleRegister double_scratch, Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label smi_value, store;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
- DONT_DO_SMI_CHECK);
-
- lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Double value, turn potential sNaN into qNaN.
- CanonicalizeNaN(double_scratch);
- b(&store);
-
- bind(&smi_value);
- SmiToDouble(double_scratch, value_reg);
-
- bind(&store);
- SmiToDoubleArrayOffset(scratch1, key_reg);
- add(scratch1, elements_reg, scratch1);
- stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
- elements_offset));
-}
-
-
void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
Register right,
Register overflow_dst,
@@ -2737,25 +2584,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind, ElementsKind transitioned_kind,
- Register map_in_out, Register scratch, Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- LoadP(scratch, NativeContextMemOperand());
- LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- cmp(map_in_out, ip);
- bne(no_map_match);
-
- // Use the transitioned cached map.
- LoadP(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, NativeContextMemOperand());
LoadP(dst, ContextMemOperand(dst, index));
@@ -2840,16 +2668,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
beq(smi_case, cr0);
}
-
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
- Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- TestBitRange(src, kSmiTagSize - 1, 0, r0);
- SmiUntag(dst, src);
- bne(non_smi_case, cr0);
-}
-
-
void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
@@ -3130,19 +2948,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
bne(failure);
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
- andi(scratch, type, Operand(kFlatOneByteStringMask));
- cmpi(scratch, Operand(kFlatOneByteStringTag));
- bne(failure);
-}
-
static const int kRegisterPassedArguments = 8;
@@ -3867,7 +3672,6 @@ void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
subi(sp, sp, Operand(kFloatSize));
- frsp(src, src);
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
lwz(dst, MemOperand(sp, 0));
@@ -4492,44 +4296,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return no_reg;
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Register current = scratch0;
- Label loop_again, end;
-
- // scratch contained elements pointer.
- mr(current, object);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- beq(&end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- cmpi(scratch1, Operand(JS_OBJECT_TYPE));
- blt(found);
-
- lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
- beq(found);
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- bne(&loop_again);
-
- bind(&end);
-}
-
-
#ifdef DEBUG
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
Register reg5, Register reg6, Register reg7, Register reg8,
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 28eceb18a4..0d16c4b1ed 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -470,16 +470,6 @@ class MacroAssembler : public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -621,9 +611,10 @@ class MacroAssembler : public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -722,25 +713,6 @@ class MacroAssembler : public Assembler {
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -803,22 +775,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map, Register scratch, Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
- Register elements_reg, Register scratch1,
- DoubleRegister double_scratch, Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@@ -1309,10 +1265,6 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Source and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
inline void TestIfSmi(Register value, Register scratch) {
TestBitRange(value, kSmiTagSize - 1, 0, scratch);
}
@@ -1434,11 +1386,6 @@ class MacroAssembler : public Assembler {
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string, Register index,
@@ -1528,21 +1475,6 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Register scratch2_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
- &no_memento_found);
- beq(memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
// Loads the constant pool pointer (kConstantPoolRegister).
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 84fbb399b3..e3761579b0 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -1708,6 +1708,60 @@ bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
break;
}
#endif
+ case MODUW: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint32_t ra_val = get_register(ra);
+ uint32_t rb_val = get_register(rb);
+ uint32_t alu_out = (rb_val == 0) ? -1 : ra_val % rb_val;
+ set_register(rt, alu_out);
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case MODUD: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ uint64_t ra_val = get_register(ra);
+ uint64_t rb_val = get_register(rb);
+ uint64_t alu_out = (rb_val == 0) ? -1 : ra_val % rb_val;
+ set_register(rt, alu_out);
+ break;
+ }
+#endif
+ case MODSW: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int32_t ra_val = get_register(ra);
+ int32_t rb_val = get_register(rb);
+ bool overflow = (ra_val == kMinInt && rb_val == -1);
+ // result is undefined if divisor is zero or if operation
+ // is 0x80000000 / -1.
+ int32_t alu_out = (rb_val == 0 || overflow) ? -1 : ra_val % rb_val;
+ set_register(rt, alu_out);
+ break;
+ }
+#if V8_TARGET_ARCH_PPC64
+ case MODSD: {
+ int rt = instr->RTValue();
+ int ra = instr->RAValue();
+ int rb = instr->RBValue();
+ int64_t ra_val = get_register(ra);
+ int64_t rb_val = get_register(rb);
+ int64_t one = 1; // work around gcc's constant-overflow warning
+ int64_t kMinLongLong = (one << 63);
+ // result is undefined if divisor is zero or if operation
+ // is 0x80000000_00000000 / -1.
+ int64_t alu_out =
+ (rb_val == 0 || (ra_val == kMinLongLong && rb_val == -1))
+ ? -1
+ : ra_val % rb_val;
+ set_register(rt, alu_out);
+ break;
+ }
+#endif
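// Reference semantics for the mod instructions emulated above, stand-alone
// (helper names illustrative). Hardware leaves the zero-divisor and
// INT_MIN % -1 cases undefined; the simulator above picks -1 for both.
#include <cstdint>
#include <limits>

inline int32_t ModswSemantics(int32_t a, int32_t b) {
  bool overflow = (a == std::numeric_limits<int32_t>::min() && b == -1);
  return (b == 0 || overflow) ? -1 : a % b;
}

inline uint32_t ModuwSemantics(uint32_t a, uint32_t b) {
  return (b == 0) ? static_cast<uint32_t>(-1) : a % b;
}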
case SRAW: {
int rs = instr->RSValue();
int ra = instr->RAValue();
@@ -3295,6 +3349,51 @@ void Simulator::ExecuteExt5(Instruction* instr) {
}
#endif
+void Simulator::ExecuteExt6(Instruction* instr) {
+ switch (instr->Bits(10, 3) << 3) {
+ case XSADDDP: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val + frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case XSSUBDP: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val - frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case XSMULDP: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val * frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ case XSDIVDP: {
+ int frt = instr->RTValue();
+ int fra = instr->RAValue();
+ int frb = instr->RBValue();
+ double fra_val = get_double_from_d_register(fra);
+ double frb_val = get_double_from_d_register(frb);
+ double frt_val = fra_val / frb_val;
+ set_d_register_from_double(frt, frt_val);
+ return;
+ }
+ }
+ UNIMPLEMENTED(); // Not used by V8.
+}
void Simulator::ExecuteGeneric(Instruction* instr) {
int opcode = instr->OpcodeValue() << 26;
@@ -3701,7 +3800,16 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
int32_t val = ReadW(ra_val + offset, instr);
float* fptr = reinterpret_cast<float*>(&val);
- set_d_register_from_double(frt, static_cast<double>(*fptr));
+// Conversion using double changes sNaN to qNaN on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ if (val == 0x7fa00000) {
+ set_d_register(frt, 0x7ff4000000000000);
+ } else {
+#endif
+ set_d_register_from_double(frt, static_cast<double>(*fptr));
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ }
+#endif
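// Why 0x7fa00000 pairs with 0x7ff4000000000000: both are signaling NaNs
// (quiet bit clear, top payload bit set), and widening a float through a
// host double on ia32/x64 would set the quiet bit, silently turning the
// sNaN into a qNaN. Bit-level check, stand-alone:
#include <cstdint>

constexpr uint32_t kFloatQuietBit = 1u << 22;
constexpr uint64_t kDoubleQuietBit = 1ull << 51;

static_assert((0x7fa00000u & kFloatQuietBit) == 0, "single-precision sNaN");
static_assert((0x7ff4000000000000ull & kDoubleQuietBit) == 0, "double sNaN");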
if (opcode == LFSU) {
DCHECK(ra != 0);
set_register(ra, ra_val + offset);
@@ -3731,7 +3839,19 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
float frs_val = static_cast<float>(get_double_from_d_register(frs));
- int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+ int32_t* p;
+// Conversion using double changes sNaN to qNaN on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ int64_t frs_isnan = get_d_register(frs);
+ int32_t frs_nan_single = 0x7fa00000;
+ if (frs_isnan == 0x7ff4000000000000) {
+ p = &frs_nan_single;
+ } else {
+#endif
+ p = reinterpret_cast<int32_t*>(&frs_val);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+ }
+#endif
WriteW(ra_val + offset, *p, instr);
if (opcode == STFSU) {
DCHECK(ra != 0);
@@ -3810,6 +3930,10 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
}
#endif
+ case EXT6: {
+ ExecuteExt6(instr);
+ break;
+ }
default: {
UNIMPLEMENTED();
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index d061545099..91e7f05ea5 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -321,6 +321,7 @@ class Simulator {
#if V8_TARGET_ARCH_PPC64
void ExecuteExt5(Instruction* instr);
#endif
+ void ExecuteExt6(Instruction* instr);
void ExecuteGeneric(Instruction* instr);
void SetFPSCR(int bit) { fp_condition_reg_ |= (1 << (31 - bit)); }
diff --git a/deps/v8/src/profiler/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index 169ab569e8..eeb212a7a3 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -38,13 +38,17 @@ int HeapEntry::set_children_index(int index) {
return next_index;
}
-
-HeapGraphEdge** HeapEntry::children_arr() {
+std::deque<HeapGraphEdge*>::iterator HeapEntry::children_begin() {
DCHECK(children_index_ >= 0);
- SLOW_DCHECK(children_index_ < snapshot_->children().length() ||
- (children_index_ == snapshot_->children().length() &&
+ SLOW_DCHECK(
+ children_index_ < static_cast<int>(snapshot_->children().size()) ||
+ (children_index_ == static_cast<int>(snapshot_->children().size()) &&
children_count_ == 0));
- return &snapshot_->children().first() + children_index_;
+ return snapshot_->children().begin() + children_index_;
+}
+
+std::deque<HeapGraphEdge*>::iterator HeapEntry::children_end() {
+ return children_begin() + children_count_;
}
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 2fd682e567..fbb4e973d6 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -63,7 +63,7 @@ void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
const char* name,
HeapEntry* entry) {
HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().Add(edge);
+ snapshot_->edges().push_back(edge);
++children_count_;
}
@@ -72,7 +72,7 @@ void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
int index,
HeapEntry* entry) {
HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().Add(edge);
+ snapshot_->edges().push_back(edge);
++children_count_;
}
@@ -97,9 +97,8 @@ void HeapEntry::Print(
base::OS::Print("\"\n");
}
if (--max_depth == 0) return;
- Vector<HeapGraphEdge*> ch = children();
- for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = *ch[i];
+ for (auto i = children_begin(); i != children_end(); ++i) {
+ HeapGraphEdge& edge = **i;
const char* edge_prefix = "";
EmbeddedVector<char, 64> index;
const char* edge_name = index.start();
@@ -270,15 +269,15 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
void HeapSnapshot::FillChildren() {
- DCHECK(children().is_empty());
- children().Allocate(edges().length());
+ DCHECK(children().empty());
+ children().resize(edges().size());
int children_index = 0;
for (int i = 0; i < entries().length(); ++i) {
HeapEntry* entry = &entries()[i];
children_index = entry->set_children_index(children_index);
}
- DCHECK(edges().length() == children_index);
- for (int i = 0; i < edges().length(); ++i) {
+ DCHECK_EQ(edges().size(), static_cast<size_t>(children_index));
+ for (size_t i = 0; i < edges().size(); ++i) {
HeapGraphEdge* edge = &edges()[i];
edge->ReplaceToIndexWithEntry(this);
edge->from()->add_child(edge);
@@ -335,12 +334,10 @@ void HeapSnapshot::Print(int max_depth) {
size_t HeapSnapshot::RawSnapshotSize() const {
- return
- sizeof(*this) +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(edges_) +
- GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(sorted_entries_);
+ return sizeof(*this) + GetMemoryUsedByList(entries_) +
+ edges_.size() * sizeof(decltype(edges_)::value_type) +
+ children_.size() * sizeof(decltype(children_)::value_type) +
+ GetMemoryUsedByList(sorted_entries_);
}
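// The replacement above sizes a deque as element count times element size,
// mirroring what GetMemoryUsedByList did for List. A generic helper in the
// same spirit (deliberately ignores the deque's per-chunk overhead, as the
// original does):
#include <cstddef>
#include <deque>

template <typename T>
size_t ApproxDequeBytes(const std::deque<T>& d) {
  return d.size() * sizeof(T);
}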
@@ -2797,8 +2794,8 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
void HeapSnapshotJSONSerializer::SerializeEdges() {
- List<HeapGraphEdge*>& edges = snapshot_->children();
- for (int i = 0; i < edges.length(); ++i) {
+ std::deque<HeapGraphEdge*>& edges = snapshot_->children();
+ for (size_t i = 0; i < edges.size(); ++i) {
DCHECK(i == 0 ||
edges[i - 1]->from()->index() <= edges[i]->from()->index());
SerializeEdge(edges[i], i == 0);
@@ -2916,7 +2913,7 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
writer_->AddString(",\"node_count\":");
writer_->AddNumber(snapshot_->entries().length());
writer_->AddString(",\"edge_count\":");
- writer_->AddNumber(snapshot_->edges().length());
+ writer_->AddNumber(static_cast<double>(snapshot_->edges().size()));
writer_->AddString(",\"trace_function_count\":");
uint32_t count = 0;
AllocationTracker* tracker = snapshot_->profiler()->allocation_tracker();
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index b235ff0502..b4de8b57e1 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -5,6 +5,7 @@
#ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
+#include <deque>
#include <unordered_map>
#include "include/v8-profiler.h"
@@ -115,10 +116,9 @@ class HeapEntry BASE_EMBEDDED {
int children_count() const { return children_count_; }
INLINE(int set_children_index(int index));
void add_child(HeapGraphEdge* edge) {
- children_arr()[children_count_++] = edge;
+ *(children_begin() + children_count_++) = edge;
}
- Vector<HeapGraphEdge*> children() {
- return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+ HeapGraphEdge* child(int i) { return *(children_begin() + i); }
INLINE(Isolate* isolate() const);
void SetIndexedReference(
@@ -130,7 +130,8 @@ class HeapEntry BASE_EMBEDDED {
const char* prefix, const char* edge_name, int max_depth, int indent);
private:
- INLINE(HeapGraphEdge** children_arr());
+ INLINE(std::deque<HeapGraphEdge*>::iterator children_begin());
+ INLINE(std::deque<HeapGraphEdge*>::iterator children_end());
const char* TypeAsString();
unsigned type_: 4;
@@ -163,8 +164,8 @@ class HeapSnapshot {
return &entries_[gc_subroot_indexes_[index]];
}
List<HeapEntry>& entries() { return entries_; }
- List<HeapGraphEdge>& edges() { return edges_; }
- List<HeapGraphEdge*>& children() { return children_; }
+ std::deque<HeapGraphEdge>& edges() { return edges_; }
+ std::deque<HeapGraphEdge*>& children() { return children_; }
void RememberLastJSObjectId();
SnapshotObjectId max_snapshot_js_object_id() const {
return max_snapshot_js_object_id_;
@@ -192,8 +193,8 @@ class HeapSnapshot {
int gc_roots_index_;
int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
List<HeapEntry> entries_;
- List<HeapGraphEdge> edges_;
- List<HeapGraphEdge*> children_;
+ std::deque<HeapGraphEdge> edges_;
+ std::deque<HeapGraphEdge*> children_;
List<HeapEntry*> sorted_entries_;
SnapshotObjectId max_snapshot_js_object_id_;
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index b647670b59..72e02b360b 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -637,7 +637,16 @@ void CpuProfilesCollection::AddPathToCurrentProfiles(
ProfileGenerator::ProfileGenerator(Isolate* isolate,
CpuProfilesCollection* profiles)
- : isolate_(isolate), profiles_(profiles) {}
+ : isolate_(isolate), profiles_(profiles) {
+ RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
+ for (int i = 0; i < RuntimeCallStats::counters_count; ++i) {
+ RuntimeCallCounter* counter = &(rcs->*(RuntimeCallStats::counters[i]));
+ DCHECK(counter->name());
+ auto entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name(),
+ CodeEntry::kEmptyNamePrefix, "native V8Runtime");
+ code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
+ }
+}
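// Effect of the constructor change above: every RuntimeCallCounter is
// registered in the code map once, up front, so FindEntry() (below) shrinks
// to a plain lookup instead of lazily materializing entries on first hit.
// Sketched with an illustrative map type:
#include <cstdint>
#include <map>
#include <string>

using AddressSketch = uintptr_t;

inline void PreRegisterCounter(std::map<AddressSketch, std::string>& code_map,
                               AddressSketch counter_address,
                               const char* name) {
  code_map.emplace(counter_address, name);  // modeled as a 1-byte code range
}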
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
std::vector<CodeEntry*> entries;
@@ -742,20 +751,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
CodeEntry* ProfileGenerator::FindEntry(void* address) {
- CodeEntry* entry = code_map_.FindEntry(reinterpret_cast<Address>(address));
- if (!entry) {
- RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
- void* start = reinterpret_cast<void*>(rcs);
- void* end = reinterpret_cast<void*>(rcs + 1);
- if (start <= address && address < end) {
- RuntimeCallCounter* counter =
- reinterpret_cast<RuntimeCallCounter*>(address);
- entry = new CodeEntry(CodeEventListener::FUNCTION_TAG, counter->name,
- CodeEntry::kEmptyNamePrefix, "native V8Runtime");
- code_map_.AddCode(reinterpret_cast<Address>(address), entry, 1);
- }
- }
- return entry;
+ return code_map_.FindEntry(reinterpret_cast<Address>(address));
}
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
diff --git a/deps/v8/src/profiler/profiler-listener.cc b/deps/v8/src/profiler/profiler-listener.cc
index 640f967e3d..4de524aeef 100644
--- a/deps/v8/src/profiler/profiler-listener.cc
+++ b/deps/v8/src/profiler/profiler-listener.cc
@@ -84,9 +84,9 @@ void ProfilerListener::CodeCreateEvent(CodeEventListener::LogEventsAndTags tag,
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = abstract_code->address();
- Script* script = Script::cast(shared->script());
JITLineInfoTable* line_table = NULL;
- if (script) {
+ if (shared->script()->IsScript()) {
+ Script* script = Script::cast(shared->script());
line_table = new JITLineInfoTable();
int offset = abstract_code->IsCode() ? Code::kHeaderSize
: BytecodeArray::kHeaderSize;
@@ -269,8 +269,9 @@ void ProfilerListener::RecordDeoptInlinedFrames(CodeEntry* entry,
std::vector<CpuProfileDeoptFrame> inlined_frames;
for (SourcePositionInfo& pos_info : last_position.InliningStack(code)) {
DCHECK(pos_info.position.ScriptOffset() != kNoSourcePosition);
- size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
+ if (!pos_info.function->script()->IsScript()) continue;
int script_id = Script::cast(pos_info.function->script())->id();
+ size_t offset = static_cast<size_t>(pos_info.position.ScriptOffset());
inlined_frames.push_back(CpuProfileDeoptFrame({script_id, offset}));
}
if (!inlined_frames.empty() &&
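Both hunks in this file swap an unconditional Script::cast for an IsScript() check first, since shared->script() can hold undefined for builtins and native code. A small stand-alone sketch of the check-before-cast idiom (types are illustrative; V8 uses tagged-pointer checks, not RTTI):

#include <cassert>

struct Object { virtual ~Object() = default; };
struct Script : Object { int id = 7; };

// Only downcast after a dynamic type check: the slot may hold a
// non-Script sentinel (e.g. undefined) instead of a Script.
int ScriptIdOrDefault(const Object* maybe_script) {
  if (auto* script = dynamic_cast<const Script*>(maybe_script)) {
    return script->id;
  }
  return -1;
}

int main() {
  Script script;
  Object not_a_script;
  assert(ScriptIdOrDefault(&script) == 7);
  assert(ScriptIdOrDefault(&not_a_script) == -1);
  return 0;
}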
diff --git a/deps/v8/src/promise-utils.cc b/deps/v8/src/promise-utils.cc
deleted file mode 100644
index 607dbe8caa..0000000000
--- a/deps/v8/src/promise-utils.cc
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/promise-utils.h"
-
-#include "src/factory.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-enum PromiseResolvingFunctionContextSlot {
- kAlreadyVisitedSlot = Context::MIN_CONTEXT_SLOTS,
- kPromiseSlot,
- kDebugEventSlot,
- kPromiseContextLength,
-};
-
-JSObject* PromiseUtils::GetPromise(Handle<Context> context) {
- return JSObject::cast(context->get(kPromiseSlot));
-}
-
-Object* PromiseUtils::GetDebugEvent(Handle<Context> context) {
- return context->get(kDebugEventSlot);
-}
-
-bool PromiseUtils::HasAlreadyVisited(Handle<Context> context) {
- return Smi::cast(context->get(kAlreadyVisitedSlot))->value() != 0;
-}
-
-void PromiseUtils::SetAlreadyVisited(Handle<Context> context) {
- context->set(kAlreadyVisitedSlot, Smi::FromInt(1));
-}
-
-void PromiseUtils::CreateResolvingFunctions(Isolate* isolate,
- Handle<JSObject> promise,
- Handle<Object> debug_event,
- Handle<JSFunction>* resolve,
- Handle<JSFunction>* reject) {
- DCHECK(debug_event->IsTrue(isolate) || debug_event->IsFalse(isolate));
- Handle<Context> context =
- isolate->factory()->NewPromiseResolvingFunctionContext(
- kPromiseContextLength);
- context->set_native_context(*isolate->native_context());
- // We set the closure to be an empty function, same as native context.
- context->set_closure(isolate->native_context()->closure());
- context->set(kAlreadyVisitedSlot, Smi::kZero);
- context->set(kPromiseSlot, *promise);
- context->set(kDebugEventSlot, *debug_event);
-
- Handle<SharedFunctionInfo> resolve_shared_fun(
- isolate->native_context()->promise_resolve_shared_fun(), isolate);
- Handle<JSFunction> resolve_fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- isolate->sloppy_function_without_prototype_map(), resolve_shared_fun,
- isolate->native_context(), TENURED);
-
- Handle<SharedFunctionInfo> reject_shared_fun(
- isolate->native_context()->promise_reject_shared_fun(), isolate);
- Handle<JSFunction> reject_fun =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(
- isolate->sloppy_function_without_prototype_map(), reject_shared_fun,
- isolate->native_context(), TENURED);
-
- resolve_fun->set_context(*context);
- reject_fun->set_context(*context);
-
- *resolve = resolve_fun;
- *reject = reject_fun;
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/promise-utils.h b/deps/v8/src/promise-utils.h
deleted file mode 100644
index 6ed6fcde5f..0000000000
--- a/deps/v8/src/promise-utils.h
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PROMISE_UTILS_H_
-#define V8_PROMISE_UTILS_H_
-
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-// Helper methods for Promise builtins.
-class PromiseUtils : public AllStatic {
- public:
- // These get and set the slots on the PromiseResolvingContext, which
- // is used by the resolve/reject promise callbacks.
- static JSObject* GetPromise(Handle<Context> context);
- static Object* GetDebugEvent(Handle<Context> context);
- static bool HasAlreadyVisited(Handle<Context> context);
- static void SetAlreadyVisited(Handle<Context> context);
-
- static void CreateResolvingFunctions(Isolate* isolate,
- Handle<JSObject> promise,
- Handle<Object> debug_event,
- Handle<JSFunction>* resolve,
- Handle<JSFunction>* reject);
-};
-} // namespace internal
-} // namespace v8
-
-#endif // V8_PROMISE_UTILS_H_
diff --git a/deps/v8/src/property-descriptor.cc b/deps/v8/src/property-descriptor.cc
index f22a2630e2..70ddd5d521 100644
--- a/deps/v8/src/property-descriptor.cc
+++ b/deps/v8/src/property-descriptor.cc
@@ -61,19 +61,26 @@ bool ToPropertyDescriptorFastPath(Isolate* isolate, Handle<JSReceiver> obj,
PropertyDetails details = descs->GetDetails(i);
Name* key = descs->GetKey(i);
Handle<Object> value;
- switch (details.type()) {
- case DATA:
+ if (details.location() == kField) {
+ if (details.kind() == kData) {
value = JSObject::FastPropertyAt(Handle<JSObject>::cast(obj),
details.representation(),
FieldIndex::ForDescriptor(map, i));
- break;
- case DATA_CONSTANT:
- value = handle(descs->GetConstant(i), isolate);
- break;
- case ACCESSOR:
- case ACCESSOR_CONSTANT:
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
// Bail out to slow path.
return false;
+ }
+
+ } else {
+ DCHECK_EQ(kDescriptor, details.location());
+ if (details.kind() == kData) {
+ value = handle(descs->GetValue(i), isolate);
+ } else {
+ DCHECK_EQ(kAccessor, details.kind());
+ // Bail out to slow path.
+ return false;
+ }
}
Heap* heap = isolate->heap();
if (key == heap->enumerable_string()) {
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index d720b1c3d2..d616ae76e1 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -64,28 +64,14 @@ STATIC_ASSERT(SKIP_SYMBOLS ==
class Smi;
class TypeInfo;
-// Type of properties.
// Order of kinds is significant.
// Must fit in the BitField PropertyDetails::KindField.
enum PropertyKind { kData = 0, kAccessor = 1 };
-
// Order of modes is significant.
-// Must fit in the BitField PropertyDetails::StoreModeField.
+// Must fit in the BitField PropertyDetails::LocationField.
enum PropertyLocation { kField = 0, kDescriptor = 1 };
-
-// Order of properties is significant.
-// Must fit in the BitField PropertyDetails::TypeField.
-// A copy of this is in debug/mirrors.js.
-enum PropertyType {
- DATA = (kField << 1) | kData,
- DATA_CONSTANT = (kDescriptor << 1) | kData,
- ACCESSOR = (kField << 1) | kAccessor,
- ACCESSOR_CONSTANT = (kDescriptor << 1) | kAccessor
-};
-
-
class Representation {
public:
enum Kind {
@@ -234,27 +220,17 @@ enum class PropertyCellConstantType {
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
- PropertyDetails(PropertyAttributes attributes, PropertyType type, int index,
+ // Property details for dictionary mode properties/elements.
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes, int index,
PropertyCellType cell_type) {
- value_ = TypeField::encode(type) | AttributesField::encode(attributes) |
+ value_ = KindField::encode(kind) | LocationField::encode(kField) |
+ AttributesField::encode(attributes) |
DictionaryStorageField::encode(index) |
PropertyCellTypeField::encode(cell_type);
-
- DCHECK(type == this->type());
- DCHECK(attributes == this->attributes());
}
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- Representation representation,
- int field_index = 0) {
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | RepresentationField::encode(EncodeRepresentation(representation))
- | FieldIndexField::encode(field_index);
- }
-
- PropertyDetails(PropertyAttributes attributes, PropertyKind kind,
+ // Property details for fast mode properties.
+ PropertyDetails(PropertyKind kind, PropertyAttributes attributes,
PropertyLocation location, Representation representation,
int field_index = 0) {
value_ = KindField::encode(kind) | LocationField::encode(location) |
@@ -265,7 +241,7 @@ class PropertyDetails BASE_EMBEDDED {
static PropertyDetails Empty(
PropertyCellType cell_type = PropertyCellType::kNoCell) {
- return PropertyDetails(NONE, DATA, 0, cell_type);
+ return PropertyDetails(kData, NONE, 0, cell_type);
}
int pointer() const { return DescriptorPointer::decode(value_); }
@@ -310,8 +286,6 @@ class PropertyDetails BASE_EMBEDDED {
PropertyKind kind() const { return KindField::decode(value_); }
PropertyLocation location() const { return LocationField::decode(value_); }
- PropertyType type() const { return TypeField::decode(value_); }
-
PropertyAttributes attributes() const {
return AttributesField::decode(value_);
}
@@ -360,12 +334,6 @@ class PropertyDetails BASE_EMBEDDED {
: public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
kDescriptorIndexBitCount> {}; // NOLINT
- // NOTE: TypeField overlaps with KindField and LocationField.
- class TypeField : public BitField<PropertyType, 0, 2> {};
- STATIC_ASSERT(KindField::kNext == LocationField::kShift);
- STATIC_ASSERT(TypeField::kShift == KindField::kShift);
- STATIC_ASSERT(TypeField::kNext == LocationField::kNext);
-
// All bits for both fast and slow objects must fit in a smi.
STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
STATIC_ASSERT(FieldIndexField::kNext <= 31);
@@ -377,6 +345,19 @@ class PropertyDetails BASE_EMBEDDED {
void Print(bool dictionary_mode);
#endif
+ enum PrintMode {
+ kPrintAttributes = 1 << 0,
+ kPrintFieldIndex = 1 << 1,
+ kPrintRepresentation = 1 << 2,
+ kPrintPointer = 1 << 3,
+
+ kForProperties = kPrintFieldIndex,
+ kForTransitions = kPrintAttributes,
+ kPrintFull = -1,
+ };
+ void PrintAsSlowTo(std::ostream& out);
+ void PrintAsFastTo(std::ostream& out, PrintMode mode = kPrintFull);
+
private:
PropertyDetails(int value, int pointer) {
value_ = DescriptorPointer::update(value, pointer);
@@ -395,7 +376,6 @@ class PropertyDetails BASE_EMBEDDED {
std::ostream& operator<<(std::ostream& os,
const PropertyAttributes& attributes);
-std::ostream& operator<<(std::ostream& os, const PropertyDetails& details);
} // namespace internal
} // namespace v8
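The deleted PropertyType enum carried no information beyond the (kind, location) pair; as the removed STATIC_ASSERTs spell out, it was exactly the two-bit packing (location << 1) | kind. A self-contained sketch of that equivalence:

#include <cassert>

enum PropertyKind { kData = 0, kAccessor = 1 };
enum PropertyLocation { kField = 0, kDescriptor = 1 };

// The legacy values decomposed as:
//   DATA              = (kField << 1)      | kData       = 0
//   ACCESSOR          = (kField << 1)      | kAccessor   = 1
//   DATA_CONSTANT     = (kDescriptor << 1) | kData       = 2
//   ACCESSOR_CONSTANT = (kDescriptor << 1) | kAccessor   = 3
constexpr int Pack(PropertyKind kind, PropertyLocation location) {
  return (location << 1) | kind;
}

int main() {
  assert(Pack(kData, kField) == 0);           // was DATA
  assert(Pack(kAccessor, kField) == 1);       // was ACCESSOR
  assert(Pack(kData, kDescriptor) == 2);      // was DATA_CONSTANT
  assert(Pack(kAccessor, kDescriptor) == 3);  // was ACCESSOR_CONSTANT
  return 0;
}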
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index a4e0d67102..1c2666b00a 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -6,6 +6,7 @@
#include "src/field-type.h"
#include "src/handles-inl.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
namespace v8 {
@@ -21,72 +22,56 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-DataDescriptor::DataDescriptor(Handle<Name> key, int field_index,
- PropertyAttributes attributes,
- Representation representation)
- : Descriptor(key, FieldType::Any(key->GetIsolate()), attributes, DATA,
- representation, field_index) {}
-
-struct FastPropertyDetails {
- explicit FastPropertyDetails(const PropertyDetails& v) : details(v) {}
- const PropertyDetails details;
-};
-
+Descriptor Descriptor::DataField(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ Representation representation) {
+ return DataField(key, field_index, FieldType::Any(key->GetIsolate()),
+ attributes, representation);
+}
// Outputs PropertyDetails as dictionary details.
-std::ostream& operator<<(std::ostream& os, const PropertyDetails& details) {
+void PropertyDetails::PrintAsSlowTo(std::ostream& os) {
os << "(";
- if (details.location() == kDescriptor) {
- os << "immutable ";
- }
- os << (details.kind() == kData ? "data" : "accessor");
- return os << ", dictionary_index: " << details.dictionary_index()
- << ", attrs: " << details.attributes() << ")";
+ os << (kind() == kData ? "data" : "accessor");
+ os << ", dictionary_index: " << dictionary_index();
+ os << ", attrs: " << attributes() << ")";
}
-
// Outputs PropertyDetails as descriptor array details.
-std::ostream& operator<<(std::ostream& os,
- const FastPropertyDetails& details_fast) {
- const PropertyDetails& details = details_fast.details;
+void PropertyDetails::PrintAsFastTo(std::ostream& os, PrintMode mode) {
os << "(";
- if (details.location() == kDescriptor) {
- os << "immutable ";
+ os << (kind() == kData ? "data" : "accessor");
+ if (location() == kField) {
+ os << " field";
+ if (mode & kPrintFieldIndex) {
+ os << " " << field_index();
+ }
+ if (mode & kPrintRepresentation) {
+ os << ":" << representation().Mnemonic();
+ }
+ } else {
+ os << " descriptor";
}
- os << (details.kind() == kData ? "data" : "accessor");
- os << ": " << details.representation().Mnemonic();
- if (details.location() == kField) {
- os << ", field_index: " << details.field_index();
+ if (mode & kPrintPointer) {
+ os << ", p: " << pointer();
}
- return os << ", p: " << details.pointer()
- << ", attrs: " << details.attributes() << ")";
+ if (mode & kPrintAttributes) {
+ os << ", attrs: " << attributes();
+ }
+ os << ")";
}
-
#ifdef OBJECT_PRINT
void PropertyDetails::Print(bool dictionary_mode) {
OFStream os(stdout);
if (dictionary_mode) {
- os << *this;
+ PrintAsSlowTo(os);
} else {
- os << FastPropertyDetails(*this);
+ PrintAsFastTo(os, PrintMode::kPrintFull);
}
os << "\n" << std::flush;
}
#endif
-
-std::ostream& operator<<(std::ostream& os, const Descriptor& d) {
- Object* value = *d.GetValue();
- os << "Descriptor " << Brief(*d.GetKey()) << " @ " << Brief(value) << " ";
- if (value->IsAccessorPair()) {
- AccessorPair* pair = AccessorPair::cast(value);
- os << "(get: " << Brief(pair->getter())
- << ", set: " << Brief(pair->setter()) << ") ";
- }
- os << FastPropertyDetails(d.GetDetails());
- return os;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 233233c5d4..177f06b769 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -18,22 +18,47 @@ namespace internal {
// Each descriptor has a key, property attributes, property type,
// property index (in the actual instance-descriptor array) and
// optionally a piece of data.
-class Descriptor BASE_EMBEDDED {
+class Descriptor final BASE_EMBEDDED {
public:
+ Descriptor() : details_(Smi::kZero) {}
+
Handle<Name> GetKey() const { return key_; }
Handle<Object> GetValue() const { return value_; }
PropertyDetails GetDetails() const { return details_; }
void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
+ static Descriptor DataField(Handle<Name> key, int field_index,
+ PropertyAttributes attributes,
+ Representation representation);
+
+ static Descriptor DataField(Handle<Name> key, int field_index,
+ Handle<Object> wrapped_field_type,
+ PropertyAttributes attributes,
+ Representation representation) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
+ return Descriptor(key, wrapped_field_type, kData, attributes, kField,
+ representation, field_index);
+ }
+
+ static Descriptor DataConstant(Handle<Name> key, Handle<Object> value,
+ PropertyAttributes attributes) {
+ return Descriptor(key, value, kData, attributes, kDescriptor,
+ value->OptimalRepresentation());
+ }
+
+ static Descriptor AccessorConstant(Handle<Name> key, Handle<Object> foreign,
+ PropertyAttributes attributes) {
+ return Descriptor(key, foreign, kAccessor, attributes, kDescriptor,
+ Representation::Tagged());
+ }
+
private:
Handle<Name> key_;
Handle<Object> value_;
PropertyDetails details_;
protected:
- Descriptor() : details_(Smi::kZero) {}
-
void Init(Handle<Name> key, Handle<Object> value, PropertyDetails details) {
DCHECK(key->IsUniqueName());
DCHECK_IMPLIES(key->IsPrivate(), !details.IsEnumerable());
@@ -48,57 +73,21 @@ class Descriptor BASE_EMBEDDED {
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
- Descriptor(Handle<Name> key, Handle<Object> value,
- PropertyAttributes attributes, PropertyType type,
+ Descriptor(Handle<Name> key, Handle<Object> value, PropertyKind kind,
+ PropertyAttributes attributes, PropertyLocation location,
Representation representation, int field_index = 0)
: key_(key),
value_(value),
- details_(attributes, type, representation, field_index) {
+ details_(kind, attributes, location, representation, field_index) {
DCHECK(key->IsUniqueName());
DCHECK_IMPLIES(key->IsPrivate(), !details_.IsEnumerable());
}
friend class DescriptorArray;
friend class Map;
+ friend class MapUpdater;
};
-
-std::ostream& operator<<(std::ostream& os, const Descriptor& d);
-
-
-class DataDescriptor final : public Descriptor {
- public:
- DataDescriptor(Handle<Name> key, int field_index,
- PropertyAttributes attributes, Representation representation);
- // The field type is either a simple type or a map wrapped in a weak cell.
- DataDescriptor(Handle<Name> key, int field_index,
- Handle<Object> wrapped_field_type,
- PropertyAttributes attributes, Representation representation)
- : Descriptor(key, wrapped_field_type, attributes, DATA, representation,
- field_index) {
- DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
- }
-};
-
-
-class DataConstantDescriptor final : public Descriptor {
- public:
- DataConstantDescriptor(Handle<Name> key, Handle<Object> value,
- PropertyAttributes attributes)
- : Descriptor(key, value, attributes, DATA_CONSTANT,
- value->OptimalRepresentation()) {}
-};
-
-
-class AccessorConstantDescriptor final : public Descriptor {
- public:
- AccessorConstantDescriptor(Handle<Name> key, Handle<Object> foreign,
- PropertyAttributes attributes)
- : Descriptor(key, foreign, attributes, ACCESSOR_CONSTANT,
- Representation::Tagged()) {}
-};
-
-
} // namespace internal
} // namespace v8
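The three Descriptor subclasses existed only to pick a constructor, so the patch folds them into named static factories on a final Descriptor. A generic sketch of the refactoring, with illustrative names (not the V8 types):

#include <string>
#include <utility>

class Prop final {
 public:
  // Named factories keep each configuration explicit at the call
  // site while the type stays final, copyable, and vtable-free.
  static Prop DataField(std::string key, int field_index) {
    return Prop(std::move(key), Kind::kDataField, field_index);
  }
  static Prop DataConstant(std::string key) {
    return Prop(std::move(key), Kind::kDataConstant, 0);
  }
  static Prop AccessorConstant(std::string key) {
    return Prop(std::move(key), Kind::kAccessorConstant, 0);
  }

 private:
  enum class Kind { kDataField, kDataConstant, kAccessorConstant };
  Prop(std::string key, Kind kind, int field_index)
      : key_(std::move(key)), kind_(kind), field_index_(field_index) {}
  std::string key_;
  Kind kind_;
  int field_index_;
};

int main() {
  Prop p = Prop::DataField("x", 0);  // reads like the old subclass ctor
  (void)p;
  return 0;
}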
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 38d6cab985..3d973dbf9c 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -161,8 +161,7 @@ class PrototypeIterator {
// we visit to an arbitrarily chosen large number.
seen_proxies_++;
if (seen_proxies_ > kProxyPrototypeLimit) {
- isolate_->Throw(
- *isolate_->factory()->NewRangeError(MessageTemplate::kStackOverflow));
+ isolate_->StackOverflow();
return false;
}
MaybeHandle<Object> proto =
@@ -174,6 +173,7 @@ class PrototypeIterator {
}
bool IsAtEnd() const { return is_at_end_; }
+ Isolate* isolate() const { return isolate_; }
private:
Isolate* isolate_;
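The proxy guard above now funnels into isolate_->StackOverflow() instead of hand-building a RangeError with kStackOverflow, so runaway proxy prototype chains and genuine stack exhaustion surface through one path. A stand-alone sketch of the bounded walk (the limit value here is illustrative, not V8's constant):

#include <cstdio>

constexpr int kProxyPrototypeLimit = 1000;  // illustrative value

// User code can make a proxy prototype chain arbitrarily long, so
// cap the number of proxies visited in a single walk.
bool AdvanceThroughProxy(int& seen_proxies) {
  if (++seen_proxies > kProxyPrototypeLimit) {
    // V8 reports this as a stack overflow; here we just print.
    std::fprintf(stderr, "prototype chain too long\n");
    return false;
  }
  return true;
}

int main() {
  int seen = 0;
  while (AdvanceThroughProxy(seen)) {
    // walk one proxy link per iteration
  }
  return 0;
}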
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 14834d512a..4f8f96a536 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -9,6 +9,7 @@
#include "src/regexp/interpreter-irregexp.h"
#include "src/ast/ast.h"
+#include "src/objects-inl.h"
#include "src/regexp/bytecodes-irregexp.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index f0abc9a8b3..8b21459059 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -27,7 +27,7 @@
#include "src/unicode-decoder.h"
#ifdef V8_I18N_SUPPORT
-#include "unicode/uset.h"
+#include "unicode/uniset.h"
#include "unicode/utypes.h"
#endif // V8_I18N_SUPPORT
@@ -451,7 +451,7 @@ void RegExpImpl::IrregexpInitialize(Handle<JSRegExp> re,
int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
Handle<String> subject) {
- subject = String::Flatten(subject);
+ DCHECK(subject->IsFlat());
// Check representation of the underlying storage.
bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
@@ -565,6 +565,8 @@ MaybeHandle<Object> RegExpImpl::IrregexpExec(
Isolate* isolate = regexp->GetIsolate();
DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+ subject = String::Flatten(subject);
+
// Prepare space for the return values.
#if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
if (FLAG_trace_regexp_bytecodes) {
@@ -5114,30 +5116,22 @@ void AddUnicodeCaseEquivalents(RegExpCompiler* compiler,
// Use ICU to compute the case fold closure over the ranges.
DCHECK(compiler->unicode());
DCHECK(compiler->ignore_case());
- USet* set = uset_openEmpty();
+ icu::UnicodeSet set;
for (int i = 0; i < ranges->length(); i++) {
- uset_addRange(set, ranges->at(i).from(), ranges->at(i).to());
+ set.add(ranges->at(i).from(), ranges->at(i).to());
}
ranges->Clear();
- uset_closeOver(set, USET_CASE_INSENSITIVE);
+ set.closeOver(USET_CASE_INSENSITIVE);
// Full case mappings map single characters to multiple characters.
// Those are represented as strings in the set. Remove them so that
// we end up with only simple and common case mappings.
- uset_removeAllStrings(set);
- int item_count = uset_getItemCount(set);
- int item_result = 0;
- UErrorCode ec = U_ZERO_ERROR;
+ set.removeAllStrings();
Zone* zone = compiler->zone();
- for (int i = 0; i < item_count; i++) {
- uc32 start = 0;
- uc32 end = 0;
- item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
- ranges->Add(CharacterRange::Range(start, end), zone);
+ for (int i = 0; i < set.getRangeCount(); i++) {
+ ranges->Add(CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+ zone);
}
// No errors, and everything we collected has been ranges.
- DCHECK_EQ(U_ZERO_ERROR, ec);
- DCHECK_EQ(0, item_result);
- uset_close(set);
#else
// Fallback if ICU is not included.
CharacterRange::AddCaseEquivalents(compiler->isolate(), compiler->zone(),
@@ -6742,8 +6736,7 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
// Inserted here, instead of in Assembler, because it depends on information
// in the AST that isn't replicated in the Node structure.
static const int kMaxBacksearchLimit = 1024;
- if (is_end_anchored &&
- !is_start_anchored &&
+ if (is_end_anchored && !is_start_anchored && !is_sticky &&
max_length < kMaxBacksearchLimit) {
macro_assembler.SetCurrentPositionFromEnd(max_length);
}
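This file (and regexp-parser.cc below) moves from the C uset_* API to the icu::UnicodeSet C++ class, hence the unicode/uniset.h include: the object owns its storage, so the open/close pairing and per-item UErrorCode plumbing disappear. A minimal stand-alone sketch of the case-closure usage, assuming ICU headers are available (link with -licuuc):

#include <unicode/uniset.h>

#include <cstdio>

int main() {
  // Case-insensitive closure over 'k' picks up 'K' and the Kelvin
  // sign U+212A; multi-character expansions arrive as strings and
  // are dropped, mirroring the jsregexp.cc hunk above.
  icu::UnicodeSet set('k', 'k');
  set.closeOver(USET_CASE_INSENSITIVE);
  set.removeAllStrings();
  for (int32_t i = 0; i < set.getRangeCount(); i++) {
    std::printf("U+%04X..U+%04X\n",
                static_cast<unsigned>(set.getRangeStart(i)),
                static_cast<unsigned>(set.getRangeEnd(i)));
  }
  return 0;
}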
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
index a0bb5e7d73..3316c33229 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-irregexp.cc
@@ -7,9 +7,10 @@
#include "src/regexp/regexp-macro-assembler-irregexp.h"
#include "src/ast/ast.h"
+#include "src/objects-inl.h"
#include "src/regexp/bytecodes-irregexp.h"
-#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-macro-assembler-irregexp-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index abdf577209..d311a09e41 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -5,6 +5,7 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
#include "src/ast/ast.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index fd3123f674..3035f6a9a9 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -13,7 +13,7 @@
#include "src/utils.h"
#ifdef V8_I18N_SUPPORT
-#include "unicode/uset.h"
+#include "unicode/uniset.h"
#endif // V8_I18N_SUPPORT
namespace v8 {
@@ -75,6 +75,7 @@ void RegExpParser::Advance() {
if (has_next()) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
+ if (FLAG_abort_on_stack_overflow) FATAL("Aborting on stack overflow");
ReportError(CStrVector(
MessageTemplate::TemplateString(MessageTemplate::kStackOverflow)));
} else if (zone()->excess_allocation()) {
@@ -1082,37 +1083,37 @@ bool IsExactPropertyValueAlias(const char* property_value_name,
bool LookupPropertyValueName(UProperty property,
const char* property_value_name, bool negate,
ZoneList<CharacterRange>* result, Zone* zone) {
+ UProperty property_for_lookup = property;
+ if (property_for_lookup == UCHAR_SCRIPT_EXTENSIONS) {
+ // For the property Script_Extensions, we have to do the property value
+ // name lookup as if the property is Script.
+ property_for_lookup = UCHAR_SCRIPT;
+ }
int32_t property_value =
- u_getPropertyValueEnum(property, property_value_name);
+ u_getPropertyValueEnum(property_for_lookup, property_value_name);
if (property_value == UCHAR_INVALID_CODE) return false;
// We require the property name to match exactly one of the property value
// aliases. However, u_getPropertyValueEnum uses loose matching.
- if (!IsExactPropertyValueAlias(property_value_name, property,
+ if (!IsExactPropertyValueAlias(property_value_name, property_for_lookup,
property_value)) {
return false;
}
- USet* set = uset_openEmpty();
UErrorCode ec = U_ZERO_ERROR;
- uset_applyIntPropertyValue(set, property, property_value, &ec);
- bool success = ec == U_ZERO_ERROR && !uset_isEmpty(set);
+ icu::UnicodeSet set;
+ set.applyIntPropertyValue(property, property_value, ec);
+ bool success = ec == U_ZERO_ERROR && !set.isEmpty();
if (success) {
- uset_removeAllStrings(set);
- if (negate) uset_complement(set);
- int item_count = uset_getItemCount(set);
- int item_result = 0;
- for (int i = 0; i < item_count; i++) {
- uc32 start = 0;
- uc32 end = 0;
- item_result += uset_getItem(set, i, &start, &end, nullptr, 0, &ec);
- result->Add(CharacterRange::Range(start, end), zone);
+ set.removeAllStrings();
+ if (negate) set.complement();
+ for (int i = 0; i < set.getRangeCount(); i++) {
+ result->Add(
+ CharacterRange::Range(set.getRangeStart(i), set.getRangeEnd(i)),
+ zone);
}
- DCHECK_EQ(U_ZERO_ERROR, ec);
- DCHECK_EQ(0, item_result);
}
- uset_close(set);
return success;
}
@@ -1196,9 +1197,14 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
const char* property_name = first_part.ToConstVector().start();
const char* value_name = second_part.ToConstVector().start();
UProperty property = u_getPropertyEnum(property_name);
- if (property < UCHAR_INT_START) return false;
- if (property >= UCHAR_INT_LIMIT) return false;
if (!IsExactPropertyAlias(property_name, property)) return false;
+ if (property == UCHAR_GENERAL_CATEGORY) {
+ // We want to allow aggregate value names such as "Letter".
+ property = UCHAR_GENERAL_CATEGORY_MASK;
+ } else if (property != UCHAR_SCRIPT &&
+ property != UCHAR_SCRIPT_EXTENSIONS) {
+ return false;
+ }
return LookupPropertyValueName(property, value_name, negate, result,
zone());
}
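After this hunk, \p{...} with a value accepts only three properties: General_Category (widened to the _MASK property so aggregate names such as "Letter" resolve), Script, and Script_Extensions; everything else bails out before the value lookup. A sketch of that filter against ICU's UProperty enum (assumes ICU is available):

#include <unicode/uchar.h>

// Mirrors the parser's filter: widen General_Category to the mask
// property and reject everything but Script / Script_Extensions.
bool AllowPropertyWithValue(UProperty& property) {
  if (property == UCHAR_GENERAL_CATEGORY) {
    property = UCHAR_GENERAL_CATEGORY_MASK;
    return true;
  }
  return property == UCHAR_SCRIPT || property == UCHAR_SCRIPT_EXTENSIONS;
}

int main() {
  UProperty p = u_getPropertyEnum("General_Category");
  return AllowPropertyWithValue(p) ? 0 : 1;
}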
@@ -1720,12 +1726,10 @@ bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
#ifdef V8_I18N_SUPPORT
if (unicode() && ignore_case()) {
- USet* set = uset_open(c, c);
- uset_closeOver(set, USET_CASE_INSENSITIVE);
- uset_removeAllStrings(set);
- bool result = uset_size(set) > 1;
- uset_close(set);
- return result;
+ icu::UnicodeSet set(c, c);
+ set.closeOver(USET_CASE_INSENSITIVE);
+ set.removeAllStrings();
+ return set.size() > 1;
}
// In the case where ICU is not included, we act as if the unicode flag is
// not set, and do not desugar.
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 62daf3f1d5..d40431866a 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -118,12 +118,6 @@ Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
- if (isolate->regexp_function()->initial_map() == receiver->map()) {
- // Fast-path for unmodified JSRegExp instances.
- // TODO(ishell): Adapt for new fast-path logic.
- return Just(true);
- }
-
Handle<Object> match;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, match,
@@ -180,8 +174,7 @@ MaybeHandle<Object> RegExpUtils::SetAdvancedStringIndex(
ASSIGN_RETURN_ON_EXCEPTION(isolate, last_index_obj,
Object::ToLength(isolate, last_index_obj), Object);
-
- const int last_index = Handle<Smi>::cast(last_index_obj)->value();
+ const int last_index = PositiveNumberToUint32(*last_index_obj);
const int new_last_index =
AdvanceStringIndex(isolate, string, last_index, unicode);
diff --git a/deps/v8/src/regexp/x87/OWNERS b/deps/v8/src/regexp/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/regexp/x87/OWNERS
+++ b/deps/v8/src/regexp/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 0de9e1c2bb..1a1ef6a847 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -170,7 +170,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
int loop_nesting_levels) {
JSFunction* function = frame->function();
SharedFunctionInfo* shared = function->shared();
- if (!FLAG_use_osr || function->shared()->IsBuiltin()) {
+ if (!FLAG_use_osr || !function->shared()->IsUserJavaScript()) {
return;
}
@@ -474,10 +474,10 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
// Update shared function info ticks after checking whether functions
// should be optimized to keep FCG (which updates ticks on code) and
// Ignition (which updates ticks on shared function info) in sync.
- List<JSFunction*> functions(4);
+ List<SharedFunctionInfo*> functions(4);
frame->GetFunctions(&functions);
for (int i = functions.length(); --i >= 0;) {
- SharedFunctionInfo* shared_function_info = functions[i]->shared();
+ SharedFunctionInfo* shared_function_info = functions[i];
int ticks = shared_function_info->profiler_ticks();
if (ticks < Smi::kMaxValue) {
shared_function_info->set_profiler_ticks(ticks + 1);
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 1a2d957caf..a9cbc208b3 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -19,7 +19,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
Object* length = prototype->length();
CHECK(length->IsSmi());
@@ -60,17 +60,12 @@ static void InstallBuiltin(
RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
Handle<JSObject> holder =
isolate->factory()->NewJSObject(isolate->object_function());
InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
- if (FLAG_minimal) {
- InstallBuiltin(isolate, holder, "push", Builtins::kArrayPush);
- } else {
- FastArrayPushStub stub(isolate);
- InstallCode(isolate, holder, "push", stub.GetCode());
- }
+ InstallBuiltin(isolate, holder, "push", Builtins::kFastArrayPush);
InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
@@ -90,7 +85,7 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(FixedArray, object, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
return object->get(index);
@@ -99,7 +94,7 @@ RUNTIME_FUNCTION(Runtime_FixedArrayGet) {
RUNTIME_FUNCTION(Runtime_FixedArraySet) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_CHECKED(FixedArray, object, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_CHECKED(Object, value, 2);
@@ -127,7 +122,7 @@ RUNTIME_FUNCTION(Runtime_TransitionElementsKind) {
// Returns -1 if hole removal is not supported by this method.
RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
if (object->IsJSProxy()) return Smi::FromInt(-1);
@@ -139,7 +134,7 @@ RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
// Move contents of argument 0 (an array) to argument 1 (an array)
RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
JSObject::ValidateElements(from);
@@ -162,7 +157,7 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
// How many elements does this object/array have?
RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
Handle<FixedArrayBase> elements(array->elements(), isolate);
SealHandleScope shs(isolate);
@@ -205,7 +200,7 @@ RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
// Intervals can span over some keys that are not in the object.
RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
ElementsKind kind = array->GetElementsKind();
@@ -249,8 +244,7 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
}
if (j != keys->length()) {
- isolate->heap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *keys, keys->length() - j);
+ isolate->heap()->RightTrimFixedArray(*keys, keys->length() - j);
}
return *isolate->factory()->NewJSArrayWithElements(keys);
@@ -363,7 +357,7 @@ RUNTIME_FUNCTION(Runtime_NewArray) {
RUNTIME_FUNCTION(Runtime_NormalizeElements) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CHECK(!array->HasFixedTypedArrayElements());
CHECK(!array->IsJSGlobalProxy());
@@ -375,7 +369,7 @@ RUNTIME_FUNCTION(Runtime_NormalizeElements) {
// GrowArrayElements returns a sentinel Smi if the object was normalized.
RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_NUMBER_CHECKED(int, key, Int32, args[1]);
@@ -399,7 +393,7 @@ RUNTIME_FUNCTION(Runtime_GrowArrayElements) {
RUNTIME_FUNCTION(Runtime_HasComplexElements) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
for (PrototypeIterator iter(isolate, array, kStartAtReceiver);
!iter.IsAtEnd(); iter.Advance()) {
@@ -421,7 +415,7 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
// ES6 22.1.2.2 Array.isArray
RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
Maybe<bool> result = Object::IsArray(object);
MAYBE_RETURN(result, isolate->heap()->exception());
@@ -430,14 +424,14 @@ RUNTIME_FUNCTION(Runtime_ArrayIsArray) {
RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSArray());
}
RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, original_array, 0);
RETURN_RESULT_OR_FAILURE(
isolate, Object::ArraySpeciesConstructor(isolate, original_array));
@@ -446,7 +440,7 @@ RUNTIME_FUNCTION(Runtime_ArraySpeciesConstructor) {
// ES7 22.1.3.11 Array.prototype.includes
RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
HandleScope shs(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
@@ -502,8 +496,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
// If the receiver is not a special receiver type, and the length is a valid
// element index, perform a fast operation tailored to specific ElementsKinds.
- if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
- len < kMaxUInt32 &&
+ if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
Handle<JSObject> obj = Handle<JSObject>::cast(object);
ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -538,21 +531,21 @@ RUNTIME_FUNCTION(Runtime_ArrayIncludes_Slow) {
RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
HandleScope shs(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, search_element, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
// Let O be ? ToObject(this value).
- Handle<Object> receiver_obj = args.at<Object>(0);
- if (receiver_obj->IsNull(isolate) || receiver_obj->IsUndefined(isolate)) {
+ Handle<Object> receiver_obj = args.at(0);
+ if (receiver_obj->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
isolate->factory()->NewStringFromAsciiChecked(
"Array.prototype.indexOf")));
}
Handle<JSReceiver> object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, object, Object::ToObject(isolate, args.at<Object>(0)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
+ Object::ToObject(isolate, args.at(0)));
// Let len be ? ToLength(? Get(O, "length")).
int64_t len;
@@ -601,8 +594,7 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
// If the receiver is not a special receiver type, and the length is a valid
// element index, perform a fast operation tailored to specific ElementsKinds.
- if (object->map()->instance_type() > LAST_SPECIAL_RECEIVER_TYPE &&
- len < kMaxUInt32 &&
+ if (!object->map()->IsSpecialReceiverMap() && len < kMaxUInt32 &&
JSObject::PrototypeHasNoElements(isolate, JSObject::cast(*object))) {
Handle<JSObject> obj = Handle<JSObject>::cast(object);
ElementsAccessor* elements = obj->GetElementsAccessor();
@@ -636,47 +628,22 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
return Smi::FromInt(-1);
}
+
RUNTIME_FUNCTION(Runtime_SpreadIterablePrepare) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, spread, 0);
- if (spread->IsJSArray()) {
- // Check that the spread arg has fast elements
- Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
- ElementsKind array_kind = spread_array->GetElementsKind();
-
- // And that it has the original ArrayPrototype
- JSObject* array_proto = JSObject::cast(spread_array->map()->prototype());
- Map* iterator_map = isolate->initial_array_iterator_prototype()->map();
-
- // Check that the iterator acts as expected.
- // If IsArrayIteratorLookupChainIntact(), then we know that the initial
- // ArrayIterator is being used. If the map of the prototype has changed,
- // then take the slow path.
-
- if (isolate->is_initial_array_prototype(array_proto) &&
- isolate->IsArrayIteratorLookupChainIntact() &&
- isolate->is_initial_array_iterator_prototype_map(iterator_map)) {
- if (IsFastPackedElementsKind(array_kind)) {
- return *spread;
- }
- if (IsFastHoleyElementsKind(array_kind) &&
- isolate->IsFastArrayConstructorPrototypeChainIntact()) {
- return *spread;
- }
- }
+ // Iterate over the spread if we need to.
+ if (spread->IterationHasObservableEffects()) {
+ Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, spread,
+ Execution::Call(isolate, spread_iterable_function,
+ isolate->factory()->undefined_value(), 1, &spread));
}
- Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
-
- Handle<Object> spreaded;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, spreaded,
- Execution::Call(isolate, spread_iterable_function,
- isolate->factory()->undefined_value(), 1, &spread));
-
- return *spreaded;
+ return *spread;
}
} // namespace internal
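The rewritten Runtime_SpreadIterablePrepare collapses the old fast-path checks (packed elements kind, pristine Array prototype, intact iterator protection) into one predicate, IterationHasObservableEffects(), and only materializes the spread through the iterator protocol when user code could observe the difference. A toy sketch of the control flow (stand-in types, not V8 API):

#include <cassert>

struct Value {
  bool observable_iteration;
  bool went_through_iterator;
};

// Reuse the array as-is unless iterating it could run user code.
Value PrepareSpread(Value spread) {
  if (!spread.observable_iteration) return spread;  // fast path
  spread.went_through_iterator = true;  // stands in for the spread_iterable call
  return spread;
}

int main() {
  assert(!PrepareSpread({false, false}).went_through_iterator);
  assert(PrepareSpread({true, false}).went_through_iterator);
  return 0;
}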
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 3bd0738dd2..ff7ded9b09 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -349,7 +349,7 @@ RUNTIME_FUNCTION(Runtime_ThrowInvalidAtomicAccessIndexError) {
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
@@ -383,7 +383,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -415,7 +415,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -447,7 +447,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -479,7 +479,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -511,7 +511,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -543,7 +543,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
@@ -575,7 +575,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
uint32_t usize = NumberToUint32(*size);
return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 323604ffde..246079232b 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -7,8 +7,10 @@
#include <stdlib.h>
#include <limits>
+#include "src/accessors.h"
#include "src/arguments.h"
#include "src/debug/debug.h"
+#include "src/elements.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
@@ -18,17 +20,9 @@ namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_ThrowNonMethodError) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewReferenceError(MessageTemplate::kNonMethod));
-}
-
-
RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kUnsupportedSuper));
}
@@ -36,7 +30,7 @@ RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
Handle<Object> name(constructor->shared()->name(), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
@@ -44,40 +38,63 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructorNonCallableError) {
}
-RUNTIME_FUNCTION(Runtime_ThrowArrayNotSubclassableError) {
+RUNTIME_FUNCTION(Runtime_ThrowStaticPrototypeError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kArrayNotSubclassable));
+ isolate, NewTypeError(MessageTemplate::kStaticPrototype));
}
-
-static Object* ThrowStaticPrototypeError(Isolate* isolate) {
+RUNTIME_FUNCTION(Runtime_ThrowSuperAlreadyCalledError) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStaticPrototype));
+ isolate, NewReferenceError(MessageTemplate::kSuperAlreadyCalled));
}
+namespace {
-RUNTIME_FUNCTION(Runtime_ThrowStaticPrototypeError) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- return ThrowStaticPrototypeError(isolate);
+Object* ThrowNotSuperConstructor(Isolate* isolate, Handle<Object> constructor,
+ Handle<JSFunction> function) {
+ Handle<Object> super_name;
+ if (constructor->IsJSFunction()) {
+ super_name = handle(Handle<JSFunction>::cast(constructor)->shared()->name(),
+ isolate);
+ } else if (constructor->IsOddball()) {
+ DCHECK(constructor->IsNull(isolate));
+ super_name = isolate->factory()->null_string();
+ } else {
+ super_name = Object::NoSideEffectsToString(isolate, constructor);
+ }
+ // null constructor
+ if (Handle<String>::cast(super_name)->length() == 0) {
+ super_name = isolate->factory()->null_string();
+ }
+ Handle<Object> function_name(function->shared()->name(), isolate);
+ // anonymous class
+ if (Handle<String>::cast(function_name)->length() == 0) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kNotSuperConstructorAnonymousClass,
+ super_name));
+ }
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotSuperConstructor, super_name,
+ function_name));
}
+} // namespace
-RUNTIME_FUNCTION(Runtime_ThrowIfStaticPrototype) {
+RUNTIME_FUNCTION(Runtime_ThrowNotSuperConstructor) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
- if (Name::Equals(name, isolate->factory()->prototype_string())) {
- return ThrowStaticPrototypeError(isolate);
- }
- return *name;
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ return ThrowNotSuperConstructor(isolate, constructor, function);
}
-
RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->heap()->home_object_symbol();
}
@@ -143,13 +160,6 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
prototype, attribs),
Object);
- // TODO(arv): Only do this conditionally.
- Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
- RETURN_ON_EXCEPTION(
- isolate, JSObject::SetOwnPropertyIgnoreAttributes(
- constructor, home_object_symbol, prototype, DONT_ENUM),
- Object);
-
if (!constructor_parent.is_null()) {
MAYBE_RETURN_NULL(JSObject::SetPrototype(constructor, constructor_parent,
false, Object::THROW_ON_ERROR));
@@ -171,13 +181,14 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
handle(Smi::FromInt(end_position), isolate), STRICT),
Object);
- return constructor;
+ // Caller already has access to constructor, so return the prototype.
+ return prototype;
}
RUNTIME_FUNCTION(Runtime_DefineClass) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 1);
CONVERT_SMI_ARG_CHECKED(start_position, 2);
@@ -189,6 +200,42 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
}
namespace {
+void InstallClassNameAccessor(Isolate* isolate, Handle<JSObject> object) {
+ PropertyAttributes attrs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ // Cannot fail since this should only be called when creating an object
+ // literal.
+ CHECK(!JSObject::SetAccessor(
+ object, Accessors::FunctionNameInfo(object->GetIsolate(), attrs))
+ .is_null());
+}
+} // anonymous namespace
+
+RUNTIME_FUNCTION(Runtime_InstallClassNameAccessor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ InstallClassNameAccessor(isolate, object);
+ return *object;
+}
+
+RUNTIME_FUNCTION(Runtime_InstallClassNameAccessorWithCheck) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+ // If a property named "name" is already defined, exit.
+ Handle<Name> key = isolate->factory()->name_string();
+ if (JSObject::HasRealNamedProperty(object, key).FromMaybe(false)) {
+ return *object;
+ }
+
+ // Define the "name" accessor.
+ InstallClassNameAccessor(isolate, object);
+ return *object;
+}
+
+namespace {
enum class SuperMode { kLoad, kStore };
@@ -326,7 +373,7 @@ MaybeHandle<Object> StoreElementToSuper(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
@@ -339,7 +386,7 @@ RUNTIME_FUNCTION(Runtime_StoreToSuper_Strict) {
RUNTIME_FUNCTION(Runtime_StoreToSuper_Sloppy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
@@ -373,7 +420,7 @@ static MaybeHandle<Object> StoreKeyedToSuper(
RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
@@ -387,7 +434,7 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Strict) {
RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 2);
@@ -403,7 +450,56 @@ RUNTIME_FUNCTION(Runtime_GetSuperConstructor) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, active_function, 0);
- return active_function->map()->prototype();
+ Object* prototype = active_function->map()->prototype();
+ if (!prototype->IsConstructor()) {
+ HandleScope scope(isolate);
+ return ThrowNotSuperConstructor(isolate, handle(prototype, isolate),
+ handle(active_function, isolate));
+ }
+ return prototype;
+}
+
+RUNTIME_FUNCTION(Runtime_NewWithSpread) {
+ HandleScope scope(isolate);
+ DCHECK_LE(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, constructor, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, new_target, 1);
+
+ int constructor_argc = args.length() - 2;
+ CONVERT_ARG_HANDLE_CHECKED(Object, spread, args.length() - 1);
+
+ // Iterate over the spread if we need to.
+ if (spread->IterationHasObservableEffects()) {
+ Handle<JSFunction> spread_iterable_function = isolate->spread_iterable();
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, spread,
+ Execution::Call(isolate, spread_iterable_function,
+ isolate->factory()->undefined_value(), 1, &spread));
+ }
+
+ uint32_t spread_length;
+ Handle<JSArray> spread_array = Handle<JSArray>::cast(spread);
+ CHECK(spread_array->length()->ToArrayIndex(&spread_length));
+ int result_length = constructor_argc - 1 + spread_length;
+ ScopedVector<Handle<Object>> construct_args(result_length);
+
+ // Append each of the individual args to the result.
+ for (int i = 0; i < constructor_argc - 1; i++) {
+ construct_args[i] = args.at<Object>(2 + i);
+ }
+
+ // Append each element of the spread to the result.
+ ElementsAccessor* accessor = spread_array->GetElementsAccessor();
+ for (uint32_t i = 0; i < spread_length; i++) {
+ DCHECK(accessor->HasElement(spread_array, i));
+ Handle<Object> element = accessor->Get(spread_array, i);
+ construct_args[constructor_argc - 1 + i] = element;
+ }
+
+ // Call the constructor.
+ RETURN_RESULT_OR_FAILURE(
+ isolate, Execution::New(isolate, constructor, new_target, result_length,
+ construct_args.start()));
}
} // namespace internal
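Runtime_NewWithSpread above concatenates the leading fixed arguments with the elements of the trailing spread array into one flat vector before handing everything to Execution::New. A stand-alone sketch of the flattening step (std::vector stands in for ScopedVector and the ElementsAccessor loop):

#include <vector>

// args = [fixed..., spread]; build [fixed..., spread[0], spread[1], ...].
template <typename T>
std::vector<T> FlattenForConstruct(const std::vector<T>& fixed,
                                   const std::vector<T>& spread) {
  std::vector<T> out;
  out.reserve(fixed.size() + spread.size());
  out.insert(out.end(), fixed.begin(), fixed.end());
  out.insert(out.end(), spread.begin(), spread.end());
  return out;
}

int main() {
  std::vector<int> fixed{1, 2};
  std::vector<int> spread{3, 4, 5};
  return FlattenForConstruct(fixed, spread).size() == 5 ? 0 : 1;
}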
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 57e5d98532..15c1fab76f 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -14,7 +14,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_StringGetRawHashField) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
return *isolate->factory()->NewNumberFromUint(string->hash_field());
}
@@ -22,14 +22,14 @@ RUNTIME_FUNCTION(Runtime_StringGetRawHashField) {
RUNTIME_FUNCTION(Runtime_TheHole) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->heap()->the_hole_value();
}
RUNTIME_FUNCTION(Runtime_JSCollectionGetTable) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSObject, object, 0);
CHECK(object->IsJSSet() || object->IsJSMap());
return static_cast<JSCollection*>(object)->table();
@@ -38,7 +38,7 @@ RUNTIME_FUNCTION(Runtime_JSCollectionGetTable) {
RUNTIME_FUNCTION(Runtime_GenericHash) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
Smi* hash = Object::GetOrCreateHash(isolate, object);
return hash;
@@ -47,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_GenericHash) {
RUNTIME_FUNCTION(Runtime_SetInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
JSSet::Initialize(holder, isolate);
return *holder;
@@ -56,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_SetInitialize) {
RUNTIME_FUNCTION(Runtime_SetGrow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
table = OrderedHashSet::EnsureGrowable(table);
@@ -67,7 +67,7 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
RUNTIME_FUNCTION(Runtime_SetShrink) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
table = OrderedHashSet::Shrink(table);
@@ -78,7 +78,7 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
RUNTIME_FUNCTION(Runtime_SetClear) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
JSSet::Clear(holder);
return isolate->heap()->undefined_value();
@@ -87,7 +87,7 @@ RUNTIME_FUNCTION(Runtime_SetClear) {
RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -103,7 +103,7 @@ RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
Handle<JSSetIterator> result = isolate->factory()->NewJSSetIterator();
@@ -117,7 +117,7 @@ RUNTIME_FUNCTION(Runtime_SetIteratorClone) {
RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
CONVERT_ARG_CHECKED(JSArray, value_array, 1);
return holder->Next(value_array);
@@ -130,7 +130,7 @@ RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
// 2: Iteration kind
RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
@@ -142,7 +142,7 @@ RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
RUNTIME_FUNCTION(Runtime_MapInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
JSMap::Initialize(holder, isolate);
return *holder;
@@ -151,7 +151,7 @@ RUNTIME_FUNCTION(Runtime_MapInitialize) {
RUNTIME_FUNCTION(Runtime_MapShrink) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
table = OrderedHashMap::Shrink(table);
@@ -162,7 +162,7 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
RUNTIME_FUNCTION(Runtime_MapClear) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
JSMap::Clear(holder);
return isolate->heap()->undefined_value();
@@ -171,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_MapClear) {
RUNTIME_FUNCTION(Runtime_MapGrow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
table = OrderedHashMap::EnsureGrowable(table);
@@ -182,7 +182,7 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -199,7 +199,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
Handle<JSMapIterator> result = isolate->factory()->NewJSMapIterator();
@@ -217,7 +217,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
// 2: Iteration kind
RUNTIME_FUNCTION(Runtime_MapIteratorDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
@@ -229,7 +229,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorDetails) {
RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
CHECK(max_entries >= 0);
@@ -264,7 +264,7 @@ RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
CONVERT_ARG_CHECKED(JSArray, value_array, 1);
return holder->Next(value_array);
@@ -273,7 +273,7 @@ RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
JSWeakCollection::Initialize(weak_collection, isolate);
return *weak_collection;
@@ -282,7 +282,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -298,7 +298,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -313,7 +313,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_SMI_ARG_CHECKED(hash, 2)
@@ -328,7 +328,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CHECK(key->IsJSReceiver() || key->IsSymbol());
@@ -344,7 +344,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
CHECK(max_values >= 0);
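
The hunks above are mechanical: every boolean-style DCHECK(args.length() == N) becomes the two-argument DCHECK_EQ(N, args.length()). The payoff is diagnostics: the two-argument form can report both operands when the check fires. A minimal standalone sketch of the difference, using illustrative MY_ macros rather than V8's actual definitions:

#include <cstdio>
#include <cstdlib>

// Stand-ins for the two assertion styles; V8's real macros live in
// its base library and are more elaborate.
#define MY_DCHECK(condition)                                  \
  do {                                                        \
    if (!(condition)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #condition); \
      std::abort();                                           \
    }                                                         \
  } while (0)

#define MY_DCHECK_EQ(expected, actual)                                \
  do {                                                                \
    long long e = (expected), a = (actual);                           \
    if (e != a) {                                                     \
      std::fprintf(stderr, "Check failed: %s == %s (%lld vs %lld)\n", \
                   #expected, #actual, e, a);                         \
      std::abort();                                                   \
    }                                                                 \
  } while (0)

int main() {
  int args_length = 1;
  MY_DCHECK(args_length == 1);   // On failure prints only the condition text.
  MY_DCHECK_EQ(1, args_length);  // On failure would also print both values.
  return 0;
}
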
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 472e076de4..f1c76bb2ac 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -11,7 +11,6 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/interpreter/bytecode-array-iterator.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/v8threads.h"
@@ -93,11 +92,11 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
}
Handle<JSObject> foreign;
if (args[2]->IsJSObject()) {
- foreign = args.at<i::JSObject>(2);
+ foreign = args.at<JSObject>(2);
}
Handle<JSArrayBuffer> memory;
if (args[3]->IsJSArrayBuffer()) {
- memory = args.at<i::JSArrayBuffer>(3);
+ memory = args.at<JSArrayBuffer>(3);
}
if (function->shared()->HasAsmWasmData() &&
AsmJs::IsStdlibValid(isolate, handle(function->shared()->asm_wasm_data()),
@@ -128,7 +127,7 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
DCHECK(AllowHeapAllocation::IsAllowed());
delete deoptimizer;
@@ -159,7 +158,7 @@ class ActivationsFinder : public ThreadVisitor {
RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(type_arg, 0);
Deoptimizer::BailoutType type =
static_cast<Deoptimizer::BailoutType>(type_arg);
@@ -270,9 +269,9 @@ BailoutId DetermineEntryAndDisarmOSRForBaseline(JavaScriptFrame* frame) {
// Revert the patched back edge table, regardless of whether OSR succeeds.
BackEdgeTable::Revert(frame->isolate(), *caller_code);
+ // Return a BailoutId representing an AST id of the {IterationStatement}.
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
-
return caller_code->TranslatePcOffsetToAstId(pc_offset);
}
@@ -293,27 +292,15 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
// Reset the OSR loop nesting depth to disarm back edges.
bytecode->set_osr_loop_nesting_level(0);
- // Translate the offset of the jump instruction to the jump target offset of
- // that instruction so that the derived BailoutId points to the loop header.
- // TODO(mstarzinger): This can be merged with {BytecodeBranchAnalysis} which
- // already performs a pre-pass over the bytecode stream anyways.
- int jump_offset = iframe->GetBytecodeOffset();
- interpreter::BytecodeArrayIterator iterator(bytecode);
- while (iterator.current_offset() + iterator.current_prefix_offset() <
- jump_offset) {
- iterator.Advance();
- }
- DCHECK(interpreter::Bytecodes::IsJump(iterator.current_bytecode()));
- int jump_target_offset = iterator.GetJumpTargetOffset();
-
- return BailoutId(jump_target_offset);
+ // Return a BailoutId representing the bytecode offset of the back branch.
+ return BailoutId(iframe->GetBytecodeOffset());
}
} // namespace
RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
// We're not prepared to handle a function with an arguments object.
@@ -398,7 +385,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
// First check if this is a real stack overflow.
@@ -465,9 +452,9 @@ static Object* CompileGlobalEval(Isolate* isolate, Handle<String> source,
RUNTIME_FUNCTION(Runtime_ResolvePossiblyDirectEval) {
HandleScope scope(isolate);
- DCHECK(args.length() == 6);
+ DCHECK_EQ(6, args.length());
- Handle<Object> callee = args.at<Object>(0);
+ Handle<Object> callee = args.at(0);
// If "eval" didn't refer to the original GlobalEval, it's not a
// direct call to eval.
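
The interpreter OSR change above moves loop-header resolution out of the runtime: DetermineEntryAndDisarmOSRForInterpreter now returns the bytecode offset of the back branch itself as the BailoutId, leaving it to the OSR compiler to map that offset to the loop header. A toy model of the translation the deleted BytecodeArrayIterator loop performed (illustrative types, not V8's bytecode API):

#include <cassert>
#include <vector>

struct Insn {
  int offset;       // Bytecode offset of this instruction.
  int jump_target;  // Target offset if this is a jump, else -1.
};

// Walk the stream to the branch at jump_offset and return its target,
// so the resulting id names the loop header rather than the branch.
int JumpTargetAt(const std::vector<Insn>& bytecode, int jump_offset) {
  for (const Insn& insn : bytecode) {
    if (insn.offset == jump_offset) {
      assert(insn.jump_target >= 0);  // Must be a jump, per the old DCHECK.
      return insn.jump_target;
    }
  }
  return -1;
}

int main() {
  // Loop header at offset 4; the back branch at offset 20 jumps to it.
  std::vector<Insn> bytecode = {{0, -1}, {4, -1}, {12, -1}, {20, 4}};
  assert(JumpTargetAt(bytecode, 20) == 4);
  return 0;
}
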
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 824ea92a0f..d24a450cbf 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -5,6 +5,7 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/compiler.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
@@ -24,7 +25,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_DebugBreak) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
isolate->debug()->set_return_value(value);
@@ -38,7 +39,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreak) {
RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
isolate->debug()->set_return_value(value);
@@ -65,7 +66,7 @@ RUNTIME_FUNCTION(Runtime_DebugBreakOnBytecode) {
RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
if (isolate->debug()->break_points_active()) {
isolate->debug()->HandleDebugBreak();
}
@@ -79,9 +80,8 @@ RUNTIME_FUNCTION(Runtime_HandleDebuggerStatement) {
// args[1]: object supplied during callback
RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CHECK(args[0]->IsJSFunction() || args[0]->IsUndefined(isolate) ||
- args[0]->IsNull(isolate));
+ DCHECK_EQ(2, args.length());
+ CHECK(args[0]->IsJSFunction() || args[0]->IsNullOrUndefined(isolate));
CONVERT_ARG_HANDLE_CHECKED(Object, callback, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, data, 1);
isolate->debug()->SetEventListener(callback, data);
@@ -92,7 +92,7 @@ RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
isolate->stack_guard()->RequestDebugBreak();
return isolate->heap()->undefined_value();
}
@@ -136,14 +136,6 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
return it->isolate()->factory()->undefined_value();
}
-
-static Handle<Object> DebugGetProperty(Handle<Object> object,
- Handle<Name> name) {
- LookupIterator it(object, name);
- return DebugGetProperty(&it);
-}
-
-
template <class IteratorType>
static MaybeHandle<JSArray> GetIteratorInternalProperties(
Isolate* isolate, Handle<IteratorType> object) {
@@ -248,24 +240,8 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(5, generator->receiver());
return factory->NewJSArrayWithElements(result);
} else if (object->IsJSPromise()) {
- Handle<JSObject> promise = Handle<JSObject>::cast(object);
-
- Handle<Object> status_obj =
- DebugGetProperty(promise, isolate->factory()->promise_state_symbol());
- CHECK(status_obj->IsSmi());
- const char* status = "rejected";
- int status_val = Handle<Smi>::cast(status_obj)->value();
- switch (status_val) {
- case kPromiseFulfilled:
- status = "resolved";
- break;
- case kPromisePending:
- status = "pending";
- break;
- default:
- DCHECK_EQ(kPromiseRejected, status_val);
- }
-
+ Handle<JSPromise> promise = Handle<JSPromise>::cast(object);
+ const char* status = JSPromise::Status(promise->status());
Handle<FixedArray> result = factory->NewFixedArray(2 * 2);
Handle<String> promise_status =
factory->NewStringFromAsciiChecked("[[PromiseStatus]]");
@@ -273,8 +249,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<String> status_str = factory->NewStringFromAsciiChecked(status);
result->set(1, *status_str);
- Handle<Object> value_obj =
- DebugGetProperty(promise, isolate->factory()->promise_result_symbol());
+ Handle<Object> value_obj(promise->result(), isolate);
Handle<String> promise_value =
factory->NewStringFromAsciiChecked("[[PromiseValue]]");
result->set(2, *promise_value);
@@ -315,7 +290,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_DebugGetInternalProperties) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
RETURN_RESULT_OR_FAILURE(isolate,
Runtime::GetInternalProperties(isolate, obj));
@@ -407,7 +382,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -416,14 +391,13 @@ RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
return *DebugGetProperty(&it);
}
-
-// Return the property type calculated from the property details.
+// Return the property kind calculated from the property details.
// args[0]: smi with property details.
-RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) {
+RUNTIME_FUNCTION(Runtime_DebugPropertyKindFromDetails) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
- return Smi::FromInt(static_cast<int>(details.type()));
+ return Smi::FromInt(static_cast<int>(details.kind()));
}
@@ -431,7 +405,7 @@ RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) {
// args[0]: smi with property details.
RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
return Smi::FromInt(static_cast<int>(details.attributes()));
}
@@ -439,7 +413,7 @@ RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
return isolate->heap()->true_value();
@@ -448,7 +422,7 @@ RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
RUNTIME_FUNCTION(Runtime_GetFrameCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
@@ -460,22 +434,18 @@ RUNTIME_FUNCTION(Runtime_GetFrameCount) {
return Smi::kZero;
}
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
for (StackTraceFrameIterator it(isolate, id); !it.done(); it.Advance()) {
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- if (it.is_wasm()) {
- n++;
- } else {
- it.javascript_frame()->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- // Omit functions from native and extension scripts.
- if (frames[i].function()->shared()->IsSubjectToDebugging()) n++;
- }
+ frames.Clear();
+ it.frame()->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ // Omit functions from native and extension scripts.
+ if (frames[i].is_subject_to_debugging()) n++;
}
}
return Smi::FromInt(n);
}
-
static const int kFrameDetailsFrameIdIndex = 0;
static const int kFrameDetailsReceiverIndex = 1;
static const int kFrameDetailsFunctionIndex = 2;
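
The Runtime_GetFrameCount rewrite above hoists the FrameSummary list out of the loop and treats wasm and JavaScript frames uniformly: every physical frame is summarized, and only logical frames subject to debugging are counted. A standalone sketch of that counting shape, with toy types in place of V8's frame classes:

#include <cassert>
#include <vector>

struct Summary { bool is_subject_to_debugging; };
// One physical frame can summarize to several logical frames
// (inlining expands a frame into multiple summaries).
using PhysicalFrame = std::vector<Summary>;

int CountDebuggableFrames(const std::vector<PhysicalFrame>& stack) {
  int n = 0;
  for (const PhysicalFrame& frame : stack) {
    for (const Summary& summary : frame) {
      // Omit functions from native and extension scripts.
      if (summary.is_subject_to_debugging) n++;
    }
  }
  return n;
}

int main() {
  std::vector<PhysicalFrame> stack = {{{true}, {false}}, {{true}}};
  assert(CountDebuggableFrames(stack) == 2);
  return 0;
}
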
@@ -508,7 +478,7 @@ static const int kFrameDetailsFirstDynamicIndex = 10;
// Return value if any
RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
@@ -524,11 +494,11 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
StackTraceFrameIterator it(isolate, id);
// Inlined frame index in optimized frame, starting from outer function.
- int inlined_jsframe_index =
+ int inlined_frame_index =
DebugFrameHelper::FindIndexedNonNativeFrame(&it, index);
- if (inlined_jsframe_index == -1) return heap->undefined_value();
+ if (inlined_frame_index == -1) return heap->undefined_value();
- FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
+ FrameInspector frame_inspector(it.frame(), inlined_frame_index, isolate);
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -539,10 +509,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
Handle<Object> frame_id(DebugFrameHelper::WrapFrameId(it.frame()->id()),
isolate);
- // Find source position in unoptimized code.
- int position = frame_inspector.GetSourcePosition();
-
- if (it.is_wasm()) {
+ if (frame_inspector.summary().IsWasm()) {
// Create the details array (no dynamic information for wasm).
Handle<FixedArray> details =
isolate->factory()->NewFixedArray(kFrameDetailsFirstDynamicIndex);
@@ -551,10 +518,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsFrameIdIndex, *frame_id);
// Add the function name.
- Handle<Object> wasm_instance(it.wasm_frame()->wasm_instance(), isolate);
- int func_index = it.wasm_frame()->function_index();
- Handle<String> func_name =
- wasm::GetWasmFunctionName(isolate, wasm_instance, func_index);
+ Handle<String> func_name = frame_inspector.summary().FunctionName();
details->set(kFrameDetailsFunctionIndex, *func_name);
// Add the script wrapper
@@ -569,21 +533,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
details->set(kFrameDetailsLocalCountIndex, Smi::kZero);
// Add the source position.
- // For wasm, it is function-local, so translate it to a module-relative
- // position, such that together with the script it uniquely identifies the
- // position.
- Handle<Object> positionValue;
- if (position != kNoSourcePosition) {
- int translated_position = position;
- if (!wasm::WasmIsAsmJs(*wasm_instance, isolate)) {
- Handle<WasmCompiledModule> compiled_module(
- wasm::GetCompiledModule(JSObject::cast(*wasm_instance)), isolate);
- translated_position +=
- wasm::GetFunctionCodeOffset(compiled_module, func_index);
- }
- details->set(kFrameDetailsSourcePositionIndex,
- Smi::FromInt(translated_position));
- }
+ int position = frame_inspector.summary().SourcePosition();
+ details->set(kFrameDetailsSourcePositionIndex, Smi::FromInt(position));
// Add the constructor information.
details->set(kFrameDetailsConstructCallIndex, heap->ToBoolean(false));
@@ -604,6 +555,9 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
return *isolate->factory()->NewJSArrayWithElements(details);
}
+ // Find source position in unoptimized code.
+ int position = frame_inspector.GetSourcePosition();
+
// Handle JavaScript frames.
bool is_optimized = it.frame()->is_optimized();
@@ -685,7 +639,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// the provided parameters whereas the function frame always has the number
// of arguments matching the function's parameters. The rest of the
// information (except for what is collected above) is the same.
- if ((inlined_jsframe_index == 0) &&
+ if ((inlined_frame_index == 0) &&
it.javascript_frame()->has_adapted_arguments()) {
it.AdvanceToArgumentsFrame();
frame_inspector.SetArgumentsFrame(it.frame());
@@ -743,7 +697,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
}
if (is_optimized) {
flags |= 1 << 1;
- flags |= inlined_jsframe_index << 2;
+ flags |= inlined_frame_index << 2;
}
details->set(kFrameDetailsFlagsIndex, Smi::FromInt(flags));
@@ -778,7 +732,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Add the receiver (same as in function frame).
Handle<Object> receiver(it.frame()->receiver(), isolate);
- DCHECK(!function->shared()->IsBuiltin());
+ DCHECK(function->shared()->IsUserJavaScript());
DCHECK_IMPLIES(is_sloppy(shared->language_mode()), receiver->IsJSReceiver());
details->set(kFrameDetailsReceiverIndex, *receiver);
@@ -789,7 +743,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
RUNTIME_FUNCTION(Runtime_GetScopeCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
@@ -822,7 +776,7 @@ RUNTIME_FUNCTION(Runtime_GetScopeCount) {
// 1: Scope object
RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
@@ -918,7 +872,7 @@ RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
// Check arguments.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
@@ -957,7 +911,7 @@ RUNTIME_FUNCTION(Runtime_GetGeneratorScopeCount) {
RUNTIME_FUNCTION(Runtime_GetGeneratorScopeDetails) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
if (!args[0]->IsJSGeneratorObject()) {
return isolate->heap()->undefined_value();
@@ -1004,7 +958,7 @@ static bool SetScopeVariableValue(ScopeIterator* it, int index,
// Return true if success and false otherwise
RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
HandleScope scope(isolate);
- DCHECK(args.length() == 6);
+ DCHECK_EQ(6, args.length());
// Check arguments.
CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
@@ -1043,7 +997,7 @@ RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
#ifdef DEBUG
// Print the scopes for the top frame.
@@ -1063,7 +1017,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
// args[0]: disable break state
RUNTIME_FUNCTION(Runtime_SetBreakPointsActive) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_BOOLEAN_ARG_CHECKED(active, 0);
isolate->debug()->set_break_points_active(active);
return isolate->heap()->undefined_value();
@@ -1077,7 +1031,7 @@ static bool IsPositionAlignmentCodeCorrect(int alignment) {
RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
@@ -1107,7 +1061,7 @@ RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
// args[2]: number: break point object
RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -1132,7 +1086,7 @@ RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
// args[3]: number: break point object
RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
@@ -1164,7 +1118,7 @@ RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
// args[0]: number: break point object
RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CHECK(isolate->debug()->is_active());
CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
@@ -1180,7 +1134,7 @@ RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
// args[1]: Boolean indicating on/off.
RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
@@ -1197,7 +1151,7 @@ RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
// args[0]: boolean indicating uncaught exceptions
RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
@@ -1213,7 +1167,7 @@ RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
// of frames to step down.
RUNTIME_FUNCTION(Runtime_PrepareStep) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
@@ -1236,11 +1190,23 @@ RUNTIME_FUNCTION(Runtime_PrepareStep) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_PrepareStepFrame) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ CHECK(isolate->debug()->CheckExecutionState());
+
+ // Clear all current stepping setup.
+ isolate->debug()->ClearStepping();
+
+ // Prepare step.
+ isolate->debug()->PrepareStep(StepFrame);
+ return isolate->heap()->undefined_value();
+}
// Clear all stepping set by PrepareStep.
RUNTIME_FUNCTION(Runtime_ClearStepping) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
CHECK(isolate->debug()->is_active());
isolate->debug()->ClearStepping();
return isolate->heap()->undefined_value();
@@ -1252,21 +1218,19 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
// Check the execution state and decode arguments frame and source to be
// evaluated.
- DCHECK(args.length() == 6);
+ DCHECK_EQ(4, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 5);
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
RETURN_RESULT_OR_FAILURE(
- isolate, DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source,
- disable_break, context_extension));
+ isolate,
+ DebugEvaluate::Local(isolate, id, inlined_jsframe_index, source));
}
@@ -1275,23 +1239,19 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluateGlobal) {
// Check the execution state and decode arguments frame and source to be
// evaluated.
- DCHECK(args.length() == 4);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- CONVERT_ARG_HANDLE_CHECKED(HeapObject, context_extension, 3);
- RETURN_RESULT_OR_FAILURE(
- isolate,
- DebugEvaluate::Global(isolate, source, disable_break, context_extension));
+ RETURN_RESULT_OR_FAILURE(isolate, DebugEvaluate::Global(isolate, source));
}
RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
// This runtime function is used by the debugger to determine whether the
// debugger is active or not. Hence we fail gracefully here and don't crash.
@@ -1342,7 +1302,7 @@ static bool HasInPrototypeChainIgnoringProxies(Isolate* isolate,
// args[2]: the maximum number of objects to return
RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, filter, 1);
CHECK(filter->IsUndefined(isolate) || filter->IsJSObject());
@@ -1399,7 +1359,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// args[1]: the maximum number of objects to return
RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 0);
CONVERT_NUMBER_CHECKED(int32_t, max_references, Int32, args[1]);
CHECK(max_references >= 0);
@@ -1432,7 +1392,7 @@ RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
// args[0]: the object to find the prototype for.
RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
// TODO(1543): Come up with a solution for clients to handle potential errors
// thrown by an intermediate proxy.
@@ -1444,7 +1404,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -1497,7 +1457,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
// to have a stack with a C++ frame in the middle.
RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
DebugScope debug_scope(isolate->debug());
@@ -1514,7 +1474,7 @@ RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
RUNTIME_FUNCTION(Runtime_GetDebugContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
Handle<Context> context;
{
DebugScope debug_scope(isolate->debug());
@@ -1534,7 +1494,7 @@ RUNTIME_FUNCTION(Runtime_GetDebugContext) {
// Presently, it only does a full GC.
RUNTIME_FUNCTION(Runtime_CollectGarbage) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
GarbageCollectionReason::kRuntime);
return isolate->heap()->undefined_value();
@@ -1544,7 +1504,7 @@ RUNTIME_FUNCTION(Runtime_CollectGarbage) {
// Gets the current heap usage.
RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
if (!Smi::IsValid(usage)) {
return *isolate->factory()->NewNumberFromInt(usage);
@@ -1561,7 +1521,7 @@ RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
// some kind of user interaction the performance is not crucial.
RUNTIME_FUNCTION(Runtime_GetScript) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, script_name, 0);
Handle<Script> found;
@@ -1585,55 +1545,75 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CHECK(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+ if (script_handle->type() == Script::TYPE_WASM) {
+ // Return 0 for now; this function will disappear soon anyway.
+ return Smi::FromInt(0);
+ }
+
Script::InitLineEnds(script_handle);
FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
return Smi::FromInt(line_ends_array->length());
}
+namespace {
+
+int ScriptLinePosition(Handle<Script> script, int line) {
+ if (line < 0) return -1;
+
+ if (script->type() == Script::TYPE_WASM) {
+ return WasmCompiledModule::cast(script->wasm_compiled_module())
+ ->GetFunctionOffset(line);
+ }
+
+ Script::InitLineEnds(script);
+
+ FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
+ const int line_count = line_ends_array->length();
+ DCHECK_LT(0, line_count);
+
+ if (line == 0) return 0;
+ // If line == line_count, we return the first position beyond the last line.
+ if (line > line_count) return -1;
+ return Smi::cast(line_ends_array->get(line - 1))->value() + 1;
+}
+
+} // namespace
+
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineStartPosition) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
CHECK(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
- Script::InitLineEnds(script_handle);
-
- FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
- const int line_count = line_ends_array->length();
-
- // If line == line_count, we return the first position beyond the last line.
- if (line < 0 || line > line_count) {
- return Smi::FromInt(-1);
- } else if (line == 0) {
- return Smi::kZero;
- } else {
- DCHECK(0 < line && line <= line_count);
- const int pos = Smi::cast(line_ends_array->get(line - 1))->value() + 1;
- return Smi::FromInt(pos);
- }
+ return Smi::FromInt(ScriptLinePosition(script_handle, line));
}
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLineEndPosition) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
CHECK(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+ if (script_handle->type() == Script::TYPE_WASM) {
+ // Return zero for now; this function will disappear soon anyway.
+ return Smi::FromInt(0);
+ }
+
Script::InitLineEnds(script_handle);
FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
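
The new ScriptLinePosition helper above centralizes the line-to-position lookup that Runtime_ScriptLineStartPosition previously inlined, and ScriptLinePositionWithOffset below builds on it for resource offsets. A standalone mirror of its contract over a plain array of line-end offsets (toy code, not the V8 types):

#include <cassert>
#include <vector>

// Position of the first character of a line: 0 for line 0, -1 when out
// of range, one past the previous line's end otherwise. Asking for
// line == line_count yields the first position beyond the last line.
int LineStart(const std::vector<int>& line_ends, int line) {
  const int line_count = static_cast<int>(line_ends.size());
  if (line < 0 || line > line_count) return -1;
  if (line == 0) return 0;
  return line_ends[line - 1] + 1;
}

int main() {
  std::vector<int> line_ends = {9, 25, 40};  // Offsets of three '\n's.
  assert(LineStart(line_ends, 0) == 0);
  assert(LineStart(line_ends, 1) == 10);
  assert(LineStart(line_ends, 3) == 41);  // First position past the last line.
  assert(LineStart(line_ends, 4) == -1);
  return 0;
}
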
@@ -1679,6 +1659,20 @@ static Handle<Object> GetJSPositionInfo(Handle<Script> script, int position,
namespace {
+int ScriptLinePositionWithOffset(Handle<Script> script, int line, int offset) {
+ if (line < 0 || offset < 0) return -1;
+
+ if (line == 0) return ScriptLinePosition(script, line) + offset;
+
+ Script::PositionInfo info;
+ if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET)) {
+ return -1;
+ }
+
+ const int total_line = info.line + line;
+ return ScriptLinePosition(script, total_line);
+}
+
Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
Handle<Object> opt_line,
Handle<Object> opt_column,
@@ -1686,51 +1680,24 @@ Handle<Object> ScriptLocationFromLine(Isolate* isolate, Handle<Script> script,
// Line and column are possibly undefined and we need to handle these cases,
// additionally subtracting corresponding offsets.
- int32_t line;
- if (opt_line->IsNull(isolate) || opt_line->IsUndefined(isolate)) {
- line = 0;
- } else {
+ int32_t line = 0;
+ if (!opt_line->IsNullOrUndefined(isolate)) {
CHECK(opt_line->IsNumber());
line = NumberToInt32(*opt_line) - script->line_offset();
}
- int32_t column;
- if (opt_column->IsNull(isolate) || opt_column->IsUndefined(isolate)) {
- column = 0;
- } else {
+ int32_t column = 0;
+ if (!opt_column->IsNullOrUndefined(isolate)) {
CHECK(opt_column->IsNumber());
column = NumberToInt32(*opt_column);
if (line == 0) column -= script->column_offset();
}
- if (line < 0 || column < 0 || offset < 0) {
- return isolate->factory()->null_value();
- }
-
- Script::InitLineEnds(script);
-
- FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- const int line_count = line_ends_array->length();
-
- int position;
- if (line == 0) {
- position = offset + column;
- } else {
- Script::PositionInfo info;
- if (!Script::GetPositionInfo(script, offset, &info, Script::NO_OFFSET) ||
- info.line + line >= line_count) {
- return isolate->factory()->null_value();
- }
+ int line_position = ScriptLinePositionWithOffset(script, line, offset);
+ if (line_position < 0 || column < 0) return isolate->factory()->null_value();
- const int offset_line = info.line + line;
- const int offset_line_position =
- (offset_line == 0)
- ? 0
- : Smi::cast(line_ends_array->get(offset_line - 1))->value() + 1;
- position = offset_line_position + column;
- }
-
- return GetJSPositionInfo(script, position, Script::NO_OFFSET, isolate);
+ return GetJSPositionInfo(script, line_position + column, Script::NO_OFFSET,
+ isolate);
}
// Slow traversal over all scripts on the heap.
@@ -1760,7 +1727,7 @@ bool GetScriptById(Isolate* isolate, int needle, Handle<Script>* result) {
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
@@ -1776,7 +1743,7 @@ RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine) {
// TODO(5530): Rename once conflicting function has been deleted.
RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
CONVERT_ARG_HANDLE_CHECKED(Object, opt_line, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, opt_column, 2);
@@ -1791,7 +1758,7 @@ RUNTIME_FUNCTION(Runtime_ScriptLocationFromLine2) {
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
@@ -1804,18 +1771,39 @@ RUNTIME_FUNCTION(Runtime_ScriptPositionInfo) {
return *GetJSPositionInfo(script_handle, position, offset_flag, isolate);
}
+// TODO(5530): Rename once conflicting function has been deleted.
+RUNTIME_FUNCTION(Runtime_ScriptPositionInfo2) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_NUMBER_CHECKED(int32_t, scriptid, Int32, args[0]);
+ CONVERT_NUMBER_CHECKED(int32_t, position, Int32, args[1]);
+ CONVERT_BOOLEAN_ARG_CHECKED(with_offset, 2);
+
+ Handle<Script> script;
+ CHECK(GetScriptById(isolate, scriptid, &script));
+
+ const Script::OffsetFlag offset_flag =
+ with_offset ? Script::WITH_OFFSET : Script::NO_OFFSET;
+ return *GetJSPositionInfo(script, position, offset_flag, isolate);
+}
+
// Returns the given line as a string, or null if line is out of bounds.
// The parameter line is expected to include the script's line offset.
// TODO(5530): Remove once uses in debug.js are gone.
RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_NUMBER_CHECKED(int32_t, line, Int32, args[1]);
CHECK(script->value()->IsScript());
Handle<Script> script_handle = Handle<Script>(Script::cast(script->value()));
+ if (script_handle->type() == Script::TYPE_WASM) {
+ // Return null for now; this function will disappear soon anyway.
+ return isolate->heap()->null_value();
+ }
+
Script::InitLineEnds(script_handle);
FixedArray* line_ends_array = FixedArray::cast(script_handle->line_ends());
@@ -1837,14 +1825,19 @@ RUNTIME_FUNCTION(Runtime_ScriptSourceLine) {
return *str;
}
-// Set one shot breakpoints for the callback function that is passed to a
-// built-in function such as Array.forEach to enable stepping into the callback,
-// if we are indeed stepping and the callback is subject to debugging.
-RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
+// On function call, depending on circumstances, prepare for stepping in,
+// or perform a side effect check.
+RUNTIME_FUNCTION(Runtime_DebugOnFunctionCall) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- isolate->debug()->PrepareStepIn(fun);
+ if (isolate->debug()->last_step_action() >= StepIn) {
+ isolate->debug()->PrepareStepIn(fun);
+ }
+ if (isolate->needs_side_effect_check() &&
+ !isolate->debug()->PerformSideEffectCheck(fun)) {
+ return isolate->heap()->exception();
+ }
return isolate->heap()->undefined_value();
}
@@ -1856,17 +1849,17 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInSuspendedGenerator) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugRecordAsyncFunction) {
+RUNTIME_FUNCTION(Runtime_DebugRecordGenerator) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
CHECK(isolate->debug()->last_step_action() >= StepNext);
- isolate->debug()->RecordAsyncFunction(generator);
+ isolate->debug()->RecordGenerator(generator);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
isolate->PushPromise(promise);
@@ -1875,28 +1868,57 @@ RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
SealHandleScope shs(isolate);
isolate->PopPromise();
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_DebugNextMicrotaskId) {
+RUNTIME_FUNCTION(Runtime_DebugNextAsyncTaskId) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- return Smi::FromInt(isolate->GetNextDebugMicrotaskId());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ return Smi::FromInt(isolate->debug()->NextAsyncTaskId(promise));
}
-RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
- DCHECK(args.length() == 3);
+RUNTIME_FUNCTION(Runtime_DebugAsyncFunctionPromiseCreated) {
+ DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, id, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 2);
- isolate->debug()->OnAsyncTaskEvent(type, id, name);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ isolate->PushPromise(promise);
+ int id = isolate->debug()->NextAsyncTaskId(promise);
+ Handle<Symbol> async_stack_id_symbol =
+ isolate->factory()->promise_async_stack_id_symbol();
+ JSObject::SetProperty(promise, async_stack_id_symbol,
+ handle(Smi::FromInt(id), isolate), STRICT)
+ .Assert();
+ isolate->debug()->OnAsyncTaskEvent(debug::kDebugEnqueueAsyncFunction, id);
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_DebugPromiseReject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, rejected_promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+
+ isolate->debug()->OnPromiseReject(rejected_promise, value);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncEventEnqueueRecurring) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_SMI_ARG_CHECKED(status, 1);
+ if (isolate->debug()->is_active()) {
+ isolate->debug()->OnAsyncTaskEvent(
+ status == v8::Promise::kFulfilled ? debug::kDebugEnqueuePromiseResolve
+ : debug::kDebugEnqueuePromiseReject,
+ isolate->debug()->NextAsyncTaskId(promise));
+ }
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_DebugIsActive) {
SealHandleScope shs(isolate);
diff --git a/deps/v8/src/runtime/runtime-error.cc b/deps/v8/src/runtime/runtime-error.cc
index 3a9b192029..6ded550d04 100644
--- a/deps/v8/src/runtime/runtime-error.cc
+++ b/deps/v8/src/runtime/runtime-error.cc
@@ -15,7 +15,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_ErrorToString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, recv, 0);
RETURN_RESULT_OR_FAILURE(isolate, ErrorUtils::ToString(isolate, recv));
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index a91ab28cc6..31da4a4535 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -17,7 +17,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_FunctionGetName) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, function, 0);
if (function->IsJSBoundFunction()) {
@@ -32,7 +32,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetName) {
RUNTIME_FUNCTION(Runtime_FunctionSetName) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
@@ -45,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetName) {
RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, f, 0);
CHECK(f->RemovePrototype());
@@ -99,7 +99,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
int pos = fun->shared()->start_position();
@@ -108,7 +108,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
FixedArray* array = fun->native_context()->embedder_data();
@@ -117,7 +117,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_CHECKED(String, name, 1);
@@ -128,7 +128,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
CONVERT_SMI_ARG_CHECKED(length, 1);
@@ -140,7 +140,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
@@ -153,7 +153,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
@@ -162,7 +162,7 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
RUNTIME_FUNCTION(Runtime_SetCode) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
@@ -203,8 +203,14 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
source_shared->opt_count_and_bailout_reason());
target_shared->set_native(was_native);
target_shared->set_profiler_ticks(source_shared->profiler_ticks());
- SharedFunctionInfo::SetScript(
- target_shared, Handle<Object>(source_shared->script(), isolate));
+ target_shared->set_function_literal_id(source_shared->function_literal_id());
+
+ Handle<Object> source_script(source_shared->script(), isolate);
+ if (source_script->IsScript()) {
+ SharedFunctionInfo::SetScript(source_shared,
+ isolate->factory()->undefined_value());
+ }
+ SharedFunctionInfo::SetScript(target_shared, source_script);
// Set the code of the target function.
target->ReplaceCode(source_shared->code());
@@ -254,10 +260,10 @@ RUNTIME_FUNCTION(Runtime_IsConstructor) {
RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_CHECKED(Object, object, 0);
if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(*object);
+ JSFunction* func = JSFunction::cast(object);
func->shared()->set_force_inline(true);
}
return isolate->heap()->undefined_value();
@@ -272,7 +278,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
- argv[i] = args.at<Object>(2 + i);
+ argv[i] = args.at(2 + i);
}
RETURN_RESULT_OR_FAILURE(
isolate, Execution::Call(isolate, target, receiver, argc, argv.start()));
@@ -282,7 +288,7 @@ RUNTIME_FUNCTION(Runtime_Call) {
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
RUNTIME_FUNCTION(Runtime_ConvertReceiver) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0);
return *Object::ConvertReceiver(isolate, receiver).ToHandleChecked();
}
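
One behavioral change in runtime-function.cc above: Runtime_SetCode now clears the source SharedFunctionInfo's script link before the target adopts it, so a shared function info ends up registered under at most one script. A toy sketch of that hand-off ordering (illustrative types, not V8's):

#include <cassert>

struct Script {};
struct SharedInfo {
  Script* script = nullptr;  // The real SetScript also maintains the
                             // script's list of shared function infos.
};

void TransferScript(SharedInfo* source, SharedInfo* target) {
  Script* script = source->script;
  if (script != nullptr) source->script = nullptr;  // Detach first...
  target->script = script;                          // ...then adopt.
}

int main() {
  Script script;
  SharedInfo source, target;
  source.script = &script;
  TransferScript(&source, &target);
  assert(source.script == nullptr && target.script == &script);
  return 0;
}
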
diff --git a/deps/v8/src/runtime/runtime-futex.cc b/deps/v8/src/runtime/runtime-futex.cc
index a93bb23645..4af0831acf 100644
--- a/deps/v8/src/runtime/runtime-futex.cc
+++ b/deps/v8/src/runtime/runtime-futex.cc
@@ -19,7 +19,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_AtomicsWait) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_INT32_ARG_CHECKED(value, 2);
@@ -37,7 +37,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsWait) {
RUNTIME_FUNCTION(Runtime_AtomicsWake) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CONVERT_INT32_ARG_CHECKED(count, 2);
@@ -53,7 +53,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsWake) {
RUNTIME_FUNCTION(Runtime_AtomicsNumWaitersForTesting) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
CONVERT_SIZE_ARG_CHECKED(index, 1);
CHECK(sta->GetBuffer()->is_shared());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index bb63a3d0d0..96486736e1 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -5,7 +5,6 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
-#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/frames-inl.h"
#include "src/objects-inl.h"
@@ -15,76 +14,30 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
CHECK(IsResumableFunction(function->shared()->kind()));
- Handle<FixedArray> operand_stack;
- if (function->shared()->HasBytecodeArray()) {
- // New-style generators.
- DCHECK(!function->shared()->HasBaselineCode());
- int size = function->shared()->bytecode_array()->register_count();
- operand_stack = isolate->factory()->NewFixedArray(size);
- } else {
- // Old-style generators.
- DCHECK(function->shared()->HasBaselineCode());
- operand_stack = isolate->factory()->empty_fixed_array();
- }
+ // Underlying function needs to have bytecode available.
+ DCHECK(function->shared()->HasBytecodeArray());
+ DCHECK(!function->shared()->HasBaselineCode());
+ int size = function->shared()->bytecode_array()->register_count();
+ Handle<FixedArray> register_file = isolate->factory()->NewFixedArray(size);
Handle<JSGeneratorObject> generator =
isolate->factory()->NewJSGeneratorObject(function);
generator->set_function(*function);
generator->set_context(isolate->context());
generator->set_receiver(*receiver);
- generator->set_operand_stack(*operand_stack);
+ generator->set_register_file(*register_file);
generator->set_continuation(JSGeneratorObject::kGeneratorExecuting);
return *generator;
}
-RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
-
- JavaScriptFrameIterator stack_iterator(isolate);
- JavaScriptFrame* frame = stack_iterator.frame();
- CHECK(IsResumableFunction(frame->function()->shared()->kind()));
- DCHECK_EQ(frame->function(), generator_object->function());
- DCHECK(frame->function()->shared()->is_compiled());
- DCHECK(!frame->function()->IsOptimized());
-
- isolate->debug()->RecordAsyncFunction(generator_object);
-
- // The caller should have saved the context and continuation already.
- DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
- DCHECK_LT(0, generator_object->continuation());
-
- // We expect there to be at least two values on the operand stack: the return
- // value of the yield expression, and the arguments to this runtime call.
- // Neither of those should be saved.
- int operands_count = frame->ComputeOperandsCount();
- DCHECK_GE(operands_count, 1 + args.length());
- operands_count -= 1 + args.length();
-
- if (operands_count == 0) {
- // Although it's semantically harmless to call this function with an
- // operands_count of zero, it is also unnecessary.
- DCHECK_EQ(generator_object->operand_stack(),
- isolate->heap()->empty_fixed_array());
- } else {
- Handle<FixedArray> operand_stack =
- isolate->factory()->NewFixedArray(operands_count);
- frame->SaveOperandStack(*operand_stack);
- generator_object->set_operand_stack(*operand_stack);
- }
-
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_GeneratorClose) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
generator->set_continuation(JSGeneratorObject::kGeneratorClosed);
@@ -94,7 +47,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorClose) {
RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
return generator->function();
@@ -102,15 +55,23 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
return generator->receiver();
}
+RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+ return generator->context();
+}
+
RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
return generator->input_or_debug_pos();
@@ -118,7 +79,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
return Smi::FromInt(generator->resume_mode());
@@ -126,7 +87,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
return Smi::FromInt(generator->continuation());
@@ -134,7 +95,7 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
if (!generator->is_suspended()) return isolate->heap()->undefined_value();
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 75e0952581..6630fadc10 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -8,13 +8,15 @@
#include <memory>
-#include "src/api.h"
#include "src/api-natives.h"
+#include "src/api.h"
#include "src/arguments.h"
#include "src/factory.h"
#include "src/i18n.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/string-case.h"
+#include "src/utils.h"
#include "unicode/brkiter.h"
#include "unicode/calendar.h"
@@ -70,7 +72,7 @@ RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
@@ -107,7 +109,7 @@ RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
const icu::Locale* available_locales = NULL;
@@ -152,7 +154,7 @@ RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
icu::Locale default_locale;
@@ -173,7 +175,7 @@ RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
@@ -257,7 +259,7 @@ RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
@@ -273,7 +275,7 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
@@ -291,63 +293,33 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, impl, 2);
Handle<Symbol> marker = isolate->factory()->intl_initialized_marker_symbol();
JSObject::SetProperty(input, marker, type, STRICT).Assert();
- marker = isolate->factory()->intl_impl_object_symbol();
- JSObject::SetProperty(input, marker, impl, STRICT).Assert();
-
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
-
- if (!input->IsJSObject()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotIntlObject, input));
- }
-
- Handle<JSObject> obj = Handle<JSObject>::cast(input);
-
- Handle<Symbol> marker = isolate->factory()->intl_impl_object_symbol();
-
- Handle<Object> impl = JSReceiver::GetDataProperty(obj, marker);
- if (!impl->IsJSObject()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotIntlObject, obj));
- }
- return *impl;
-}
-
-
RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
- Handle<ObjectTemplateInfo> date_format_template = I18N::GetTemplate(isolate);
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_date_time_format_function());
- // Create an empty object wrapper.
Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- ApiNatives::InstantiateObject(date_format_template));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+ JSObject::New(constructor, constructor));
// Set date time formatter as internal field of the resulting JS object.
icu::SimpleDateFormat* date_format =
@@ -357,11 +329,6 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticChars("dateFormat");
- Handle<String> value = factory->NewStringFromStaticChars("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
  // Make object handle weak so we can delete the date format once GC kicks in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
@@ -374,7 +341,7 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
@@ -384,7 +351,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
- if (!date_format) return isolate->ThrowIllegalOperation();
+ CHECK_NOT_NULL(date_format);
icu::UnicodeString result;
date_format->format(value->Number(), result);
@@ -475,7 +442,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
@@ -485,7 +452,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
- if (!date_format) return isolate->ThrowIllegalOperation();
+ CHECK_NOT_NULL(date_format);
icu::UnicodeString formatted;
icu::FieldPositionIterator fp_iter;
@@ -528,47 +495,21 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormatToParts) {
return *result;
}
-RUNTIME_FUNCTION(Runtime_InternalDateParse) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
-
- v8::String::Utf8Value utf8_date(v8::Utils::ToLocal(date_string));
- icu::UnicodeString u_date(icu::UnicodeString::fromUTF8(*utf8_date));
- icu::SimpleDateFormat* date_format =
- DateFormat::UnpackDateFormat(isolate, date_format_holder);
- if (!date_format) return isolate->ThrowIllegalOperation();
-
- UErrorCode status = U_ZERO_ERROR;
- UDate date = date_format->parse(u_date, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
- RETURN_RESULT_OR_FAILURE(
- isolate, JSDate::New(isolate->date_function(), isolate->date_function(),
- static_cast<double>(date)));
-}
-
-
RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
- Handle<ObjectTemplateInfo> number_format_template =
- I18N::GetTemplate(isolate);
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_number_format_function());
- // Create an empty object wrapper.
Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- ApiNatives::InstantiateObject(number_format_template));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+ JSObject::New(constructor, constructor));
// Set number formatter as internal field of the resulting JS object.
icu::DecimalFormat* number_format =
@@ -578,11 +519,6 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticChars("numberFormat");
- Handle<String> value = factory->NewStringFromStaticChars("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
NumberFormat::DeleteNumberFormat,
@@ -594,7 +530,7 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
@@ -604,7 +540,7 @@ RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
icu::DecimalFormat* number_format =
NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
- if (!number_format) return isolate->ThrowIllegalOperation();
+ CHECK_NOT_NULL(number_format);
icu::UnicodeString result;
number_format->format(value->Number(), result);
@@ -616,62 +552,21 @@ RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
}
-RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
- HandleScope scope(isolate);
-
- DCHECK(args.length() == 2);
-
- CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
-
- isolate->CountUsage(v8::Isolate::UseCounterFeature::kIntlV8Parse);
-
- v8::String::Utf8Value utf8_number(v8::Utils::ToLocal(number_string));
- icu::UnicodeString u_number(icu::UnicodeString::fromUTF8(*utf8_number));
- icu::DecimalFormat* number_format =
- NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
- if (!number_format) return isolate->ThrowIllegalOperation();
-
- UErrorCode status = U_ZERO_ERROR;
- icu::Formattable result;
-  // ICU 4.6 doesn't support the parseCurrency call. We need to wait for ICU49
-  // to be part of Chrome.
-  // TODO(cira): Include currency parsing code using the parseCurrency call.
-  // We need to check if the formatter parses all currencies or only the
-  // one it was constructed with (it will impact the API: how to return the
-  // ISO code and the value).
- number_format->parse(u_number, result, status);
- if (U_FAILURE(status)) return isolate->heap()->undefined_value();
-
- switch (result.getType()) {
- case icu::Formattable::kDouble:
- return *isolate->factory()->NewNumber(result.getDouble());
- case icu::Formattable::kLong:
- return *isolate->factory()->NewNumberFromInt(result.getLong());
- case icu::Formattable::kInt64:
- return *isolate->factory()->NewNumber(
- static_cast<double>(result.getInt64()));
- default:
- return isolate->heap()->undefined_value();
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_CreateCollator) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
- Handle<ObjectTemplateInfo> collator_template = I18N::GetTemplate(isolate);
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_collator_function());
- // Create an empty object wrapper.
Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object, ApiNatives::InstantiateObject(collator_template));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+ JSObject::New(constructor, constructor));
// Set collator as internal field of the resulting JS object.
icu::Collator* collator =
@@ -681,11 +576,6 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticChars("collator");
- Handle<String> value = factory->NewStringFromStaticChars("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
Collator::DeleteCollator,
@@ -697,14 +587,14 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
RUNTIME_FUNCTION(Runtime_InternalCompare) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
CONVERT_ARG_HANDLE_CHECKED(String, string2, 2);
icu::Collator* collator = Collator::UnpackCollator(isolate, collator_holder);
- if (!collator) return isolate->ThrowIllegalOperation();
+ CHECK_NOT_NULL(collator);
string1 = String::Flatten(string1);
string2 = String::Flatten(string2);
@@ -742,7 +632,7 @@ RUNTIME_FUNCTION(Runtime_StringNormalize) {
{"nfkc", UNORM2_DECOMPOSE},
};
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
@@ -791,23 +681,21 @@ RUNTIME_FUNCTION(Runtime_StringNormalize) {
RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, resolved, 2);
- Handle<ObjectTemplateInfo> break_iterator_template =
- I18N::GetTemplate2(isolate);
+ Handle<JSFunction> constructor(
+ isolate->native_context()->intl_v8_break_iterator_function());
- // Create an empty object wrapper.
Handle<JSObject> local_object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, local_object,
- ApiNatives::InstantiateObject(break_iterator_template));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, local_object,
+ JSObject::New(constructor, constructor));
// Set break iterator as internal field of the resulting JS object.
- icu::BreakIterator* break_iterator = BreakIterator::InitializeBreakIterator(
+ icu::BreakIterator* break_iterator = V8BreakIterator::InitializeBreakIterator(
isolate, locale, options, resolved);
if (!break_iterator) return isolate->ThrowIllegalOperation();
@@ -816,16 +704,11 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
// Make sure that the pointer to adopted text is NULL.
local_object->SetInternalField(1, static_cast<Smi*>(nullptr));
- Factory* factory = isolate->factory();
- Handle<String> key = factory->NewStringFromStaticChars("breakIterator");
- Handle<String> value = factory->NewStringFromStaticChars("valid");
- JSObject::AddProperty(local_object, key, value, NONE);
-
// Make object handle weak so we can delete the break iterator once GC kicks
// in.
Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
GlobalHandles::MakeWeak(wrapper.location(), wrapper.location(),
- BreakIterator::DeleteBreakIterator,
+ V8BreakIterator::DeleteBreakIterator,
WeakCallbackType::kInternalFields);
return *local_object;
}
@@ -834,14 +717,14 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
+ V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
icu::UnicodeString* u_text = reinterpret_cast<icu::UnicodeString*>(
break_iterator_holder->GetInternalField(1));
@@ -865,13 +748,13 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
+ V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->first());
}
@@ -880,13 +763,13 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
+ V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->next());
}
@@ -895,13 +778,13 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
+ V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
return *isolate->factory()->NewNumberFromInt(break_iterator->current());
}
@@ -910,13 +793,13 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
icu::BreakIterator* break_iterator =
- BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
- if (!break_iterator) return isolate->ThrowIllegalOperation();
+ V8BreakIterator::UnpackBreakIterator(isolate, break_iterator_holder);
+ CHECK_NOT_NULL(break_iterator);
// TODO(cira): Remove cast once ICU fixes base BreakIterator class.
icu::RuleBasedBreakIterator* rule_based_iterator =
@@ -956,6 +839,7 @@ MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
DisallowHeapAllocation no_gc;
+ DCHECK(s->IsFlat());
String::FlatContent flat = s->GetFlatContent();
const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
status = U_ZERO_ERROR;
@@ -1041,15 +925,14 @@ bool ToUpperFastASCII(const Vector<const Char>& src,
const uint16_t sharp_s = 0xDF;
template <typename Char>
-bool ToUpperOneByte(const Vector<const Char>& src,
- Handle<SeqOneByteString> result, int* sharp_s_count) {
+bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
+ int* sharp_s_count) {
   // Still a pretty fast path for input with non-ASCII Latin-1 characters.
   // There are two special cases.
   // 1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
   // 2. Lowercase sharp-S converts to "SS" (two characters).
*sharp_s_count = 0;
- int32_t index = 0;
for (auto it = src.begin(); it != src.end(); ++it) {
uint16_t ch = static_cast<uint16_t>(*it);
if (V8_UNLIKELY(ch == sharp_s)) {
@@ -1061,7 +944,7 @@ bool ToUpperOneByte(const Vector<const Char>& src,
// need to take the 16-bit path.
return false;
}
- result->SeqOneByteStringSet(index++, ToLatin1Upper(ch));
+ *dest++ = ToLatin1Upper(ch);
}
return true;
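
The two special cases called out in the comment above can be pinned down concretely. What follows is a standalone sketch, not part of this patch (the function name is my own; the mappings come from the Unicode default case tables): the only Latin-1 code units whose default upper-case form escapes the one-byte range are U+00DF, U+00B5 and U+00FF.

#include <cstdint>
#include <cstdio>

// Returns true when the default upper-case mapping of the Latin-1 code unit
// `ch` still fits in one byte (U+0000..U+00FF).
bool UppercaseStaysOneByte(uint16_t ch) {
  if (ch == 0xDF) return false;  // sharp s: uppercases to "SS" (two chars)
  if (ch == 0xB5) return false;  // micro sign: maps to U+039C, Greek capital mu
  if (ch == 0xFF) return false;  // y with diaeresis: maps to U+0178
  return true;
}

int main() {
  const uint16_t samples[] = {0x61, 0xE9, 0xDF, 0xB5, 0xFF};
  for (uint16_t ch : samples) {
    std::printf("U+%04X stays one byte when uppercased: %s\n",
                static_cast<unsigned>(ch),
                UppercaseStaysOneByte(ch) ? "yes" : "no");
  }
  return 0;
}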
@@ -1082,105 +965,112 @@ void ToUpperWithSharpS(const Vector<const Char>& src,
}
}
-} // namespace
+inline int FindFirstUpperOrNonAscii(Handle<String> s, int length) {
+ for (int index = 0; index < length; ++index) {
+ uint16_t ch = s->Get(index);
+ if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ return index;
+ }
+ }
+ return length;
+}
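
A minimal standalone analogue of the scan above, assuming std::string input rather than a V8 String handle. It makes the early-out explicit: when no such index exists, lower-casing is a no-op and the caller can return the original string without allocating.

#include <cstddef>
#include <string>

// Returns the index of the first character that ToLowerCase could change
// (an ASCII upper-case letter or anything outside ASCII), or s.size() when
// the whole string is already lower-case ASCII.
std::size_t FirstUpperOrNonAscii(const std::string& s) {
  for (std::size_t i = 0; i < s.size(); ++i) {
    unsigned char ch = static_cast<unsigned char>(s[i]);
    if ((ch >= 'A' && ch <= 'Z') || (ch & ~0x7Fu)) return i;
  }
  return s.size();  // nothing to do; the input can be reused as the result
}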
-RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
+ if (!s->HasOnlyOneByteChars()) {
+ // Use a slower implementation for strings with characters beyond U+00FF.
+ return LocaleConvertCase(s, isolate, false, "");
+ }
int length = s->length();
- s = String::Flatten(s);
- // First scan the string for uppercase and non-ASCII characters:
- if (s->HasOnlyOneByteChars()) {
- int first_index_to_lower = length;
- for (int index = 0; index < length; ++index) {
- // Blink specializes this path for one-byte strings, so it
- // does not need to do a generic get, but can do the equivalent
- // of SeqOneByteStringGet.
- uint16_t ch = s->Get(index);
- if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
- first_index_to_lower = index;
- break;
- }
- }
+ // We depend here on the invariant that the length of a Latin1
+ // string is invariant under ToLowerCase, and the result always
+ // fits in the Latin1 range in the *root locale*. It does not hold
+ // for ToUpperCase even in the root locale.
+
+  // Scan strings shorter than a machine word for uppercase and non-ASCII
+  // characters without any memory-allocation overhead.
+  // TODO(jshin): Apply this to longer inputs by breaking FastAsciiConvert()
+  // into two parts: one that scans the unchanged prefix, and one that handles
+  // ASCII-only characters.
+ int index_to_first_unprocessed = length;
+ const bool is_short = length < static_cast<int>(sizeof(uintptr_t));
+ if (is_short) {
+ index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
// Nothing to do if the string is all ASCII with no uppercase.
- if (first_index_to_lower == length) return *s;
+ if (index_to_first_unprocessed == length) return *s;
+ }
- // We depend here on the invariant that the length of a Latin1
- // string is invariant under ToLowerCase, and the result always
- // fits in the Latin1 range in the *root locale*. It does not hold
- // for ToUpperCase even in the root locale.
- Handle<SeqOneByteString> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- if (flat.IsOneByte()) {
- const uint8_t* src = flat.ToOneByteVector().start();
- CopyChars(result->GetChars(), src,
- static_cast<size_t>(first_index_to_lower));
- for (int index = first_index_to_lower; index < length; ++index) {
- uint16_t ch = static_cast<uint16_t>(src[index]);
- result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
- }
- } else {
- const uint16_t* src = flat.ToUC16Vector().start();
- CopyChars(result->GetChars(), src,
- static_cast<size_t>(first_index_to_lower));
- for (int index = first_index_to_lower; index < length; ++index) {
- uint16_t ch = src[index];
- result->SeqOneByteStringSet(index, ToLatin1Lower(ch));
- }
+ DisallowHeapAllocation no_gc;
+ DCHECK(s->IsFlat());
+ String::FlatContent flat = s->GetFlatContent();
+ uint8_t* dest = result->GetChars();
+ if (flat.IsOneByte()) {
+ const uint8_t* src = flat.ToOneByteVector().start();
+ bool has_changed_character = false;
+ index_to_first_unprocessed = FastAsciiConvert<true>(
+ reinterpret_cast<char*>(dest), reinterpret_cast<const char*>(src),
+ length, &has_changed_character);
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ if (index_to_first_unprocessed == length)
+ return has_changed_character ? *result : *s;
+
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
+ }
+ } else {
+ if (index_to_first_unprocessed == length) {
+ DCHECK(!is_short);
+ index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
+ }
+ // Nothing to do if the string is all ASCII with no uppercase.
+ if (index_to_first_unprocessed == length) return *s;
+ const uint16_t* src = flat.ToUC16Vector().start();
+ CopyChars(dest, src, index_to_first_unprocessed);
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
}
-
- return *result;
}
- // Blink had an additional case here for ASCII 2-byte strings, but
- // that is subsumed by the above code (assuming there isn't a false
- // negative for HasOnlyOneByteChars).
-
- // Do a slower implementation for cases that include non-ASCII characters.
- return LocaleConvertCase(s, isolate, false, "");
+ return *result;
}
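
The shape of ConvertToLower can be sketched in isolation. This is a simplified stand-in, not V8's FastAsciiConvert: the ASCII prefix is lower-cased byte by byte (V8's helper processes a machine word at a time and also reports whether anything changed), and the Latin-1 tail is finished per character, relying on the root-locale invariant noted above that Latin-1 lower-casing never leaves the one-byte range.

#include <cstddef>
#include <cstdint>

// Lower-cases ASCII bytes until the first non-ASCII byte; returns the index
// of the first unprocessed byte.
std::size_t AsciiPrefixToLower(char* dest, const char* src, std::size_t length) {
  std::size_t i = 0;
  for (; i < length; ++i) {
    unsigned char ch = static_cast<unsigned char>(src[i]);
    if (ch & ~0x7Fu) break;  // leave Latin-1 characters to the slow loop
    dest[i] = (ch >= 'A' && ch <= 'Z') ? static_cast<char>(ch + 0x20) : src[i];
  }
  return i;
}

// Latin-1 lower-casing: A-Z and U+00C0..U+00DE (except the multiplication
// sign U+00D7) map to the code unit 0x20 higher; everything else is fixed.
uint8_t Latin1Lower(uint16_t ch) {
  bool is_upper = (ch >= 'A' && ch <= 'Z') ||
                  (ch >= 0xC0 && ch <= 0xDE && ch != 0xD7);
  return static_cast<uint8_t>(is_upper ? ch + 0x20 : ch);
}

void OneByteToLower(char* dest, const char* src, std::size_t length) {
  for (std::size_t i = AsciiPrefixToLower(dest, src, length); i < length; ++i) {
    dest[i] =
        static_cast<char>(Latin1Lower(static_cast<unsigned char>(src[i])));
  }
}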
-RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
- HandleScope scope(isolate);
- DCHECK_EQ(args.length(), 1);
- CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
-
-  // This function could be optimized for no-op cases the way the lowercase
-  // counterpart is, but in empirical testing, few actual calls to upper()
-  // are no-ops. So it wouldn't be worth the extra time for pre-scanning.
-
+MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
int32_t length = s->length();
- s = String::Flatten(s);
-
if (s->HasOnlyOneByteChars()) {
- Handle<SeqOneByteString> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawOneByteString(length));
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+ DCHECK(s->IsFlat());
int sharp_s_count;
bool is_result_single_byte;
{
DisallowHeapAllocation no_gc;
String::FlatContent flat = s->GetFlatContent();
-      // If it was ok to slow down ASCII-only input slightly, ToUpperFastASCII
-      // could be removed because ToUpperOneByte is pretty fast now (it
-      // does not call the ICU API anymore).
+ uint8_t* dest = result->GetChars();
if (flat.IsOneByte()) {
Vector<const uint8_t> src = flat.ToOneByteVector();
- if (ToUpperFastASCII(src, result)) return *result;
- is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+ bool has_changed_character = false;
+ int index_to_first_unprocessed =
+ FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<const char*>(src.start()),
+ length, &has_changed_character);
+ if (index_to_first_unprocessed == length)
+ return has_changed_character ? *result : *s;
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ is_result_single_byte =
+ ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
+ dest + index_to_first_unprocessed, &sharp_s_count);
} else {
DCHECK(flat.IsTwoByte());
Vector<const uint16_t> src = flat.ToUC16Vector();
if (ToUpperFastASCII(src, result)) return *result;
- is_result_single_byte = ToUpperOneByte(src, result, &sharp_s_count);
+ is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
}
}
@@ -1211,26 +1101,67 @@ RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
return LocaleConvertCase(s, isolate, true, "");
}
+MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate) {
+ return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
+}
+
+} // namespace
+
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ s = String::Flatten(s);
+ return ConvertToLower(s, isolate);
+}
+
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ s = String::Flatten(s);
+ return ConvertToUpper(s, isolate);
+}
+
RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 3);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_upper, 1);
- CONVERT_ARG_HANDLE_CHECKED(SeqOneByteString, lang, 2);
-
- // All the languages requiring special handling ("az", "el", "lt", "tr")
- // have a 2-letter language code.
- DCHECK(lang->length() == 2);
- uint8_t lang_str[3];
- memcpy(lang_str, lang->GetChars(), 2);
- lang_str[2] = 0;
+ CONVERT_ARG_HANDLE_CHECKED(String, lang_arg, 2);
+
+  // A primary language tag can be up to 8 characters long in theory.
+ // https://tools.ietf.org/html/bcp47#section-2.2.1
+ DCHECK(lang_arg->length() <= 8);
+ lang_arg = String::Flatten(lang_arg);
s = String::Flatten(s);
+
+  // All the languages requiring special handling have two-letter codes.
+ if (V8_UNLIKELY(lang_arg->length() > 2))
+ return ConvertCase(s, is_upper, isolate);
+
+ char c1, c2;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent lang = lang_arg->GetFlatContent();
+ c1 = lang.Get(0);
+ c2 = lang.Get(1);
+ }
   // TODO(jshin): Consider adding a fast path for ASCII or Latin-1. The fast
   // path in the root locale needs adjusting for az, lt and tr, where even the
   // case mapping of ASCII-range characters differs.
- // Greek (el) does not require any adjustment, though.
- return LocaleConvertCase(s, isolate, is_upper,
- reinterpret_cast<const char*>(lang_str));
+ // Greek (el) does not require any adjustment.
+ if (V8_UNLIKELY(c1 == 't' && c2 == 'r'))
+ return LocaleConvertCase(s, isolate, is_upper, "tr");
+ if (V8_UNLIKELY(c1 == 'e' && c2 == 'l'))
+ return LocaleConvertCase(s, isolate, is_upper, "el");
+ if (V8_UNLIKELY(c1 == 'l' && c2 == 't'))
+ return LocaleConvertCase(s, isolate, is_upper, "lt");
+ if (V8_UNLIKELY(c1 == 'a' && c2 == 'z'))
+ return LocaleConvertCase(s, isolate, is_upper, "az");
+
+ return ConvertCase(s, is_upper, isolate);
}
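
The locale dispatch above reduces to a small pure function. A standalone sketch under the same assumptions as the patch (the helper name is my own; the four codes are the ones special-cased here):

#include <cstring>

// Returns the ICU locale id whose case mapping differs from the root locale,
// or nullptr when the root-locale conversion is already correct.
const char* SpecialCaseMappingLocale(const char* lang) {
  if (std::strlen(lang) != 2) return nullptr;  // all four codes are two letters
  if (lang[0] == 't' && lang[1] == 'r') return "tr";  // dotted vs. dotless i
  if (lang[0] == 'e' && lang[1] == 'l') return "el";  // Greek accent handling
  if (lang[0] == 'l' && lang[1] == 't') return "lt";  // combining dot above
  if (lang[0] == 'a' && lang[1] == 'z') return "az";  // behaves like Turkish
  return nullptr;
}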
RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 621f33547e..6ff0a09b61 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -9,13 +9,14 @@
#include "src/arguments.h"
#include "src/ast/prettyprinter.h"
#include "src/bootstrapper.h"
+#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
+#include "src/parsing/parsing.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
@@ -23,7 +24,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
CHECK(isolate->bootstrapper()->IsActive());
return isolate->heap()->undefined_value();
}
@@ -31,7 +32,7 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
CHECK(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
@@ -44,7 +45,7 @@ RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
CHECK(isolate->bootstrapper()->IsActive());
JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
@@ -57,7 +58,7 @@ RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
CHECK(array->HasFastElements());
CHECK(isolate->bootstrapper()->IsActive());
@@ -82,14 +83,14 @@ RUNTIME_FUNCTION(Runtime_InstallToContext) {
RUNTIME_FUNCTION(Runtime_Throw) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
return isolate->Throw(args[0]);
}
RUNTIME_FUNCTION(Runtime_ReThrow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
return isolate->ReThrow(args[0]);
}
@@ -106,9 +107,9 @@ RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
CONVERT_SMI_ARG_CHECKED(message_id_smi, 0);
Handle<Object> undefined = isolate->factory()->undefined_value();
- Handle<Object> arg0 = (args.length() > 1) ? args.at<Object>(1) : undefined;
- Handle<Object> arg1 = (args.length() > 2) ? args.at<Object>(2) : undefined;
- Handle<Object> arg2 = (args.length() > 3) ? args.at<Object>(3) : undefined;
+ Handle<Object> arg0 = (args.length() > 1) ? args.at(1) : undefined;
+ Handle<Object> arg1 = (args.length() > 2) ? args.at(2) : undefined;
+ Handle<Object> arg2 = (args.length() > 3) ? args.at(3) : undefined;
MessageTemplate::Template message_id =
static_cast<MessageTemplate::Template>(message_id_smi);
@@ -117,77 +118,23 @@ RUNTIME_FUNCTION(Runtime_ThrowTypeError) {
NewTypeError(message_id, arg0, arg1, arg2));
}
-RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
- HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_SMI_ARG_CHECKED(message_id, 0);
- CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
- Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
- static_cast<MessageTemplate::Template>(message_id));
-
-  // For wasm traps, the byte offset (a.k.a. source position) cannot be
-  // determined from relocation info, since the explicit checks for traps
-  // converge in a single block which calls this runtime function.
-  // We hence pass the byte offset explicitly, and patch it into the top-most
-  // frame (a wasm frame) on the collected stack trace.
- // TODO(wasm): This implementation is temporary, see bug #5007:
- // https://bugs.chromium.org/p/v8/issues/detail?id=5007
- Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
- Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
- error, isolate->factory()->stack_trace_symbol());
- // Patch the stack trace (array of <receiver, function, code, position>).
- if (stack_trace_obj->IsJSArray()) {
- Handle<FrameArray> stack_elements(
- FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
- DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
- DCHECK(stack_elements->Offset(0)->value() >= 0);
- stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
- }
-
- // Patch the detailed stack trace (array of JSObjects with various
- // properties).
- Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
- error, isolate->factory()->detailed_stack_trace_symbol());
- if (detailed_stack_trace_obj->IsJSArray()) {
- Handle<FixedArray> stack_elements(
- FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
- DCHECK_GE(stack_elements->length(), 1);
- Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
- Handle<String> wasm_offset_key =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("column"));
- LookupIterator it(top_frame, wasm_offset_key, top_frame,
- LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- if (it.IsFound()) {
- DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
- // Make column number 1-based here.
- Maybe<bool> data_set = JSReceiver::SetDataProperty(
- &it, handle(Smi::FromInt(byte_offset + 1), isolate));
- DCHECK(data_set.IsJust() && data_set.FromJust() == true);
- USE(data_set);
- }
- }
-
- return isolate->Throw(*error_obj);
-}
-
RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->UnwindAndFindHandler();
}
RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->PromoteScheduledException();
}
RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
@@ -196,7 +143,7 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
RUNTIME_FUNCTION(Runtime_NewTypeError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
@@ -207,7 +154,7 @@ RUNTIME_FUNCTION(Runtime_NewTypeError) {
RUNTIME_FUNCTION(Runtime_NewReferenceError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
@@ -218,7 +165,7 @@ RUNTIME_FUNCTION(Runtime_NewReferenceError) {
RUNTIME_FUNCTION(Runtime_NewSyntaxError) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_INT32_ARG_CHECKED(template_index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 1);
auto message_template =
@@ -234,7 +181,7 @@ RUNTIME_FUNCTION(Runtime_ThrowCannotConvertToPrimitive) {
RUNTIME_FUNCTION(Runtime_ThrowIllegalInvocation) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kIllegalInvocation));
}
@@ -249,6 +196,14 @@ RUNTIME_FUNCTION(Runtime_ThrowIncompatibleMethodReceiver) {
NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, arg0, arg1));
}
+RUNTIME_FUNCTION(Runtime_ThrowInvalidHint) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, hint, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kInvalidHint, hint));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
@@ -256,13 +211,20 @@ RUNTIME_FUNCTION(Runtime_ThrowInvalidStringLength) {
RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kIteratorResultNotAnObject, value));
}
+RUNTIME_FUNCTION(Runtime_ThrowSymbolIteratorInvalid) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(0, args.length());
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kSymbolIteratorInvalid));
+}
+
RUNTIME_FUNCTION(Runtime_ThrowNotGeneric) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -290,7 +252,7 @@ RUNTIME_FUNCTION(Runtime_ThrowApplyNonFunction) {
RUNTIME_FUNCTION(Runtime_StackGuard) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
// First check if this is a real stack overflow.
StackLimitCheck check(isolate);
@@ -304,14 +266,14 @@ RUNTIME_FUNCTION(Runtime_StackGuard) {
RUNTIME_FUNCTION(Runtime_Interrupt) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->stack_guard()->HandleInterrupts();
}
RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CHECK(IsAligned(size, kPointerSize));
CHECK(size > 0);
@@ -322,14 +284,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(size, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
CHECK(IsAligned(size, kPointerSize));
CHECK(size > 0);
- CHECK(size <= kMaxRegularHeapObjectSize);
bool double_align = AllocateDoubleAlignFlag::decode(flags);
AllocationSpace space = AllocateTargetSpace::decode(flags);
+ CHECK(size <= kMaxRegularHeapObjectSize || space == LO_SPACE);
return *isolate->factory()->NewFillerObject(size, double_align, space);
}
@@ -365,20 +327,19 @@ namespace {
bool ComputeLocation(Isolate* isolate, MessageLocation* target) {
JavaScriptFrameIterator it(isolate);
if (!it.done()) {
- JavaScriptFrame* frame = it.frame();
- JSFunction* fun = frame->function();
- Object* script = fun->shared()->script();
+ // Compute the location from the function and the relocation info of the
+ // baseline code. For optimized code this will use the deoptimization
+ // information to get canonical location information.
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ it.frame()->Summarize(&frames);
+ auto& summary = frames.last().AsJavaScript();
+ Handle<SharedFunctionInfo> shared(summary.function()->shared());
+ Handle<Object> script(shared->script(), isolate);
+ int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
if (script->IsScript() &&
- !(Script::cast(script)->source()->IsUndefined(isolate))) {
- Handle<Script> casted_script(Script::cast(script), isolate);
- // Compute the location from the function and the relocation info of the
- // baseline code. For optimized code this will use the deoptimization
- // information to get canonical location information.
- List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
- it.frame()->Summarize(&frames);
- FrameSummary& summary = frames.last();
- int pos = summary.abstract_code()->SourcePosition(summary.code_offset());
- *target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+ !(Handle<Script>::cast(script)->source()->IsUndefined(isolate))) {
+ Handle<Script> casted_script = Handle<Script>::cast(script);
+ *target = MessageLocation(casted_script, pos, pos + 1, shared);
return true;
}
}
@@ -390,12 +351,9 @@ Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
MessageLocation location;
if (ComputeLocation(isolate, &location)) {
Zone zone(isolate->allocator(), ZONE_NAME);
- std::unique_ptr<ParseInfo> info(
- location.function()->shared()->is_function()
- ? new ParseInfo(&zone, handle(location.function()->shared()))
- : new ParseInfo(&zone, location.script()));
- if (Parser::ParseStatic(info.get())) {
- CallPrinter printer(isolate, location.function()->shared()->IsBuiltin());
+ std::unique_ptr<ParseInfo> info(new ParseInfo(&zone, location.shared()));
+ if (parsing::ParseAny(info.get())) {
+ CallPrinter printer(isolate, location.shared()->IsUserJavaScript());
Handle<String> str = printer.Print(info->literal(), location.start_pos());
if (str->length() > 0) return str;
} else {
@@ -522,20 +480,20 @@ RUNTIME_FUNCTION(Runtime_OrdinaryHasInstance) {
isolate, Object::OrdinaryHasInstance(isolate, callable, object));
}
-RUNTIME_FUNCTION(Runtime_IsWasmInstance) {
+RUNTIME_FUNCTION(Runtime_Typeof) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_CHECKED(Object, object, 0);
- bool is_wasm_instance =
- object->IsJSObject() && wasm::IsWasmInstance(JSObject::cast(object));
- return *isolate->factory()->ToBoolean(is_wasm_instance);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ return *Object::TypeOf(isolate, object);
}
-RUNTIME_FUNCTION(Runtime_Typeof) {
+RUNTIME_FUNCTION(Runtime_AllowDynamicFunction) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- return *Object::TypeOf(isolate, object);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
+ Handle<JSObject> global_proxy(target->global_proxy(), isolate);
+ return *isolate->factory()->ToBoolean(
+ Builtins::AllowDynamicFunction(isolate, target, global_proxy));
}
} // namespace internal
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 62eee1744f..2201b4c337 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -21,9 +21,9 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- CONVERT_SMI_ARG_CHECKED(pretenured_flag, 1);
+ CONVERT_SMI_ARG_CHECKED(pretenured_flag, 3);
Handle<Context> context(isolate->context(), isolate);
return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, context, static_cast<PretenureFlag>(pretenured_flag));
@@ -155,22 +155,6 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_InterpreterClearPendingMessage) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(0, args.length());
- Object* message = isolate->thread_local_top()->pending_message_obj_;
- isolate->clear_pending_message();
- return message;
-}
-
-RUNTIME_FUNCTION(Runtime_InterpreterSetPendingMessage) {
- SealHandleScope shs(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, message, 0);
- isolate->thread_local_top()->pending_message_obj_ = *message;
- return isolate->heap()->undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 8bb4522a98..45b83293b6 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -113,7 +113,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
- Handle<FixedArray> elements) {
+ Handle<ConstantElementsPair> elements) {
// Create the JSArray.
Handle<JSFunction> constructor = isolate->array_function();
@@ -124,9 +124,8 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
isolate->factory()->NewJSObject(constructor, pretenure_flag));
ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(elements->get(0))->value());
- Handle<FixedArrayBase> constant_elements_values(
- FixedArrayBase::cast(elements->get(1)));
+ static_cast<ElementsKind>(elements->elements_kind());
+ Handle<FixedArrayBase> constant_elements_values(elements->constant_values());
{
DisallowHeapAllocation no_gc;
@@ -186,14 +185,21 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<LiteralsArray> literals,
Handle<FixedArray> array) {
- Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
+ Handle<HeapObject> elements = CompileTimeValue::GetElements(array);
switch (CompileTimeValue::GetLiteralType(array)) {
- case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate, literals, elements, true);
- case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS:
- return CreateObjectLiteralBoilerplate(isolate, literals, elements, false);
- case CompileTimeValue::ARRAY_LITERAL:
- return CreateArrayLiteralBoilerplate(isolate, literals, elements);
+ case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS: {
+ Handle<FixedArray> props = Handle<FixedArray>::cast(elements);
+ return CreateObjectLiteralBoilerplate(isolate, literals, props, true);
+ }
+ case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS: {
+ Handle<FixedArray> props = Handle<FixedArray>::cast(elements);
+ return CreateObjectLiteralBoilerplate(isolate, literals, props, false);
+ }
+ case CompileTimeValue::ARRAY_LITERAL: {
+ Handle<ConstantElementsPair> elems =
+ Handle<ConstantElementsPair>::cast(elements);
+ return CreateArrayLiteralBoilerplate(isolate, literals, elems);
+ }
default:
UNREACHABLE();
return MaybeHandle<Object>();
@@ -270,12 +276,11 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
- Handle<FixedArray> elements) {
+ Handle<ConstantElementsPair> elements) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
if (literal_site->IsUndefined(isolate)) {
- DCHECK(*elements != isolate->heap()->empty_fixed_array());
Handle<Object> boilerplate;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, boilerplate,
@@ -298,10 +303,9 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
return site;
}
-
static MaybeHandle<JSObject> CreateArrayLiteralImpl(
Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
- Handle<FixedArray> elements, int flags) {
+ Handle<ConstantElementsPair> elements, int flags) {
CHECK(literals_index >= 0 && literals_index < literals->literals_count());
Handle<AllocationSite> site;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -328,7 +332,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+ CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<LiteralsArray> literals(closure->literals(), isolate);
@@ -343,7 +347,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
+ CONVERT_ARG_HANDLE_CHECKED(ConstantElementsPair, elements, 2);
Handle<LiteralsArray> literals(closure->literals(), isolate);
RETURN_RESULT_OR_FAILURE(
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index a19ccaa584..56493252c8 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -21,7 +21,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSValue, script_value, 0);
CHECK(script_value->value()->IsScript());
@@ -63,7 +63,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSValue, script, 0);
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -81,7 +81,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
@@ -100,15 +100,31 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
}
}
+// Recreate the shared function infos array after changing the IDs of all
+// SharedFunctionInfos.
+RUNTIME_FUNCTION(Runtime_LiveEditFixupScript) {
+ HandleScope scope(isolate);
+ CHECK(isolate->debug()->live_edit_enabled());
+ DCHECK_EQ(args.length(), 2);
+ CONVERT_ARG_CHECKED(JSValue, script_value, 0);
+ CONVERT_INT32_ARG_CHECKED(max_function_literal_id, 1);
+
+ CHECK(script_value->value()->IsScript());
+ Handle<Script> script(Script::cast(script_value->value()));
+
+ LiveEdit::FixupScript(script, max_function_literal_id);
+ return isolate->heap()->undefined_value();
+}
RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 1);
+ DCHECK_EQ(args.length(), 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
+ CONVERT_INT32_ARG_CHECKED(new_function_literal_id, 1);
CHECK(SharedInfoWrapper::IsInstance(shared_info));
- LiveEdit::FunctionSourceUpdated(shared_info);
+ LiveEdit::FunctionSourceUpdated(shared_info, new_function_literal_id);
return isolate->heap()->undefined_value();
}
@@ -117,7 +133,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
CHECK(SharedInfoWrapper::IsInstance(shared_info));
@@ -131,7 +147,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1);
@@ -158,7 +174,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
@@ -181,7 +197,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
CHECK(SharedInfoWrapper::IsInstance(shared_array));
@@ -198,7 +214,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, old_shared_array, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, new_shared_array, 1);
CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 2);
@@ -236,7 +252,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
@@ -256,7 +272,7 @@ RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
HandleScope scope(isolate);
CHECK(isolate->debug()->live_edit_enabled());
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
CHECK(isolate->debug()->CheckExecutionState(break_id));
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 404305a150..5bd7bde1eb 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -15,7 +15,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_GenerateRandomNumbers) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
Handle<Context> native_context = isolate->native_context();
DCHECK_EQ(0, native_context->math_random_index()->value());
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 2b813430e0..f2a9761203 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -11,7 +11,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(module_request, 0);
Handle<Module> module(isolate->context()->module());
return *Module::GetModuleNamespace(module, module_request);
@@ -19,7 +19,7 @@ RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(index, 0);
Handle<Module> module(isolate->context()->module());
return *Module::LoadVariable(module, index);
@@ -27,7 +27,7 @@ RUNTIME_FUNCTION(Runtime_LoadModuleVariable) {
RUNTIME_FUNCTION(Runtime_StoreModuleVariable) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(index, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
Handle<Module> module(isolate->context()->module());
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index bfe8763e99..4d8d5d267d 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -15,7 +15,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_IsValidSmi) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
return isolate->heap()->ToBoolean(Smi::IsValid(number));
@@ -73,7 +73,7 @@ RUNTIME_FUNCTION(Runtime_StringParseInt) {
// ES6 18.2.4 parseFloat(string)
RUNTIME_FUNCTION(Runtime_StringParseFloat) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
double value =
@@ -86,7 +86,7 @@ RUNTIME_FUNCTION(Runtime_StringParseFloat) {
RUNTIME_FUNCTION(Runtime_NumberToString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
return *isolate->factory()->NumberToString(number);
@@ -95,7 +95,7 @@ RUNTIME_FUNCTION(Runtime_NumberToString) {
RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
return *isolate->factory()->NumberToString(number, false);
@@ -106,7 +106,7 @@ RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
// a small integer.
RUNTIME_FUNCTION(Runtime_NumberToSmi) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
if (obj->IsSmi()) {
return obj;
@@ -126,7 +126,7 @@ RUNTIME_FUNCTION(Runtime_NumberToSmi) {
// compared lexicographically.
RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_SMI_ARG_CHECKED(x_value, 0);
CONVERT_SMI_ARG_CHECKED(y_value, 1);
@@ -200,14 +200,14 @@ RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
RUNTIME_FUNCTION(Runtime_MaxSmi) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return Smi::FromInt(Smi::kMaxValue);
}
RUNTIME_FUNCTION(Runtime_IsSmi) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsSmi());
}
@@ -215,21 +215,21 @@ RUNTIME_FUNCTION(Runtime_IsSmi) {
RUNTIME_FUNCTION(Runtime_GetRootNaN) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->heap()->nan_value();
}
RUNTIME_FUNCTION(Runtime_GetHoleNaNUpper) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return *isolate->factory()->NewNumberFromUint(kHoleNanUpper32);
}
RUNTIME_FUNCTION(Runtime_GetHoleNaNLower) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return *isolate->factory()->NewNumberFromUint(kHoleNanLower32);
}
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index c7e9cf3c92..e3518d3e09 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -19,7 +19,7 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
bool* is_found_out) {
- if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kNonObjectPropertyLoad, key, object),
@@ -63,7 +63,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
if (entry != GlobalDictionary::kNotFound) {
DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dictionary->ValueAt(entry));
- if (cell->property_details().type() == DATA) {
+ if (cell->property_details().kind() == kData) {
Object* value = cell->value();
if (!value->IsTheHole(isolate)) {
return Handle<Object>(value, isolate);
@@ -76,7 +76,7 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
NameDictionary* dictionary = receiver->property_dictionary();
int entry = dictionary->FindEntry(key);
if ((entry != NameDictionary::kNotFound) &&
- (dictionary->DetailsAt(entry).type() == DATA)) {
+ (dictionary->DetailsAt(entry).kind() == kData)) {
Object* value = dictionary->ValueAt(entry);
return Handle<Object>(value, isolate);
}
@@ -133,7 +133,7 @@ Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
// ES6 19.1.3.2
RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
HandleScope scope(isolate);
- Handle<Object> property = args.at<Object>(1);
+ Handle<Object> property = args.at(1);
Handle<Name> key;
uint32_t index;
@@ -145,7 +145,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
key_is_array_index = key->AsArrayIndex(&index);
}
- Handle<Object> object = args.at<Object>(0);
+ Handle<Object> object = args.at(0);
if (object->IsJSObject()) {
Handle<JSObject> js_obj = Handle<JSObject>::cast(object);
@@ -199,7 +199,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
key_is_array_index
? index < static_cast<uint32_t>(String::cast(*object)->length())
: key->Equals(isolate->heap()->length_string()));
- } else if (object->IsNull(isolate) || object->IsUndefined(isolate)) {
+ } else if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kUndefinedOrNullToObject));
}
@@ -212,7 +212,7 @@ RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
// an Object.create stub.
RUNTIME_FUNCTION(Runtime_ObjectCreate) {
HandleScope scope(isolate);
- Handle<Object> prototype = args.at<Object>(0);
+ Handle<Object> prototype = args.at(0);
if (!prototype->IsNull(isolate) && !prototype->IsJSReceiver()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kProtoObjectOrNull, prototype));
@@ -222,30 +222,8 @@ RUNTIME_FUNCTION(Runtime_ObjectCreate) {
// function's initial map from the current native context.
// TODO(bmeurer): Use a dedicated cache for Object.create; think about
// slack tracking for Object.create.
- Handle<Map> map(isolate->native_context()->object_function()->initial_map(),
- isolate);
- if (map->prototype() != *prototype) {
- if (prototype->IsNull(isolate)) {
- map = isolate->slow_object_with_null_prototype_map();
- } else if (prototype->IsJSObject()) {
- Handle<JSObject> js_prototype = Handle<JSObject>::cast(prototype);
- if (!js_prototype->map()->is_prototype_map()) {
- JSObject::OptimizeAsPrototype(js_prototype, FAST_PROTOTYPE);
- }
- Handle<PrototypeInfo> info =
- Map::GetOrCreatePrototypeInfo(js_prototype, isolate);
- // TODO(verwaest): Use inobject slack tracking for this map.
- if (info->HasObjectCreateMap()) {
- map = handle(info->ObjectCreateMap(), isolate);
- } else {
- map = Map::CopyInitialMap(map);
- Map::SetPrototype(map, prototype, FAST_PROTOTYPE);
- PrototypeInfo::SetObjectCreateMap(info, map);
- }
- } else {
- map = Map::TransitionToPrototype(map, prototype, REGULAR_PROTOTYPE);
- }
- }
+ Handle<Map> map =
+ Map::GetObjectCreateMap(Handle<HeapObject>::cast(prototype));
bool is_dictionary_map = map->is_dictionary_map();
Handle<FixedArray> object_properties;
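For context on the replacement above: the deleted block was an inlined per-prototype cache of Object.create maps, which Map::GetObjectCreateMap now owns. A minimal standalone sketch of that caching pattern, with illustrative names rather than the V8 API:

#include <cstdio>
#include <unordered_map>

// One map object per distinct prototype: build on first request, reuse after.
struct Map { const void* prototype; };

Map* GetObjectCreateMap(const void* prototype) {
  static std::unordered_map<const void*, Map> cache;
  auto it = cache.find(prototype);
  if (it == cache.end()) {
    it = cache.emplace(prototype, Map{prototype}).first;  // first Object.create(proto)
  }
  return &it->second;
}

int main() {
  int proto_a = 0, proto_b = 0;
  std::printf("%d\n", GetObjectCreateMap(&proto_a) == GetObjectCreateMap(&proto_a));  // 1: cached
  std::printf("%d\n", GetObjectCreateMap(&proto_a) == GetObjectCreateMap(&proto_b));  // 0: per-prototype
  return 0;
}

The real helper also handles the null prototype and non-JSObject transitions that the deleted branches covered; this sketch only models the cache hit/miss shape.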
@@ -262,7 +240,7 @@ RUNTIME_FUNCTION(Runtime_ObjectCreate) {
}
// Define the properties if properties was specified and is not undefined.
- Handle<Object> properties = args.at<Object>(1);
+ Handle<Object> properties = args.at(1);
if (!properties->IsUndefined(isolate)) {
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSReceiver::DefineProperties(isolate, object, properties));
@@ -276,7 +254,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> key,
Handle<Object> value,
LanguageMode language_mode) {
- if (object->IsUndefined(isolate) || object->IsNull(isolate)) {
+ if (object->IsNullOrUndefined(isolate)) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kNonObjectPropertyStore, key, object),
@@ -297,7 +275,7 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_GetPrototype) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
RETURN_RESULT_OR_FAILURE(isolate, JSReceiver::GetPrototype(isolate, obj));
}
@@ -305,7 +283,7 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
MAYBE_RETURN(
@@ -316,7 +294,7 @@ RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(properties, 1);
// Conservative upper limit to prevent fuzz tests from going OOM.
@@ -331,7 +309,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
RUNTIME_FUNCTION(Runtime_GetProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -343,7 +321,7 @@ RUNTIME_FUNCTION(Runtime_GetProperty) {
// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
@@ -503,7 +481,7 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_SMI_ARG_CHECKED(filter_value, 1);
PropertyFilter filter = static_cast<PropertyFilter>(filter_value);
@@ -522,7 +500,7 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyKeys) {
// args[0]: object
RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
if (!args[0]->IsJSObject()) {
return Smi::kZero;
}
@@ -538,7 +516,7 @@ RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
RUNTIME_FUNCTION(Runtime_ToFastProperties) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (object->IsJSObject() && !object->IsJSGlobalObject()) {
JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
@@ -550,7 +528,7 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return *isolate->factory()->NewHeapNumber(0);
}
@@ -566,7 +544,7 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Map, initial_map, 0);
initial_map->CompleteInobjectSlackTracking();
@@ -577,7 +555,7 @@ RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
CHECK((index->value() & 1) == 1);
@@ -596,7 +574,7 @@ RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
if (!object->IsJSObject()) return Smi::kZero;
Handle<JSObject> js_object = Handle<JSObject>::cast(object);
@@ -612,13 +590,13 @@ RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
}
static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
- return obj->IsUndefined(isolate) || obj->IsCallable() || obj->IsNull(isolate);
+ return obj->IsNullOrUndefined(isolate) || obj->IsCallable();
}
@@ -630,7 +608,7 @@ static bool IsValidAccessor(Isolate* isolate, Handle<Object> obj) {
// descriptor.
RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK_EQ(5, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CHECK(!obj->IsNull(isolate));
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -648,14 +626,36 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
HandleScope scope(isolate);
- DCHECK(args.length() == 5);
+ DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
+ CONVERT_SMI_ARG_CHECKED(flag, 3);
+ CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 4);
+ CONVERT_SMI_ARG_CHECKED(index, 5);
+
+ StoreDataPropertyInLiteralICNexus nexus(vector, vector->ToSlot(index));
+ if (nexus.ic_state() == UNINITIALIZED) {
+ if (name->IsUniqueName()) {
+ nexus.ConfigureMonomorphic(name, handle(object->map()));
+ } else {
+ nexus.ConfigureMegamorphic();
+ }
+ } else if (nexus.ic_state() == MONOMORPHIC) {
+ if (nexus.FindFirstMap() != object->map() ||
+ nexus.GetFeedbackExtra() != *name) {
+ nexus.ConfigureMegamorphic();
+ }
+ }
+
+ DataPropertyInLiteralFlags flags =
+ static_cast<DataPropertyInLiteralFlag>(flag);
- if (set_function_name) {
+ PropertyAttributes attrs = (flags & DataPropertyInLiteralFlag::kDontEnum)
+ ? PropertyAttributes::DONT_ENUM
+ : PropertyAttributes::NONE;
+
+ if (flags & DataPropertyInLiteralFlag::kSetFunctionName) {
DCHECK(value->IsJSFunction());
JSFunction::SetName(Handle<JSFunction>::cast(value), name,
isolate->factory()->empty_string());
@@ -671,42 +671,10 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyInLiteral) {
return *object;
}
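The feedback handling added above is a three-state transition. A standalone sketch of the same state machine (illustrative types, not the V8 nexus API):

#include <cstdio>

enum class IcState { kUninitialized, kMonomorphic, kMegamorphic };

struct Slot {
  IcState state = IcState::kUninitialized;
  const void* map = nullptr;
  const void* name = nullptr;

  void Record(const void* m, const void* n, bool name_is_unique) {
    if (state == IcState::kUninitialized) {
      // First observation: remember one (map, name) pair if the name can be
      // compared by identity, otherwise give up immediately.
      if (name_is_unique) { state = IcState::kMonomorphic; map = m; name = n; }
      else state = IcState::kMegamorphic;
    } else if (state == IcState::kMonomorphic && (map != m || name != n)) {
      state = IcState::kMegamorphic;  // any mismatch degrades the slot
    }
  }
};

int main() {
  int map1 = 0, map2 = 0, name1 = 0;
  Slot slot;
  slot.Record(&map1, &name1, true);  // -> monomorphic
  slot.Record(&map1, &name1, true);  // same pair: stays monomorphic
  slot.Record(&map2, &name1, true);  // new map: -> megamorphic
  std::printf("state=%d\n", static_cast<int>(slot.state));  // state=2
  return 0;
}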
-RUNTIME_FUNCTION(Runtime_DefineDataProperty) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_PROPERTY_ATTRIBUTES_CHECKED(attrs, 3);
- CONVERT_SMI_ARG_CHECKED(set_function_name, 4);
-
- if (set_function_name) {
- DCHECK(value->IsJSFunction());
- JSFunction::SetName(Handle<JSFunction>::cast(value), name,
- isolate->factory()->empty_string());
- }
-
- PropertyDescriptor desc;
- desc.set_writable(!(attrs & ReadOnly));
- desc.set_enumerable(!(attrs & DontEnum));
- desc.set_configurable(!(attrs & DontDelete));
- desc.set_value(value);
-
- Maybe<bool> result = JSReceiver::DefineOwnProperty(isolate, receiver, name,
- &desc, Object::DONT_THROW);
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsNothing()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->heap()->exception();
- }
-
- return *receiver;
-}
-
// Return property without being observable by accessors or interceptors.
RUNTIME_FUNCTION(Runtime_GetDataProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
return *JSReceiver::GetDataProperty(object, name);
@@ -714,17 +682,17 @@ RUNTIME_FUNCTION(Runtime_GetDataProperty) {
RUNTIME_FUNCTION(Runtime_GetConstructorName) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CHECK(!object->IsUndefined(isolate) && !object->IsNull(isolate));
+ CHECK(!object->IsNullOrUndefined(isolate));
Handle<JSReceiver> recv = Object::ToObject(isolate, object).ToHandleChecked();
return *JSReceiver::GetConstructorName(recv);
}
RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(HeapObject, obj, 0);
return isolate->heap()->ToBoolean(
IsFastPackedElementsKind(obj->map()->elements_kind()));
@@ -733,7 +701,7 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
RUNTIME_FUNCTION(Runtime_ValueOf) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
if (!obj->IsJSValue()) return obj;
return JSValue::cast(obj)->value();
@@ -742,7 +710,7 @@ RUNTIME_FUNCTION(Runtime_ValueOf) {
RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSReceiver());
}
@@ -750,7 +718,7 @@ RUNTIME_FUNCTION(Runtime_IsJSReceiver) {
RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
return JSReceiver::cast(obj)->class_name();
@@ -759,7 +727,7 @@ RUNTIME_FUNCTION(Runtime_ClassOf) {
RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, getter, 2);
@@ -776,10 +744,26 @@ RUNTIME_FUNCTION(Runtime_DefineGetterPropertyUnchecked) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_CopyDataProperties) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, source, 1);
+
+ // 2. If source is undefined or null, let keys be an empty List.
+ if (source->IsNullOrUndefined(isolate)) {
+ return isolate->heap()->undefined_value();
+ }
+
+ MAYBE_RETURN(
+ JSReceiver::SetOrCopyDataProperties(isolate, target, source, false),
+ isolate->heap()->exception());
+ return isolate->heap()->undefined_value();
+}
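Runtime_CopyDataProperties above backs object spread in literals, where a null or undefined source contributes no keys. A rough standalone analogue of that short-circuit (ordinary C++ maps standing in for JS objects):

#include <cstdio>
#include <map>
#include <string>

using Object = std::map<std::string, int>;

// nullptr plays the role of undefined/null: spreading it copies nothing.
void CopyDataProperties(Object& target, const Object* source) {
  if (source == nullptr) return;  // "let keys be an empty List"
  for (const auto& [key, value] : *source) target[key] = value;
}

int main() {
  Object target{{"a", 1}};
  CopyDataProperties(target, nullptr);  // no-op, no throw
  Object source{{"b", 2}};
  CopyDataProperties(target, &source);
  std::printf("%zu properties\n", target.size());  // 2 properties
  return 0;
}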
RUNTIME_FUNCTION(Runtime_DefineSetterPropertyUnchecked) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, setter, 2);
@@ -947,7 +931,7 @@ RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
RUNTIME_FUNCTION(Runtime_CreateDataProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, o, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
diff --git a/deps/v8/src/runtime/runtime-promise.cc b/deps/v8/src/runtime/runtime-promise.cc
index 226993a50e..ec340e5b0a 100644
--- a/deps/v8/src/runtime/runtime-promise.cc
+++ b/deps/v8/src/runtime/runtime-promise.cc
@@ -1,27 +1,28 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "src/runtime/runtime-utils.h"
#include "src/debug/debug.h"
#include "src/elements.h"
-#include "src/promise-utils.h"
namespace v8 {
namespace internal {
namespace {
-void PromiseRejectEvent(Isolate* isolate, Handle<JSReceiver> promise,
+void PromiseRejectEvent(Isolate* isolate, Handle<JSPromise> promise,
Handle<Object> rejected_promise, Handle<Object> value,
bool debug_event) {
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+
if (isolate->debug()->is_active() && debug_event) {
isolate->debug()->OnPromiseReject(rejected_promise, value);
}
- Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
- // Do not report if we actually have a handler.
- if (JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate)) {
+
+ // Report only if we don't actually have a handler.
+ if (!promise->has_handler()) {
isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
v8::kPromiseRejectWithNoHandler);
}
@@ -30,9 +31,9 @@ void PromiseRejectEvent(Isolate* isolate, Handle<JSReceiver> promise,
} // namespace
RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
Handle<Object> rejected_promise = promise;
@@ -41,142 +42,126 @@ RUNTIME_FUNCTION(Runtime_PromiseRejectEventFromStack) {
// undefined, which will be interpreted by PromiseRejectEvent
// as being a caught exception event.
rejected_promise = isolate->GetPromiseOnStackOnThrow();
+ isolate->debug()->OnAsyncTaskEvent(
+ debug::kDebugEnqueuePromiseReject,
+ isolate->debug()->NextAsyncTaskId(promise));
}
PromiseRejectEvent(isolate, promise, rejected_promise, value, true);
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_ReportPromiseReject) {
+ DCHECK_EQ(2, args.length());
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+ isolate->ReportPromiseReject(Handle<JSObject>::cast(promise), value,
+ v8::kPromiseRejectWithNoHandler);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- Handle<Symbol> key = isolate->factory()->promise_has_handler_symbol();
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
// At this point, no revocation has been issued before
- CHECK(JSReceiver::GetDataProperty(promise, key)->IsUndefined(isolate));
+ CHECK(!promise->has_handler());
isolate->ReportPromiseReject(promise, Handle<Object>(),
v8::kPromiseHandlerAddedAfterReject);
return isolate->heap()->undefined_value();
}
namespace {
-void EnqueuePromiseReactionJob(Isolate* isolate, Handle<Object> value,
- Handle<Object> tasks, Handle<Object> deferred,
- Handle<Object> status) {
- Handle<Object> debug_id = isolate->factory()->undefined_value();
- Handle<Object> debug_name = isolate->factory()->undefined_value();
- if (isolate->debug()->is_active()) {
- MaybeHandle<Object> maybe_result;
- Handle<Object> argv[] = {deferred, status};
- maybe_result = Execution::TryCall(
- isolate, isolate->promise_debug_get_info(),
- isolate->factory()->undefined_value(), arraysize(argv), argv);
- Handle<Object> result;
- if ((maybe_result).ToHandle(&result)) {
- CHECK(result->IsJSArray());
- Handle<JSArray> array = Handle<JSArray>::cast(result);
- ElementsAccessor* accessor = array->GetElementsAccessor();
- DCHECK(accessor->HasElement(array, 0));
- DCHECK(accessor->HasElement(array, 1));
- debug_id = accessor->Get(array, 0);
- debug_name = accessor->Get(array, 1);
- }
+
+// In an async function, reuse the existing stack related to the outer
+// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
+// Promises with multiple reactions with one or more of them being async
+// functions will not get a good stack trace, as async functions require
+// different stacks from direct Promise use, but we save and restore a
+// stack once for all reactions.
+//
+// If this isn't the async-function case, we return false; otherwise we
+// set the correct id and return true.
+//
+// TODO(littledan): Improve this case.
+bool GetDebugIdForAsyncFunction(Isolate* isolate,
+ Handle<PromiseReactionJobInfo> info,
+ int* debug_id) {
+ // deferred_promise can be Undefined, a FixedArray, or a userland promise
+ // object.
+ if (!info->deferred_promise()->IsJSPromise()) {
+ return false;
}
- Handle<PromiseReactionJobInfo> info =
- isolate->factory()->NewPromiseReactionJobInfo(value, tasks, deferred,
- debug_id, debug_name,
- isolate->native_context());
- isolate->EnqueueMicrotask(info);
-}
-void PromiseFulfill(Isolate* isolate, Handle<JSReceiver> promise,
- Handle<Smi> status, Handle<Object> value,
- Handle<Symbol> reaction) {
- Handle<Object> tasks = JSReceiver::GetDataProperty(promise, reaction);
- if (!tasks->IsUndefined(isolate)) {
- Handle<Object> deferred = JSReceiver::GetDataProperty(
- promise, isolate->factory()->promise_deferred_reaction_symbol());
- EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+ Handle<JSPromise> deferred_promise(JSPromise::cast(info->deferred_promise()),
+ isolate);
+ Handle<Symbol> handled_by_symbol =
+ isolate->factory()->promise_handled_by_symbol();
+ Handle<Object> handled_by_promise =
+ JSObject::GetDataProperty(deferred_promise, handled_by_symbol);
+
+ if (!handled_by_promise->IsJSPromise()) {
+ return false;
}
-}
-} // namespace
-RUNTIME_FUNCTION(Runtime_PromiseReject) {
- DCHECK(args.length() == 3);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, reason, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(debug_event, 2);
+ Handle<JSPromise> handled_by_promise_js =
+ Handle<JSPromise>::cast(handled_by_promise);
+ Handle<Symbol> async_stack_id_symbol =
+ isolate->factory()->promise_async_stack_id_symbol();
+ Handle<Object> id =
+ JSObject::GetDataProperty(handled_by_promise_js, async_stack_id_symbol);
- PromiseRejectEvent(isolate, promise, promise, reason, debug_event);
+ // id can be Undefined or Smi.
+ if (!id->IsSmi()) {
+ return false;
+ }
- Handle<Smi> status = handle(Smi::FromInt(kPromiseRejected), isolate);
- Handle<Symbol> reaction =
- isolate->factory()->promise_reject_reactions_symbol();
- PromiseFulfill(isolate, promise, status, reason, reaction);
- return isolate->heap()->undefined_value();
+ *debug_id = Handle<Smi>::cast(id)->value();
+ return true;
}
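The lookup above fails closed at every link. Modeled standalone, it is a two-hop chase through optional fields (names illustrative, not V8 types):

#include <cstdio>
#include <optional>

struct Promise {
  const Promise* handled_by = nullptr;  // promise_handled_by_symbol
  std::optional<int> async_stack_id;    // promise_async_stack_id_symbol
};

std::optional<int> DebugIdForAsyncFunction(const Promise* deferred) {
  if (deferred == nullptr) return std::nullopt;              // not a JSPromise
  if (deferred->handled_by == nullptr) return std::nullopt;  // no outer promise
  return deferred->handled_by->async_stack_id;  // empty unless an id was stored
}

int main() {
  Promise outer;
  outer.async_stack_id = 42;
  Promise deferred;
  deferred.handled_by = &outer;
  if (auto id = DebugIdForAsyncFunction(&deferred)) std::printf("id=%d\n", *id);
  return 0;
}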
-RUNTIME_FUNCTION(Runtime_PromiseFulfill) {
- DCHECK(args.length() == 4);
- HandleScope scope(isolate);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(Smi, status, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(Symbol, reaction, 3);
- PromiseFulfill(isolate, promise, status, value, reaction);
- return isolate->heap()->undefined_value();
+void SetDebugInfo(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<PromiseReactionJobInfo> info, int status) {
+ int id = kDebugPromiseNoID;
+ if (!GetDebugIdForAsyncFunction(isolate, info, &id)) {
+ id = isolate->debug()->NextAsyncTaskId(promise);
+ DCHECK_NE(v8::Promise::kPending, status);
+ }
+ info->set_debug_id(id);
}
+void EnqueuePromiseReactionJob(Isolate* isolate, Handle<JSPromise> promise,
+ Handle<PromiseReactionJobInfo> info,
+ int status) {
+ if (isolate->debug()->is_active()) {
+ SetDebugInfo(isolate, promise, info, status);
+ }
+
+ isolate->EnqueueMicrotask(info);
+}
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_EnqueuePromiseReactionJob) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, tasks, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, deferred, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, status, 3);
- EnqueuePromiseReactionJob(isolate, value, tasks, deferred, status);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(PromiseReactionJobInfo, info, 1);
+ CONVERT_SMI_ARG_CHECKED(status, 2);
+ EnqueuePromiseReactionJob(isolate, promise, info, status);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_EnqueuePromiseResolveThenableJob) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, resolution, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, then, 2);
-
- // TODO(gsathya): Add fast path for native promises with unmodified
- // PromiseThen (which don't need these resolving functions, but
- // instead can just call resolve/reject directly).
- Handle<JSFunction> resolve, reject;
- PromiseUtils::CreateResolvingFunctions(
- isolate, promise, isolate->factory()->false_value(), &resolve, &reject);
-
- Handle<Object> debug_id, debug_name;
- if (isolate->debug()->is_active()) {
- debug_id =
- handle(Smi::FromInt(isolate->GetNextDebugMicrotaskId()), isolate);
- debug_name = isolate->factory()->PromiseResolveThenableJob_string();
- isolate->debug()->OnAsyncTaskEvent(isolate->factory()->enqueue_string(),
- debug_id,
- Handle<String>::cast(debug_name));
- } else {
- debug_id = isolate->factory()->undefined_value();
- debug_name = isolate->factory()->undefined_value();
- }
-
- Handle<PromiseResolveThenableJobInfo> info =
- isolate->factory()->NewPromiseResolveThenableJobInfo(
- resolution, then, resolve, reject, debug_id, debug_name,
- isolate->native_context());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(PromiseResolveThenableJobInfo, info, 0);
isolate->EnqueueMicrotask(info);
-
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
isolate->EnqueueMicrotask(microtask);
return isolate->heap()->undefined_value();
@@ -184,10 +169,79 @@ RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
isolate->RunMicrotasks();
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_PromiseStatus) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+
+ return Smi::FromInt(promise->status());
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseResult) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ return promise->result();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseMarkAsHandled) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSPromise, promise, 0);
+
+ promise->set_has_handler(true);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseMarkHandledHint) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSPromise, promise, 0);
+
+ promise->set_handled_hint(true);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookInit) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, parent, 1);
+ isolate->RunPromiseHook(PromiseHookType::kInit, promise, parent);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookResolve) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->RunPromiseHook(PromiseHookType::kResolve, promise,
+ isolate->factory()->undefined_value());
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookBefore) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->RunPromiseHook(PromiseHookType::kBefore, promise,
+ isolate->factory()->undefined_value());
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_PromiseHookAfter) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSPromise, promise, 0);
+ isolate->RunPromiseHook(PromiseHookType::kAfter, promise,
+ isolate->factory()->undefined_value());
+ return isolate->heap()->undefined_value();
+}
+
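The four hook runtimes added above funnel into one embedder callback; the enumerator names (kInit, kResolve, kBefore, kAfter) come from the diff, while the driver below is only an illustrative standalone trace of the order an embedder typically sees per promise:

#include <cstdio>

enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };

void RunPromiseHook(PromiseHookType type, int promise_id) {
  static const char* const kNames[] = {"init", "resolve", "before", "after"};
  std::printf("promise %d: %s\n", promise_id, kNames[static_cast<int>(type)]);
}

int main() {
  RunPromiseHook(PromiseHookType::kInit, 1);     // new Promise(executor)
  RunPromiseHook(PromiseHookType::kResolve, 1);  // resolve(value)
  RunPromiseHook(PromiseHookType::kBefore, 1);   // entering a reaction job
  RunPromiseHook(PromiseHookType::kAfter, 1);    // leaving the reaction job
  return 0;
}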
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 87c7c9112b..de8231e2e9 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -44,7 +44,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyCall) {
// 6.a. Return Call(target, thisArgument, argumentsList).
ScopedVector<Handle<Object>> argv(arguments_length);
for (int i = 0; i < arguments_length; ++i) {
- argv[i] = args.at<Object>(i + 1);
+ argv[i] = args.at(i + 1);
}
RETURN_RESULT_OR_FAILURE(
isolate, Execution::Call(isolate, target, receiver, arguments_length,
@@ -100,7 +100,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
// 6.b. Return Construct(target, argumentsList, newTarget).
ScopedVector<Handle<Object>> argv(arguments_length);
for (int i = 0; i < arguments_length; ++i) {
- argv[i] = args.at<Object>(i + 1);
+ argv[i] = args.at(i + 1);
}
RETURN_RESULT_OR_FAILURE(
isolate, Execution::New(isolate, target, new_target, arguments_length,
@@ -135,7 +135,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyConstruct) {
RUNTIME_FUNCTION(Runtime_IsJSProxy) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSProxy());
}
@@ -143,7 +143,7 @@ RUNTIME_FUNCTION(Runtime_IsJSProxy) {
RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
return proxy->handler();
}
@@ -151,7 +151,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyGetHandler) {
RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
return proxy->target();
}
@@ -159,7 +159,7 @@ RUNTIME_FUNCTION(Runtime_JSProxyGetTarget) {
RUNTIME_FUNCTION(Runtime_JSProxyRevoke) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
JSProxy::Revoke(proxy);
return isolate->heap()->undefined_value();
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index d572eedd31..9a489ecff8 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -652,7 +652,7 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
if (!heap->lo_space()->Contains(*answer)) {
heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
}
- heap->AdjustLiveBytes(*answer, -delta, Heap::CONCURRENT_TO_SWEEPER);
+ heap->AdjustLiveBytes(*answer, -delta);
return *answer;
}
@@ -685,7 +685,7 @@ Object* StringReplaceGlobalRegExpWithStringHelper(
RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
@@ -698,7 +698,7 @@ RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
RUNTIME_FUNCTION(Runtime_StringSplit) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
@@ -781,7 +781,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
// RegExpCreate ( P, F )
RUNTIME_FUNCTION(Runtime_RegExpCreate) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, source_object, 0);
Handle<String> source;
@@ -806,7 +806,7 @@ RUNTIME_FUNCTION(Runtime_RegExpCreate) {
RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_INT32_ARG_CHECKED(index, 2);
@@ -822,7 +822,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExec) {
RUNTIME_FUNCTION(Runtime_RegExpInternalReplace) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
@@ -1237,7 +1237,7 @@ MUST_USE_RESULT MaybeHandle<String> RegExpReplace(Isolate* isolate,
// This is only called for StringReplaceGlobalRegExpWithFunction.
RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
HandleScope handles(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
@@ -1259,7 +1259,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
@@ -1269,16 +1269,232 @@ RUNTIME_FUNCTION(Runtime_StringReplaceNonGlobalRegExpWithFunction) {
isolate, subject, regexp, replace));
}
+namespace {
+
+// ES#sec-speciesconstructor
+// SpeciesConstructor ( O, defaultConstructor )
+MUST_USE_RESULT MaybeHandle<Object> SpeciesConstructor(
+ Isolate* isolate, Handle<JSReceiver> recv,
+ Handle<JSFunction> default_ctor) {
+ Handle<Object> ctor_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor_obj,
+ JSObject::GetProperty(recv, isolate->factory()->constructor_string()),
+ Object);
+
+ if (ctor_obj->IsUndefined(isolate)) return default_ctor;
+
+ if (!ctor_obj->IsJSReceiver()) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kConstructorNotReceiver),
+ Object);
+ }
+
+ Handle<JSReceiver> ctor = Handle<JSReceiver>::cast(ctor_obj);
+
+ Handle<Object> species;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, species,
+ JSObject::GetProperty(ctor, isolate->factory()->species_symbol()),
+ Object);
+
+ if (species->IsNullOrUndefined(isolate)) {
+ return default_ctor;
+ }
+
+ if (species->IsConstructor()) return species;
+
+ THROW_NEW_ERROR(
+ isolate, NewTypeError(MessageTemplate::kSpeciesNotConstructor), Object);
+}
+
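SpeciesConstructor above is a four-way ladder: an absent constructor and an absent species both fall back to the default, a constructor-valued species wins, and everything else throws. A standalone sketch of the non-throwing paths (plain pointers standing in for JS values):

#include <cstdio>

struct Ctor { const char* name; };

// nullptr models undefined (and, for species, null as well); the real code
// additionally throws TypeError for non-receiver / non-constructor values.
const Ctor* SpeciesConstructor(const Ctor* constructor, const Ctor* species,
                               const Ctor* default_ctor) {
  if (constructor == nullptr) return default_ctor;  // no .constructor
  if (species == nullptr) return default_ctor;      // no @@species
  return species;                                   // subclass override wins
}

int main() {
  Ctor regexp{"RegExp"}, custom{"MyRegExp"};
  std::printf("%s\n", SpeciesConstructor(&regexp, nullptr, &regexp)->name);  // RegExp
  std::printf("%s\n", SpeciesConstructor(&regexp, &custom, &regexp)->name);  // MyRegExp
  return 0;
}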
+MUST_USE_RESULT MaybeHandle<Object> ToUint32(Isolate* isolate,
+ Handle<Object> object,
+ uint32_t* out) {
+ if (object->IsUndefined(isolate)) {
+ *out = kMaxUInt32;
+ return object;
+ }
+
+ Handle<Object> number;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(object), Object);
+ *out = NumberToUint32(*number);
+ return object;
+}
+
+Handle<JSArray> NewJSArrayWithElements(Isolate* isolate,
+ Handle<FixedArray> elems,
+ int num_elems) {
+ elems->Shrink(num_elems);
+ return isolate->factory()->NewJSArrayWithElements(elems);
+}
+
+} // namespace
+
+// Slow path for:
+// ES#sec-regexp.prototype-@@split
+// RegExp.prototype [ @@split ] ( string, limit )
+RUNTIME_FUNCTION(Runtime_RegExpSplit) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+
+ DCHECK(args[1]->IsString());
+
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, limit_obj, 2);
+
+ Factory* factory = isolate->factory();
+
+ Handle<JSFunction> regexp_fun = isolate->regexp_function();
+ Handle<Object> ctor;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, ctor, SpeciesConstructor(isolate, recv, regexp_fun));
+
+ Handle<Object> flags_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, flags_obj, JSObject::GetProperty(recv, factory->flags_string()));
+
+ Handle<String> flags;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, flags,
+ Object::ToString(isolate, flags_obj));
+
+ Handle<String> u_str = factory->LookupSingleCharacterStringFromCode('u');
+ const bool unicode = (String::IndexOf(isolate, flags, u_str, 0) >= 0);
+
+ Handle<String> y_str = factory->LookupSingleCharacterStringFromCode('y');
+ const bool sticky = (String::IndexOf(isolate, flags, y_str, 0) >= 0);
+
+ Handle<String> new_flags = flags;
+ if (!sticky) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, new_flags,
+ factory->NewConsString(flags, y_str));
+ }
+
+ Handle<JSReceiver> splitter;
+ {
+ const int argc = 2;
+
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = recv;
+ argv[1] = new_flags;
+
+ Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
+ Handle<Object> splitter_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+
+ splitter = Handle<JSReceiver>::cast(splitter_obj);
+ }
+
+ uint32_t limit;
+ RETURN_FAILURE_ON_EXCEPTION(isolate, ToUint32(isolate, limit_obj, &limit));
+
+ const uint32_t length = string->length();
+
+ if (limit == 0) return *factory->NewJSArray(0);
+
+ if (length == 0) {
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+ factory->undefined_value()));
+
+ if (!result->IsNull(isolate)) return *factory->NewJSArray(0);
+
+ Handle<FixedArray> elems = factory->NewUninitializedFixedArray(1);
+ elems->set(0, *string);
+ return *factory->NewJSArrayWithElements(elems);
+ }
+
+ static const int kInitialArraySize = 8;
+ Handle<FixedArray> elems = factory->NewFixedArrayWithHoles(kInitialArraySize);
+ int num_elems = 0;
+
+ uint32_t string_index = 0;
+ uint32_t prev_string_index = 0;
+ while (string_index < length) {
+ RETURN_FAILURE_ON_EXCEPTION(
+ isolate, RegExpUtils::SetLastIndex(isolate, splitter, string_index));
+
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, RegExpUtils::RegExpExec(isolate, splitter, string,
+ factory->undefined_value()));
+
+ if (result->IsNull(isolate)) {
+ string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+ string_index, unicode);
+ continue;
+ }
+
+ Handle<Object> last_index_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, last_index_obj, RegExpUtils::GetLastIndex(isolate, splitter));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, last_index_obj, Object::ToLength(isolate, last_index_obj));
+
+ const uint32_t end =
+ std::min(PositiveNumberToUint32(*last_index_obj), length);
+ if (end == prev_string_index) {
+ string_index = RegExpUtils::AdvanceStringIndex(isolate, string,
+ string_index, unicode);
+ continue;
+ }
+
+ {
+ Handle<String> substr =
+ factory->NewSubString(string, prev_string_index, string_index);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ if (static_cast<uint32_t>(num_elems) == limit) {
+ return *NewJSArrayWithElements(isolate, elems, num_elems);
+ }
+ }
+
+ prev_string_index = end;
+
+ Handle<Object> num_captures_obj;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, num_captures_obj,
+ Object::GetProperty(result, isolate->factory()->length_string()));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, num_captures_obj, Object::ToLength(isolate, num_captures_obj));
+ const int num_captures = PositiveNumberToUint32(*num_captures_obj);
+
+ for (int i = 1; i < num_captures; i++) {
+ Handle<Object> capture;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, capture, Object::GetElement(isolate, result, i));
+ elems = FixedArray::SetAndGrow(elems, num_elems++, capture);
+ if (static_cast<uint32_t>(num_elems) == limit) {
+ return *NewJSArrayWithElements(isolate, elems, num_elems);
+ }
+ }
+
+ string_index = prev_string_index;
+ }
+
+ {
+ Handle<String> substr =
+ factory->NewSubString(string, prev_string_index, length);
+ elems = FixedArray::SetAndGrow(elems, num_elems++, substr);
+ }
+
+ return *NewJSArrayWithElements(isolate, elems, num_elems);
+}
+
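Runtime_RegExpSplit above is the spec loop for RegExp.prototype[@@split]. Restated standalone over a literal separator (a simplification, not V8 code), the three moving parts are the zero-progress guard, the slice between matches, and the early return once limit pieces exist:

#include <cstdio>
#include <string>
#include <vector>

std::vector<std::string> Split(const std::string& s, const std::string& sep,
                               size_t limit) {
  std::vector<std::string> out;
  if (limit == 0) return out;
  size_t prev = 0, pos = 0;
  while (pos < s.size()) {
    size_t hit = s.find(sep, pos);
    if (hit == std::string::npos || sep.empty()) { ++pos; continue; }  // "null result": advance
    size_t end = hit + sep.size();
    if (end == prev) { ++pos; continue; }  // zero-progress guard; mirrors the
                                           // runtime's zero-width-match check
    out.push_back(s.substr(prev, hit - prev));
    if (out.size() == limit) return out;   // stop as soon as limit is reached
    prev = end;
    pos = prev;
  }
  out.push_back(s.substr(prev));  // trailing piece after the last match
  return out;
}

int main() {
  for (const auto& piece : Split("a,b,,c", ",", 3))
    std::printf("[%s]\n", piece.c_str());  // [a] [b] []
  return 0;
}

Unlike this sketch, the runtime also copies capture groups into the result and advances by a full code point in unicode mode (AdvanceStringIndex).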
// Slow path for:
// ES#sec-regexp.prototype-@@replace
// RegExp.prototype [ @@replace ] ( string, replaceValue )
RUNTIME_FUNCTION(Runtime_RegExpReplace) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, recv, 0);
CONVERT_ARG_HANDLE_CHECKED(String, string, 1);
- Handle<Object> replace_obj = args.at<Object>(2);
+ Handle<Object> replace_obj = args.at(2);
Factory* factory = isolate->factory();
@@ -1291,7 +1507,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
replace_obj));
}
- const int length = string->length();
+ const uint32_t length = string->length();
const bool functional_replace = replace_obj->IsCallable();
Handle<String> replace;
@@ -1348,7 +1564,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
// TODO(jgruber): Look into ReplacementStringBuilder instead.
IncrementalStringBuilder builder(isolate);
- int next_source_position = 0;
+ uint32_t next_source_position = 0;
for (const auto& result : results) {
Handle<Object> captures_length_obj;
@@ -1359,8 +1575,7 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, captures_length_obj,
Object::ToLength(isolate, captures_length_obj));
- const int captures_length =
- std::max(Handle<Smi>::cast(captures_length_obj)->value(), 0);
+ const int captures_length = PositiveNumberToUint32(*captures_length_obj);
Handle<Object> match_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, match_obj,
@@ -1381,8 +1596,8 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
// 2^53 - 1 (at least for ToLength), we might actually need uint64_t here?
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, position_obj, Object::ToInteger(isolate, position_obj));
- const int position =
- std::max(std::min(Handle<Smi>::cast(position_obj)->value(), length), 0);
+ const uint32_t position =
+ std::min(PositiveNumberToUint32(*position_obj), length);
ZoneVector<Handle<Object>> captures(&zone);
for (int n = 0; n < captures_length; n++) {
@@ -1442,16 +1657,28 @@ RUNTIME_FUNCTION(Runtime_RegExpReplace) {
RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
Object* exception = isolate->pending_exception();
isolate->clear_pending_exception();
return isolate->ReThrow(exception);
}
+RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
+
+ RETURN_FAILURE_ON_EXCEPTION(isolate,
+ JSRegExp::Initialize(regexp, source, flags));
+
+ return *regexp;
+}
RUNTIME_FUNCTION(Runtime_IsRegExp) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(obj->IsJSRegExp());
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 377799fe04..6dae7dd609 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -88,8 +88,7 @@ Object* DeclareGlobal(
// function.
PropertyDetails old_details = it.property_details();
if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
- (it.state() == LookupIterator::ACCESSOR &&
- it.GetAccessors()->IsAccessorPair())) {
+ (it.state() == LookupIterator::ACCESSOR)) {
// ECMA-262 section 15.1.11 GlobalDeclarationInstantiation 5.d:
// If hasRestrictedGlobal is true, throw a SyntaxError exception.
// ECMA-262 section 18.2.1.3 EvalDeclarationInstantiation 8.a.iv.1.b:
@@ -130,18 +129,18 @@ Object* DeclareGlobal(
return isolate->heap()->undefined_value();
}
-Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> pairs, int flags,
- Handle<TypeFeedbackVector> feedback_vector) {
+Object* DeclareGlobals(Isolate* isolate, Handle<FixedArray> declarations,
+ int flags, Handle<TypeFeedbackVector> feedback_vector) {
HandleScope scope(isolate);
Handle<JSGlobalObject> global(isolate->global_object());
Handle<Context> context(isolate->context());
// Traverse the name/value pairs and set the properties.
- int length = pairs->length();
- FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 2, {
- FeedbackVectorSlot slot(Smi::cast(pairs->get(i))->value());
- Handle<String> name(feedback_vector->GetName(slot), isolate);
- Handle<Object> initial_value(pairs->get(i + 1), isolate);
+ int length = declarations->length();
+ FOR_WITH_HANDLE_SCOPE(isolate, int, i = 0, i, i < length, i += 3, {
+ Handle<String> name(String::cast(declarations->get(i)), isolate);
+ FeedbackVectorSlot slot(Smi::cast(declarations->get(i + 1))->value());
+ Handle<Object> initial_value(declarations->get(i + 2), isolate);
bool is_var = initial_value->IsUndefined(isolate);
bool is_function = initial_value->IsSharedFunctionInfo();
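The new layout walked above stores three slots per declaration instead of two, which is why the loop stride changed from 2 to 3. A tiny standalone model of one triple (illustrative types only):

#include <cstdio>

struct Declaration {
  const char* name;           // declarations->get(i)
  int feedback_slot;          // declarations->get(i + 1)
  const void* initial_value;  // declarations->get(i + 2)
};

int main() {
  Declaration declarations[] = {
      {"x", 0, nullptr},           // var x;          undefined initial value
      {"f", 1, (const void*)0x1},  // function f(){}  SharedFunctionInfo stand-in
  };
  for (const Declaration& d : declarations) {
    bool is_var = (d.initial_value == nullptr);
    std::printf("%s -> slot %d (%s)\n", d.name, d.feedback_slot,
                is_var ? "var" : "function");
  }
  return 0;
}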
@@ -186,11 +185,11 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, feedback_vector, 2);
- return DeclareGlobals(isolate, pairs, flags, feedback_vector);
+ return DeclareGlobals(isolate, declarations, flags, feedback_vector);
}
// TODO(ishell): merge this with Runtime::kDeclareGlobals once interpreter
@@ -199,13 +198,13 @@ RUNTIME_FUNCTION(Runtime_DeclareGlobalsForInterpreter) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, declarations, 0);
CONVERT_SMI_ARG_CHECKED(flags, 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, closure, 2);
Handle<TypeFeedbackVector> feedback_vector(closure->feedback_vector(),
isolate);
- return DeclareGlobals(isolate, pairs, flags, feedback_vector);
+ return DeclareGlobals(isolate, declarations, flags, feedback_vector);
}
RUNTIME_FUNCTION(Runtime_InitializeVarGlobal) {
@@ -224,15 +223,15 @@ namespace {
Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
Handle<Object> value) {
- // Declarations are always made in a function, native, or script context, or
- // a declaration block scope. Since this is called from eval, the context
- // passed is the context of the caller, which may be some nested context and
- // not the declaration context.
+ // Declarations are always made in a function, native, eval, or script
+ // context, or a declaration block scope. Since this is called from eval, the
+ // context passed is the context of the caller, which may be some nested
+ // context and not the declaration context.
Handle<Context> context_arg(isolate->context(), isolate);
Handle<Context> context(context_arg->declaration_context(), isolate);
DCHECK(context->IsFunctionContext() || context->IsNativeContext() ||
- context->IsScriptContext() ||
+ context->IsScriptContext() || context->IsEvalContext() ||
(context->IsBlockContext() && context->has_extension()));
bool is_function = value->IsJSFunction();
@@ -313,6 +312,8 @@ Object* DeclareEvalHelper(Isolate* isolate, Handle<String> name,
}
DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject());
} else {
+ // Sloppy eval will never have an extension object, as vars are hoisted out,
+ // and lets are known statically.
DCHECK(context->IsFunctionContext());
object =
isolate->factory()->NewJSObject(isolate->context_extension_function());
@@ -352,7 +353,7 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
- List<JSFunction*> functions(2);
+ List<SharedFunctionInfo*> functions(2);
frame->GetFunctions(&functions);
if (functions.length() > 1) {
int inlined_jsframe_index = functions.length() - 1;
@@ -377,6 +378,8 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
NewArray<Handle<Object>>(*total_argc));
bool should_deoptimize = false;
for (int i = 0; i < argument_count; i++) {
+ // If we materialize any object, we should deoptimize the frame because we
+ // might alias an object that was eliminated by escape analysis.
should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
Handle<Object> value = iter->GetValue();
param_data[i] = value;
@@ -384,7 +387,7 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
}
if (should_deoptimize) {
- translated_values.StoreMaterializedValuesAndDeopt();
+ translated_values.StoreMaterializedValuesAndDeopt(frame);
}
return param_data;
@@ -407,7 +410,7 @@ std::unique_ptr<Handle<Object>[]> GetCallerArguments(Isolate* isolate,
template <typename T>
Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
T parameters, int argument_count) {
- CHECK(!IsSubclassConstructor(callee->shared()->kind()));
+ CHECK(!IsDerivedConstructor(callee->shared()->kind()));
DCHECK(callee->shared()->has_simple_parameters());
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -517,7 +520,7 @@ class ParameterArguments BASE_EMBEDDED {
RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
@@ -582,7 +585,7 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
Object** parameters = reinterpret_cast<Object**>(args[1]);
CONVERT_SMI_ARG_CHECKED(argument_count, 2);
@@ -590,26 +593,45 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
+RUNTIME_FUNCTION(Runtime_NewArgumentsElements) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Object** frame = reinterpret_cast<Object**>(args[0]);
+ CONVERT_SMI_ARG_CHECKED(length, 1);
+ Handle<FixedArray> result =
+ isolate->factory()->NewUninitializedFixedArray(length);
+ int const offset = length + 1;
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < length; ++index) {
+ result->set(index, frame[offset - index], mode);
+ }
+ return *result;
+}
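Runtime_NewArgumentsElements above copies arguments out of the stack frame, where they sit in reverse order; the offset of length + 1 addresses the slot just past the arguments so that frame[offset - index] walks them back into source order. An index-only standalone model:

#include <cstdio>

int main() {
  const int length = 3;
  // Arguments as they sit in the frame region: last argument first.
  const int args_in_frame[] = {30, 20, 10};  // arg2, arg1, arg0
  int elements[3];
  for (int index = 0; index < length; ++index) {
    // Same idea as result->set(index, frame[offset - index]): read the
    // region backwards so elements[] comes out in argument order.
    elements[index] = args_in_frame[length - 1 - index];
  }
  for (int v : elements) std::printf("%d ", v);  // 10 20 30
  std::printf("\n");
  return 0;
}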
RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
Handle<Context> context(isolate->context(), isolate);
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- NOT_TENURED);
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+ NOT_TENURED);
+ return *function;
}
RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
Handle<Context> context(isolate->context(), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
- return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- TENURED);
+ Handle<JSFunction> function =
+ isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+ TENURED);
+ return *function;
}
static Object* FindNameClash(Handle<ScopeInfo> scope_info,
@@ -654,7 +676,7 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
RUNTIME_FUNCTION(Runtime_NewScriptContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
@@ -670,8 +692,9 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
// Script contexts have a canonical empty function as their closure, not the
// anonymous closure containing the global code. See
// FullCodeGenerator::PushFunctionArgumentForContextAllocation.
- Handle<JSFunction> closure(
- function->shared()->IsBuiltin() ? *function : native_context->closure());
+ Handle<JSFunction> closure(function->shared()->IsUserJavaScript()
+ ? native_context->closure()
+ : *function);
Handle<Context> result =
isolate->factory()->NewScriptContext(closure, scope_info);
@@ -684,19 +707,19 @@ RUNTIME_FUNCTION(Runtime_NewScriptContext) {
return *result;
}
-
RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+ CONVERT_SMI_ARG_CHECKED(scope_type, 1);
DCHECK(function->context() == isolate->context());
int length = function->shared()->scope_info()->ContextLength();
- return *isolate->factory()->NewFunctionContext(length, function);
+ return *isolate->factory()->NewFunctionContext(
+ length, function, static_cast<ScopeType>(scope_type));
}
-
RUNTIME_FUNCTION(Runtime_PushWithContext) {
HandleScope scope(isolate);
DCHECK_EQ(3, args.length());
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
index 9542a4420a..067e9d680d 100644
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -160,7 +160,7 @@ inline float MaxNumber(float a, float b) {
RUNTIME_FUNCTION(Runtime_IsSimdValue) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
return isolate->heap()->ToBoolean(args[0]->IsSimd128Value());
}
@@ -171,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
// TODO(gdeepti): Fix to use ToNumber conversion once polyfill is updated.
#define CONVERT_SIMD_LANE_ARG_CHECKED(name, index, lanes) \
- Handle<Object> name_object = args.at<Object>(index); \
+ Handle<Object> name_object = args.at(index); \
if (!name_object->IsNumber()) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
@@ -194,7 +194,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
#define SIMD_UNARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
@@ -204,7 +204,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
#define SIMD_BINARY_OP(type, lane_type, lane_count, op, result) \
static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
lane_type lanes[kLaneCount]; \
@@ -215,7 +215,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
#define SIMD_RELATIONAL_OP(type, bool_type, lane_count, a, b, op, result) \
static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 1); \
bool lanes[kLaneCount]; \
@@ -228,10 +228,10 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
// Common functions.
-#define GET_NUMERIC_ARG(lane_type, name, index) \
- Handle<Object> a; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
- isolate, a, Object::ToNumber(args.at<Object>(index))); \
+#define GET_NUMERIC_ARG(lane_type, name, index) \
+ Handle<Object> a; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, a, \
+ Object::ToNumber(args.at(index))); \
name = ConvertNumber<lane_type>(a->Number());
#define GET_BOOLEAN_ARG(lane_type, name, index) \
@@ -264,7 +264,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
#define SIMD_EXTRACT_FUNCTION(type, lane_type, lane_count, extract, replace) \
RUNTIME_FUNCTION(Runtime_##type##ExtractLane) { \
HandleScope scope(isolate); \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, lane_count); \
return *isolate->factory()->extract(a->get_lane(lane)); \
@@ -274,7 +274,7 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
RUNTIME_FUNCTION(Runtime_##type##ReplaceLane) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 3); \
+ DCHECK_EQ(3, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, simd, 0); \
CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, kLaneCount); \
lane_type lanes[kLaneCount]; \
@@ -409,7 +409,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
FUNCTION(Uint8x16, uint8_t, 8, 16)
#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
- Handle<Object> name_object = args.at<Object>(index); \
+ Handle<Object> name_object = args.at(index); \
if (!name_object->IsNumber()) { \
THROW_NEW_ERROR_RETURN_FAILURE( \
isolate, NewTypeError(MessageTemplate::kInvalidSimdOperation)); \
@@ -422,7 +422,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
RUNTIME_FUNCTION(Runtime_##type##ShiftLeftByScalar) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
@@ -438,7 +438,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
lane_type lanes[kLaneCount] = {0}; \
@@ -455,7 +455,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
CONVERT_SHIFT_ARG_CHECKED(shift, 1); \
shift &= lane_bits - 1; \
@@ -485,7 +485,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
#define SIMD_ANY_FUNCTION(type, lane_count) \
RUNTIME_FUNCTION(Runtime_##type##AnyTrue) { \
HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = false; \
for (int i = 0; i < lane_count; i++) { \
@@ -500,7 +500,7 @@ SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
#define SIMD_ALL_FUNCTION(type, lane_count) \
RUNTIME_FUNCTION(Runtime_##type##AllTrue) { \
HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 0); \
bool result = true; \
for (int i = 0; i < lane_count; i++) { \
@@ -742,7 +742,7 @@ SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##Select) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 3); \
+ DCHECK_EQ(3, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(bool_type, mask, 0); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 1); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, b, 2); \
@@ -795,7 +795,7 @@ SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##From##from_type) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
@@ -860,7 +860,7 @@ SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
RUNTIME_FUNCTION(Runtime_##type##From##from_type##Bits) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
- DCHECK(args.length() == 1); \
+ DCHECK_EQ(1, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(from_type, a, 0); \
lane_type lanes[kLaneCount]; \
a->CopyBits(lanes); \
@@ -880,23 +880,23 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
FUNCTION(Int32x4, int32_t, 4) \
FUNCTION(Uint32x4, uint32_t, 4)
-#define SIMD_COERCE_INDEX(name, i) \
- Handle<Object> length_object, number_object; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION( \
- isolate, length_object, Object::ToLength(isolate, args.at<Object>(i))); \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object, \
- Object::ToNumber(args.at<Object>(i))); \
- if (number_object->Number() != length_object->Number()) { \
- THROW_NEW_ERROR_RETURN_FAILURE( \
- isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
- } \
+#define SIMD_COERCE_INDEX(name, i) \
+ Handle<Object> length_object, number_object; \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, length_object, \
+ Object::ToLength(isolate, args.at(i))); \
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_object, \
+ Object::ToNumber(args.at(i))); \
+ if (number_object->Number() != length_object->Number()) { \
+ THROW_NEW_ERROR_RETURN_FAILURE( \
+ isolate, NewTypeError(MessageTemplate::kInvalidSimdIndex)); \
+ } \
int32_t name = number_object->Number();
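The two conversions above are what reject non-integral indices: ToLength truncates and clamps while ToNumber does not, so any disagreement throws kInvalidSimdIndex. Illustrative values (not part of the patch):
//   args.at(i) = 2    -> ToLength: 2, ToNumber: 2    -> name = 2
//   args.at(i) = 2.5  -> ToLength: 2, ToNumber: 2.5  -> kInvalidSimdIndex
//   args.at(i) = -1   -> ToLength: 0, ToNumber: -1   -> kInvalidSimdIndex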
// Common Load and Store Functions
#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 2); \
+ DCHECK_EQ(2, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
SIMD_COERCE_INDEX(index, 1); \
size_t bpe = tarray->element_size(); \
@@ -916,7 +916,7 @@ SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
#define SIMD_STORE(type, lane_type, lane_count, count, a) \
static const int kLaneCount = lane_count; \
- DCHECK(args.length() == 3); \
+ DCHECK_EQ(3, args.length()); \
CONVERT_SIMD_ARG_HANDLE_THROW(JSTypedArray, tarray, 0); \
CONVERT_SIMD_ARG_HANDLE_THROW(type, a, 2); \
SIMD_COERCE_INDEX(index, 1); \
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 328bdceb37..31d9f1fc6e 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/string-builder.h"
+#include "src/string-case.h"
#include "src/string-search.h"
namespace v8 {
@@ -60,7 +61,7 @@ MaybeHandle<String> StringReplaceOneCharWithString(
RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
@@ -86,23 +87,38 @@ RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
return isolate->StackOverflow();
}
-
+// ES6 #sec-string.prototype.indexof
+// String.prototype.indexOf(searchString [, position])
RUNTIME_FUNCTION(Runtime_StringIndexOf) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- return String::IndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
- args.at<Object>(2));
+ DCHECK_EQ(3, args.length());
+ return String::IndexOf(isolate, args.at(0), args.at(1), args.at(2));
+}
+
+// ES6 #sec-string.prototype.indexof
+// String.prototype.indexOf(searchString, position)
+// Fast version that assumes the incoming arguments are already strings and
+// therefore performs no conversions on them.
+RUNTIME_FUNCTION(Runtime_StringIndexOfUnchecked) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ Handle<String> receiver_string = args.at<String>(0);
+ Handle<String> search_string = args.at<String>(1);
+ int index = std::min(std::max(args.smi_at(2), 0), receiver_string->length());
+
+ return Smi::FromInt(String::IndexOf(isolate, receiver_string, search_string,
+ static_cast<uint32_t>(index)));
}
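Like every Runtime_ entry, this is reachable from tests through natives syntax; a hypothetical d8 session, assuming string arguments as the comment above requires:
// d8 --allow-natives-syntax
//   %StringIndexOfUnchecked('abcabc', 'bc', 4)   // -> 4
//   %StringIndexOfUnchecked('abcabc', 'bc', 99)  // position clamped to 6 -> -1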
RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
HandleScope handle_scope(isolate);
- return String::LastIndexOf(isolate, args.at<Object>(0), args.at<Object>(1),
+ return String::LastIndexOf(isolate, args.at(0), args.at(1),
isolate->factory()->undefined_value());
}
RUNTIME_FUNCTION(Runtime_SubString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
int start, end;
@@ -134,7 +150,7 @@ RUNTIME_FUNCTION(Runtime_SubString) {
RUNTIME_FUNCTION(Runtime_StringAdd) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, obj1, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, obj2, 1);
isolate->counters()->string_add_runtime()->Increment();
@@ -151,7 +167,7 @@ RUNTIME_FUNCTION(Runtime_StringAdd) {
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
return *isolate->factory()->InternalizeString(string);
}
@@ -159,7 +175,7 @@ RUNTIME_FUNCTION(Runtime_InternalizeString) {
RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
@@ -200,7 +216,7 @@ RUNTIME_FUNCTION(Runtime_StringCompare) {
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
int32_t array_length;
if (!args[1]->ToInt32(&array_length)) {
@@ -270,7 +286,7 @@ RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
int32_t array_length;
if (!args[1]->ToInt32(&array_length)) {
@@ -411,7 +427,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
@@ -529,7 +545,7 @@ static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
// For example, "foo" => ["f", "o", "o"].
RUNTIME_FUNCTION(Runtime_StringToArray) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
@@ -679,122 +695,6 @@ MUST_USE_RESULT static Object* ConvertCaseHelper(
}
}
-
-static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
-
-// Given a word and two range boundaries returns a word with high bit
-// set in every byte iff the corresponding input byte was strictly in
-// the range (m, n). All the other bits in the result are cleared.
-// This function is only useful when it can be inlined and the
-// boundaries are statically known.
-// Requires: all bytes in the input word and the boundaries must be
-// ASCII (less than 0x7F).
-static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Use strict inequalities since in edge cases the function could be
- // further simplified.
- DCHECK(0 < m && m < n);
- // Has high bit set in every w byte less than n.
- uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
- // Has high bit set in every w byte greater than m.
- uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
- return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
-}
-
-
-#ifdef DEBUG
-static bool CheckFastAsciiConvert(char* dst, const char* src, int length,
- bool changed, bool is_to_lower) {
- bool expected_changed = false;
- for (int i = 0; i < length; i++) {
- if (dst[i] == src[i]) continue;
- expected_changed = true;
- if (is_to_lower) {
- DCHECK('A' <= src[i] && src[i] <= 'Z');
- DCHECK(dst[i] == src[i] + ('a' - 'A'));
- } else {
- DCHECK('a' <= src[i] && src[i] <= 'z');
- DCHECK(dst[i] == src[i] - ('a' - 'A'));
- }
- }
- return (expected_changed == changed);
-}
-#endif
-
-
-template <class Converter>
-static bool FastAsciiConvert(char* dst, const char* src, int length,
- bool* changed_out) {
-#ifdef DEBUG
- char* saved_dst = dst;
- const char* saved_src = src;
-#endif
- DisallowHeapAllocation no_gc;
- // We rely on the distance between upper and lower case letters
- // being a known power of 2.
- DCHECK('a' - 'A' == (1 << 5));
- // Boundaries for the range of input characters that require conversion.
- static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
- static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
- bool changed = false;
- uintptr_t or_acc = 0;
- const char* const limit = src + length;
-
- // dst is newly allocated and always aligned.
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
- // Only attempt processing one word at a time if src is also aligned.
- if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
- // Process the prefix of the input that requires no conversion one aligned
- // (machine) word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- if (AsciiRangeMask(w, lo, hi) != 0) {
- changed = true;
- break;
- }
- *reinterpret_cast<uintptr_t*>(dst) = w;
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- // Process the remainder of the input performing conversion when
- // required one word at a time.
- while (src <= limit - sizeof(uintptr_t)) {
- const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
- or_acc |= w;
- uintptr_t m = AsciiRangeMask(w, lo, hi);
- // The mask has high (7th) bit set in every byte that needs
- // conversion and we know that the distance between cases is
- // 1 << 5.
- *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
- src += sizeof(uintptr_t);
- dst += sizeof(uintptr_t);
- }
- }
- // Process the last few bytes of the input (or the whole input if
- // unaligned access is not supported).
- while (src < limit) {
- char c = *src;
- or_acc |= c;
- if (lo < c && c < hi) {
- c ^= (1 << 5);
- changed = true;
- }
- *dst = c;
- ++src;
- ++dst;
- }
-
- if ((or_acc & kAsciiMask) != 0) return false;
-
- DCHECK(CheckFastAsciiConvert(saved_dst, saved_src, length, changed,
- Converter::kIsToLower));
-
- *changed_out = changed;
- return true;
-}
-
-
template <class Converter>
MUST_USE_RESULT static Object* ConvertCase(
Handle<String> s, Isolate* isolate,
@@ -818,12 +718,13 @@ MUST_USE_RESULT static Object* ConvertCase(
String::FlatContent flat_content = s->GetFlatContent();
DCHECK(flat_content.IsFlat());
bool has_changed_character = false;
- bool is_ascii = FastAsciiConvert<Converter>(
+ int index_to_first_unprocessed = FastAsciiConvert<Converter::kIsToLower>(
reinterpret_cast<char*>(result->GetChars()),
reinterpret_cast<const char*>(flat_content.ToOneByteVector().start()),
length, &has_changed_character);
// If not ASCII, we discard the result and take the 2 byte path.
- if (is_ascii) return has_changed_character ? *result : *s;
+ if (index_to_first_unprocessed == length)
+ return has_changed_character ? *result : *s;
}
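A note on the changed contract, inferred from this call site: FastAsciiConvert (now included from src/string-case.h above) used to return a bool and now reports how far it got.
//   index_to_first_unprocessed == length : input was all ASCII; use
//     'result', or 's' when nothing changed.
//   index_to_first_unprocessed <  length : a non-ASCII byte was hit; the
//     one-byte result is discarded and the two-byte path below runs.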
Handle<SeqString> result; // Same length as input.
@@ -857,7 +758,6 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
return ConvertCase(s, isolate, isolate->runtime_state()->to_lower_mapping());
}
-
RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
@@ -955,7 +855,7 @@ RUNTIME_FUNCTION(Runtime_StringNotEqual) {
RUNTIME_FUNCTION(Runtime_FlattenString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
return *String::Flatten(str);
}
@@ -982,7 +882,7 @@ RUNTIME_FUNCTION(Runtime_ExternalStringGetChar) {
RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
if (!args[0]->IsString()) return isolate->heap()->undefined_value();
if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 300a6439b1..2eaef63bbf 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -14,7 +14,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_CreateSymbol) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
CHECK(name->IsString() || name->IsUndefined(isolate));
Handle<Symbol> symbol = isolate->factory()->NewSymbol();
@@ -25,7 +25,7 @@ RUNTIME_FUNCTION(Runtime_CreateSymbol) {
RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
CHECK(name->IsString() || name->IsUndefined(isolate));
Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
@@ -36,7 +36,7 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
return symbol->name();
}
@@ -56,16 +56,9 @@ RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
}
-RUNTIME_FUNCTION(Runtime_SymbolRegistry) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- return *isolate->GetSymbolRegistry();
-}
-
-
RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Symbol, symbol, 0);
return isolate->heap()->ToBoolean(symbol->is_private());
}
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 7054192a0f..bea7245c35 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -19,12 +19,51 @@
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
+namespace {
+struct WasmCompileControls {
+ uint32_t MaxWasmBufferSize = std::numeric_limits<uint32_t>::max();
+ bool AllowAnySizeForAsync = true;
+};
+
+// We need per-isolate controls, because we sometimes run tests in multiple
+// isolates concurrently.
+// To avoid upsetting the static initializer count, we lazily initialize this.
+v8::base::LazyInstance<std::map<v8::Isolate*, WasmCompileControls>>::type
+ g_PerIsolateWasmControls = LAZY_INSTANCE_INITIALIZER;
+
+bool IsWasmCompileAllowed(v8::Isolate* isolate, v8::Local<v8::Value> value,
+ bool is_async) {
+ DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
+ const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+ return (is_async && ctrls.AllowAnySizeForAsync) ||
+ (v8::Local<v8::ArrayBuffer>::Cast(value)->ByteLength() <=
+ ctrls.MaxWasmBufferSize);
+}
+
+// Use the compile controls for instantiation, too.
+bool IsWasmInstantiateAllowed(v8::Isolate* isolate,
+ v8::Local<v8::Value> module_or_bytes,
+ v8::MaybeLocal<v8::Value> ffi, bool is_async) {
+ DCHECK_GT(g_PerIsolateWasmControls.Get().count(isolate), 0);
+ const WasmCompileControls& ctrls = g_PerIsolateWasmControls.Get().at(isolate);
+ if (is_async && ctrls.AllowAnySizeForAsync) return true;
+ if (!module_or_bytes->IsWebAssemblyCompiledModule()) {
+ return IsWasmCompileAllowed(isolate, module_or_bytes, is_async);
+ }
+ v8::Local<v8::WasmCompiledModule> module =
+ v8::Local<v8::WasmCompiledModule>::Cast(module_or_bytes);
+ return static_cast<uint32_t>(module->GetWasmWireBytes()->Length()) <=
+ ctrls.MaxWasmBufferSize;
+}
+} // namespace
+
namespace v8 {
namespace internal {
RUNTIME_FUNCTION(Runtime_ConstructDouble) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
@@ -33,7 +72,7 @@ RUNTIME_FUNCTION(Runtime_ConstructDouble) {
RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
// This function is used by fuzzers to get coverage in compiler.
// Ignore calls on non-function objects to avoid runtime errors.
@@ -48,7 +87,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
// TODO(turbofan): Deoptimization is not supported yet.
if (function->code()->is_turbofanned() &&
- function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
+ function->shared()->asm_function()) {
return isolate->heap()->undefined_value();
}
@@ -60,7 +99,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
Handle<JSFunction> function;
@@ -74,7 +113,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
// TODO(turbofan): Deoptimization is not supported yet.
if (function->code()->is_turbofanned() &&
- function->shared()->asm_function() && !FLAG_turbo_asm_deoptimization) {
+ function->shared()->asm_function()) {
return isolate->heap()->undefined_value();
}
@@ -86,7 +125,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
#if defined(USE_SIMULATOR)
return isolate->heap()->true_value();
#else
@@ -97,7 +136,7 @@ RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return isolate->heap()->ToBoolean(
isolate->concurrent_recompilation_enabled());
}
@@ -127,6 +166,12 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
return isolate->heap()->undefined_value();
}
+ // If function isn't compiled, compile it now.
+ if (!function->shared()->is_compiled() &&
+ !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+ return isolate->heap()->undefined_value();
+ }
+
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -146,7 +191,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
RUNTIME_FUNCTION(Runtime_InterpretFunctionOnNextCall) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
return isolate->heap()->undefined_value();
@@ -164,13 +209,19 @@ RUNTIME_FUNCTION(Runtime_InterpretFunctionOnNextCall) {
RUNTIME_FUNCTION(Runtime_BaselineFunctionOnNextCall) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
if (!function_object->IsJSFunction()) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+ // If function isn't compiled, compile it now.
+ if (!function->shared()->is_compiled() &&
+ !Compiler::Compile(function, Compiler::CLEAR_EXCEPTION)) {
+ return isolate->heap()->undefined_value();
+ }
+
// Do not tier down if we are already on optimized code. Replacing optimized
// code without actual deoptimization can lead to funny bugs.
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION &&
@@ -216,7 +267,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, function, 0);
function->shared()->set_disable_optimization_reason(
kOptimizationDisabledForTest);
@@ -277,7 +328,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
if (FLAG_block_concurrent_recompilation &&
isolate->concurrent_recompilation_enabled()) {
isolate->optimizing_compile_dispatcher()->Unblock();
@@ -288,19 +339,23 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
return Smi::FromInt(function->shared()->opt_count());
}
+static void ReturnThis(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ args.GetReturnValue().Set(args.This());
+}
RUNTIME_FUNCTION(Runtime_GetUndetectable) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Local<v8::ObjectTemplate> desc = v8::ObjectTemplate::New(v8_isolate);
desc->MarkAsUndetectable();
+ desc->SetCallAsFunctionHandler(ReturnThis);
Local<v8::Object> obj;
if (!desc->NewInstance(v8_isolate->GetCurrentContext()).ToLocal(&obj)) {
return nullptr;
@@ -323,7 +378,7 @@ static void call_as_function(const v8::FunctionCallbackInfo<v8::Value>& args) {
// parameters when it is called.
RUNTIME_FUNCTION(Runtime_GetCallable) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(v8_isolate);
Local<ObjectTemplate> instance_template = t->InstanceTemplate();
@@ -339,7 +394,7 @@ RUNTIME_FUNCTION(Runtime_GetCallable) {
RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
function->ClearTypeFeedbackInfo();
Code* unoptimized = function->shared()->code();
@@ -412,9 +467,29 @@ RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
return isolate->heap()->ToBoolean(count == 1);
}
+RUNTIME_FUNCTION(Runtime_SetWasmCompileControls) {
+ HandleScope scope(isolate);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ CHECK(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(Smi, block_size, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(allow_async, 1);
+ WasmCompileControls& ctrl = (*g_PerIsolateWasmControls.Pointer())[v8_isolate];
+ ctrl.AllowAnySizeForAsync = allow_async;
+ ctrl.MaxWasmBufferSize = static_cast<uint32_t>(block_size->value());
+ isolate->set_allow_wasm_compile_callback(IsWasmCompileAllowed);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_SetWasmInstantiateControls) {
+ HandleScope scope(isolate);
+ CHECK(args.length() == 0);
+ isolate->set_allow_wasm_instantiate_callback(IsWasmInstantiateAllowed);
+ return isolate->heap()->undefined_value();
+}
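A hypothetical test driver for the two controls, assuming d8 with natives syntax:
// d8 --allow-natives-syntax --expose-wasm
//   %SetWasmCompileControls(65536, false);  // cap all compiles at 64 KiB
//   %SetWasmInstantiateControls();          // apply the same check to instantiate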
+
RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
isolate->heap()->NotifyContextDisposed(true);
return isolate->heap()->undefined_value();
}
@@ -444,7 +519,7 @@ RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
RUNTIME_FUNCTION(Runtime_DebugPrint) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
OFStream os(stdout);
#ifdef DEBUG
@@ -475,7 +550,7 @@ RUNTIME_FUNCTION(Runtime_DebugPrint) {
RUNTIME_FUNCTION(Runtime_DebugTrace) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
isolate->PrintStack(stdout);
return isolate->heap()->undefined_value();
}
@@ -485,7 +560,7 @@ RUNTIME_FUNCTION(Runtime_DebugTrace) {
// very slowly for very deeply nested ConsStrings. For debugging use only.
RUNTIME_FUNCTION(Runtime_GlobalPrint) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, string, 0);
StringCharacterStream stream(string);
@@ -501,7 +576,7 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
// The code below doesn't create handles, but when breaking here in GDB
// having a handle scope might be useful.
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
base::OS::DebugBreak();
return isolate->heap()->undefined_value();
}
@@ -510,7 +585,7 @@ RUNTIME_FUNCTION(Runtime_SystemBreak) {
// Sets a v8 flag.
RUNTIME_FUNCTION(Runtime_SetFlags) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(String, arg, 0);
std::unique_ptr<char[]> flags =
arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -521,7 +596,7 @@ RUNTIME_FUNCTION(Runtime_SetFlags) {
RUNTIME_FUNCTION(Runtime_Abort) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_SMI_ARG_CHECKED(message_id, 0);
const char* message =
GetBailoutReason(static_cast<BailoutReason>(message_id));
@@ -535,7 +610,7 @@ RUNTIME_FUNCTION(Runtime_Abort) {
RUNTIME_FUNCTION(Runtime_AbortJS) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
base::OS::PrintError("abort: %s\n", message->ToCString().get());
isolate->PrintStack(stderr);
@@ -546,14 +621,14 @@ RUNTIME_FUNCTION(Runtime_AbortJS) {
RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
return Smi::FromInt(Natives::GetBuiltinsCount());
}
// TODO(5510): remove this.
RUNTIME_FUNCTION(Runtime_GetV8Version) {
HandleScope scope(isolate);
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
const char* version_string = v8::V8::GetVersion();
@@ -564,7 +639,7 @@ RUNTIME_FUNCTION(Runtime_GetV8Version) {
RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
HandleScope scope(isolate);
#ifdef DEBUG
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
if (!Compiler::Compile(func, Compiler::KEEP_EXCEPTION)) {
@@ -628,7 +703,7 @@ RUNTIME_FUNCTION(Runtime_TraceTailCall) {
RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSObject, exception_obj, 0);
Factory* factory = isolate->factory();
@@ -653,7 +728,7 @@ RUNTIME_FUNCTION(Runtime_GetExceptionDetails) {
RUNTIME_FUNCTION(Runtime_HaveSameMap) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_CHECKED(JSObject, obj1, 0);
CONVERT_ARG_CHECKED(JSObject, obj2, 1);
return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
@@ -662,41 +737,48 @@ RUNTIME_FUNCTION(Runtime_HaveSameMap) {
RUNTIME_FUNCTION(Runtime_InNewSpace) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, obj, 0);
return isolate->heap()->ToBoolean(isolate->heap()->InNewSpace(obj));
}
-static bool IsAsmWasmCode(Isolate* isolate, Handle<JSFunction> function) {
+RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
if (!function->shared()->HasAsmWasmData()) {
// Doesn't have wasm data.
- return false;
+ return isolate->heap()->false_value();
}
if (function->shared()->code() !=
isolate->builtins()->builtin(Builtins::kInstantiateAsmJs)) {
// Hasn't been compiled yet.
- return false;
+ return isolate->heap()->false_value();
}
- return true;
+ return isolate->heap()->true_value();
}
-RUNTIME_FUNCTION(Runtime_IsAsmWasmCode) {
+namespace {
+bool DisallowCodegenFromStringsCallback(v8::Local<v8::Context> context) {
+ return false;
+}
+}  // namespace
+
+RUNTIME_FUNCTION(Runtime_DisallowCodegenFromStrings) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- // TODO(mstarzinger): --always-opt should still allow asm.js->wasm,
- // but currently does not. For now, pretend asm.js->wasm is on for
- // this case. Be more accurate once this is corrected.
- return isolate->heap()->ToBoolean(
- ((FLAG_always_opt || FLAG_prepare_always_opt) && FLAG_validate_asm) ||
- IsAsmWasmCode(isolate, function));
+ DCHECK_EQ(0, args.length());
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8_isolate->SetAllowCodeGenerationFromStringsCallback(
+ DisallowCodegenFromStringsCallback);
+ return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_IsNotAsmWasmCode) {
+RUNTIME_FUNCTION(Runtime_IsWasmCode) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- return isolate->heap()->ToBoolean(!IsAsmWasmCode(isolate, function));
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(JSFunction, function, 0);
+ bool is_js_to_wasm = function->code()->kind() == Code::JS_TO_WASM_FUNCTION;
+ return isolate->heap()->ToBoolean(is_js_to_wasm);
}
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
@@ -736,15 +818,18 @@ RUNTIME_FUNCTION(Runtime_SpeciesProtector) {
return isolate->heap()->ToBoolean(isolate->IsArraySpeciesLookupChainIntact());
}
+#define CONVERT_ARG_HANDLE_CHECKED_2(Type, name, index) \
+ CHECK(Type::Is##Type(args[index])); \
+ Handle<Type> name = args.at<Type>(index);
+
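For reference, one expansion of the helper macro above:
// CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0) becomes:
//   CHECK(WasmModuleObject::IsWasmModuleObject(args[0]));
//   Handle<WasmModuleObject> module_obj = args.at<WasmModuleObject>(0);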
// Take a compiled wasm module, serialize it and copy the buffer into an array
// buffer, which is then returned.
RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
- Handle<FixedArray> orig =
- handle(FixedArray::cast(module_obj->GetInternalField(0)));
+ Handle<WasmCompiledModule> orig(module_obj->compiled_module());
std::unique_ptr<ScriptData> data =
WasmCompiledModuleSerializer::SerializeWasmModule(isolate, orig);
void* buff = isolate->array_buffer_allocator()->Allocate(data->length());
@@ -758,7 +843,7 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
// Return undefined if unsuccessful.
RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
HandleScope shs(isolate);
- DCHECK(args.length() == 2);
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, wire_bytes, 1);
@@ -793,8 +878,8 @@ RUNTIME_FUNCTION(Runtime_DeserializeWasmModule) {
RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
HandleScope shs(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Smi, instance_count, 1);
wasm::testing::ValidateInstancesChain(isolate, module_obj,
instance_count->value());
@@ -803,17 +888,34 @@ RUNTIME_FUNCTION(Runtime_ValidateWasmInstancesChain) {
RUNTIME_FUNCTION(Runtime_ValidateWasmModuleState) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, module_obj, 0);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED_2(WasmModuleObject, module_obj, 0);
wasm::testing::ValidateModuleState(isolate, module_obj);
return isolate->heap()->ToBoolean(true);
}
RUNTIME_FUNCTION(Runtime_ValidateWasmOrphanedInstance) {
HandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
- wasm::testing::ValidateOrphanedInstance(isolate, instance_obj);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED_2(WasmInstanceObject, instance, 0);
+ wasm::testing::ValidateOrphanedInstance(isolate, instance);
+ return isolate->heap()->ToBoolean(true);
+}
+
+RUNTIME_FUNCTION(Runtime_Verify) {
+ HandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+#ifdef VERIFY_HEAP
+ object->ObjectVerify();
+#else
+ CHECK(object->IsObject());
+ if (object->IsHeapObject()) {
+ CHECK(HeapObject::cast(*object)->map()->IsMap());
+ } else {
+ CHECK(object->IsSmi());
+ }
+#endif
return isolate->heap()->ToBoolean(true);
}
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index cb0e062d14..d5e394c345 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -15,7 +15,7 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
return holder->byte_length();
}
@@ -23,7 +23,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
@@ -56,7 +56,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
if (array_buffer->backing_store() == NULL) {
CHECK(Smi::kZero == array_buffer->byte_length());
@@ -97,7 +97,7 @@ void Runtime::ArrayIdToTypeAndSize(int arrayId, ExternalArrayType* array_type,
RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
HandleScope scope(isolate);
- DCHECK(args.length() == 6);
+ DCHECK_EQ(6, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
@@ -179,7 +179,7 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
// Returns true if backing store was initialized or false otherwise.
RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
+ DCHECK_EQ(4, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
CONVERT_SMI_ARG_CHECKED(arrayId, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
@@ -310,7 +310,7 @@ enum TypedArraySetResultCodes {
RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
+ DCHECK_EQ(3, args.length());
if (!args[0]->IsJSTypedArray()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotTypedArray));
@@ -369,7 +369,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
- DCHECK(args.length() == 0);
+ DCHECK_EQ(0, args.length());
DCHECK_OBJECT_SIZE(FLAG_typed_array_max_size_in_heap +
FixedTypedArrayBase::kDataOffset);
return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
@@ -378,14 +378,14 @@ RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
RUNTIME_FUNCTION(Runtime_IsTypedArray) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
}
RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
return isolate->heap()->ToBoolean(
args[0]->IsJSTypedArray() &&
JSTypedArray::cast(args[0])->GetBuffer()->is_shared());
@@ -394,7 +394,7 @@ RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
if (!args[0]->IsJSTypedArray()) {
return isolate->heap()->false_value();
}
@@ -409,7 +409,7 @@ RUNTIME_FUNCTION(Runtime_IsSharedIntegerTypedArray) {
RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
if (!args[0]->IsJSTypedArray()) {
return isolate->heap()->false_value();
}
diff --git a/deps/v8/src/runtime/runtime-utils.h b/deps/v8/src/runtime/runtime-utils.h
index 147efed092..8c7714a0f6 100644
--- a/deps/v8/src/runtime/runtime-utils.h
+++ b/deps/v8/src/runtime/runtime-utils.h
@@ -6,6 +6,7 @@
#define V8_RUNTIME_RUNTIME_UTILS_H_
#include "src/base/logging.h"
+#include "src/globals.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -24,7 +25,7 @@ namespace internal {
#define CONVERT_NUMBER_ARG_HANDLE_CHECKED(name, index) \
CHECK(args[index]->IsNumber()); \
- Handle<Object> name = args.at<Object>(index);
+ Handle<Object> name = args.at(index);
// Cast the given object to a boolean and store it in a variable with
// the given name. If the object is not a boolean we crash safely.
@@ -47,10 +48,10 @@ namespace internal {
// Cast the given argument to a size_t and store its value in a variable with
// the given name. If the argument is not a size_t we crash safely.
-#define CONVERT_SIZE_ARG_CHECKED(name, index) \
- CHECK(args[index]->IsNumber()); \
- Handle<Object> name##_object = args.at<Object>(index); \
- size_t name = 0; \
+#define CONVERT_SIZE_ARG_CHECKED(name, index) \
+ CHECK(args[index]->IsNumber()); \
+ Handle<Object> name##_object = args.at(index); \
+ size_t name = 0; \
CHECK(TryNumberToSize(*name##_object, &name));
// Call the specified converter on the object and store the result in
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index ab69046c45..3ae5b92da1 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -14,52 +14,115 @@
#include "src/objects-inl.h"
#include "src/v8memory.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-opcodes.h"
namespace v8 {
namespace internal {
+namespace {
+Handle<WasmInstanceObject> GetWasmInstanceOnStackTop(Isolate* isolate) {
+ DisallowHeapAllocation no_allocation;
+ const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+ Address pc =
+ Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
+ Code* code = isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
+ DCHECK_EQ(Code::WASM_FUNCTION, code->kind());
+ WasmInstanceObject* owning_instance = wasm::GetOwningWasmInstance(code);
+ CHECK_NOT_NULL(owning_instance);
+ return handle(owning_instance, isolate);
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_WasmMemorySize) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
- Handle<JSObject> module_instance;
- {
- // Get the module JSObject
- DisallowHeapAllocation no_allocation;
- const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address pc =
- Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
- Code* code =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
- Object* owning_instance = wasm::GetOwningWasmInstance(code);
- CHECK_NOT_NULL(owning_instance);
- module_instance = handle(JSObject::cast(owning_instance), isolate);
- }
+ Handle<WasmInstanceObject> instance = GetWasmInstanceOnStackTop(isolate);
return *isolate->factory()->NewNumberFromInt(
- wasm::GetInstanceMemorySize(isolate, module_instance));
+ wasm::GetInstanceMemorySize(isolate, instance));
}
RUNTIME_FUNCTION(Runtime_WasmGrowMemory) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_UINT32_ARG_CHECKED(delta_pages, 0);
- Handle<JSObject> module_instance;
- {
- // Get the module JSObject
- DisallowHeapAllocation no_allocation;
- const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
- Address pc =
- Memory::Address_at(entry + StandardFrameConstants::kCallerPCOffset);
- Code* code =
- isolate->inner_pointer_to_code_cache()->GetCacheEntry(pc)->code;
- Object* owning_instance = wasm::GetOwningWasmInstance(code);
- CHECK_NOT_NULL(owning_instance);
- module_instance = handle(JSObject::cast(owning_instance), isolate);
- }
+ Handle<WasmInstanceObject> instance = GetWasmInstanceOnStackTop(isolate);
return *isolate->factory()->NewNumberFromInt(
- wasm::GrowInstanceMemory(isolate, module_instance, delta_pages));
+ wasm::GrowMemory(isolate, instance, delta_pages));
+}
+
+Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
+ bool patch_source_position) {
+ HandleScope scope(isolate);
+ Handle<Object> error_obj = isolate->factory()->NewWasmRuntimeError(
+ static_cast<MessageTemplate::Template>(message_id));
+
+ if (!patch_source_position) {
+ return isolate->Throw(*error_obj);
+ }
+
+ // For wasm traps, the byte offset (a.k.a. source position) cannot be
+ // determined from relocation info, since the explicit checks for traps
+ // converge in one single block which calls this runtime function.
+ // We hence pass the byte offset explicitly, and patch it into the top-most
+ // frame (a wasm frame) on the collected stack trace.
+ // TODO(wasm): This implementation is temporary, see bug #5007:
+ // https://bugs.chromium.org/p/v8/issues/detail?id=5007
+ Handle<JSObject> error = Handle<JSObject>::cast(error_obj);
+ Handle<Object> stack_trace_obj = JSReceiver::GetDataProperty(
+ error, isolate->factory()->stack_trace_symbol());
+ // Patch the stack trace (array of <receiver, function, code, position>).
+ if (stack_trace_obj->IsJSArray()) {
+ Handle<FrameArray> stack_elements(
+ FrameArray::cast(JSArray::cast(*stack_trace_obj)->elements()));
+ DCHECK(stack_elements->Code(0)->kind() == AbstractCode::WASM_FUNCTION);
+ DCHECK(stack_elements->Offset(0)->value() >= 0);
+ stack_elements->SetOffset(0, Smi::FromInt(-1 - byte_offset));
+ }
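A worked instance of the encoding above, assuming a trap at wasm byte offset 7:
//   stored offset: -1 - 7 = -8
//   recovered as:  -(-8) - 1 = 7; the negative value marks the slot as a
//   patched wasm position rather than a real code offset.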
+
+ // Patch the detailed stack trace (array of JSObjects with various
+ // properties).
+ Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
+ error, isolate->factory()->detailed_stack_trace_symbol());
+ if (detailed_stack_trace_obj->IsJSArray()) {
+ Handle<FixedArray> stack_elements(
+ FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
+ DCHECK_GE(stack_elements->length(), 1);
+ Handle<JSObject> top_frame(JSObject::cast(stack_elements->get(0)));
+ Handle<String> wasm_offset_key =
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("column"));
+ LookupIterator it(top_frame, wasm_offset_key, top_frame,
+ LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (it.IsFound()) {
+ DCHECK(JSReceiver::GetDataProperty(&it)->IsSmi());
+ // Make column number 1-based here.
+ Maybe<bool> data_set = JSReceiver::SetDataProperty(
+ &it, handle(Smi::FromInt(byte_offset + 1), isolate));
+ DCHECK(data_set.IsJust() && data_set.FromJust() == true);
+ USE(data_set);
+ }
+ }
+
+ return isolate->Throw(*error_obj);
+}
+
+RUNTIME_FUNCTION(Runtime_ThrowWasmError) {
+ DCHECK_EQ(2, args.length());
+ CONVERT_SMI_ARG_CHECKED(message_id, 0);
+ CONVERT_SMI_ARG_CHECKED(byte_offset, 1);
+ return ThrowRuntimeError(isolate, message_id, byte_offset, true);
}
+#define DECLARE_ENUM(name) \
+ RUNTIME_FUNCTION(Runtime_ThrowWasm##name) { \
+ int message_id = wasm::WasmOpcodes::TrapReasonToMessageId(wasm::k##name); \
+ return ThrowRuntimeError(isolate, message_id, 0, false); \
+ }
+FOREACH_WASM_TRAPREASON(DECLARE_ENUM)
+#undef DECLARE_ENUM
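One expansion of DECLARE_ENUM, assuming TrapDivByZero is among the FOREACH_WASM_TRAPREASON entries:
// RUNTIME_FUNCTION(Runtime_ThrowWasmTrapDivByZero) {
//   int message_id =
//       wasm::WasmOpcodes::TrapReasonToMessageId(wasm::kTrapDivByZero);
//   return ThrowRuntimeError(isolate, message_id, 0, false);
// }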
+
RUNTIME_FUNCTION(Runtime_WasmThrowTypeError) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -89,5 +152,28 @@ RUNTIME_FUNCTION(Runtime_WasmGetCaughtExceptionValue) {
return exception;
}
+RUNTIME_FUNCTION(Runtime_WasmRunInterpreter) {
+ DCHECK(args.length() == 3);
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, instance_obj, 0);
+ CONVERT_NUMBER_CHECKED(int32_t, func_index, Int32, args[1]);
+ CONVERT_ARG_HANDLE_CHECKED(Object, arg_buffer_obj, 2);
+ CHECK(WasmInstanceObject::IsWasmInstanceObject(*instance_obj));
+
+ // The arg buffer is the raw pointer to the caller's stack. It looks like a
+ // Smi (lowest bit not set, as checked by IsSmi), but is not a valid Smi.
+ // We just cast it back to the raw pointer.
+ CHECK(!arg_buffer_obj->IsHeapObject());
+ CHECK(arg_buffer_obj->IsSmi());
+ uint8_t* arg_buffer = reinterpret_cast<uint8_t*>(*arg_buffer_obj);
+
+ Handle<WasmInstanceObject> instance =
+ Handle<WasmInstanceObject>::cast(instance_obj);
+ Handle<WasmDebugInfo> debug_info =
+ WasmInstanceObject::GetOrCreateDebugInfo(instance);
+ WasmDebugInfo::RunInterpreter(debug_info, func_index, arg_buffer);
+ return isolate->heap()->undefined_value();
+}
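The cast above leans on the Smi tagging invariant; stated as an assumption:
// A Smi carries tag 0 in its lowest bit and the caller's arg buffer is at
// least 2-byte aligned, so
//   (reinterpret_cast<intptr_t>(arg_buffer) & 1) == 0
// holds and the raw pointer survives the Object* calling convention.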
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 9d1cd39c5d..38f1805656 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -10,6 +10,7 @@
#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/runtime/runtime-utils.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 8e2e83c37e..7eadbe2c09 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -45,7 +45,6 @@ namespace internal {
F(EstimateNumberOfElements, 1, 1) \
F(GetArrayKeys, 2, 1) \
F(NewArray, -1 /* >= 3 */, 1) \
- F(ArrayPush, -1, 1) \
F(FunctionBind, -1, 1) \
F(NormalizeElements, 1, 1) \
F(GrowArrayElements, 2, 1) \
@@ -75,22 +74,24 @@ namespace internal {
F(AtomicsWake, 3, 1) \
F(AtomicsNumWaitersForTesting, 2, 1)
-#define FOR_EACH_INTRINSIC_CLASSES(F) \
- F(ThrowNonMethodError, 0, 1) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(ThrowConstructorNonCallableError, 1, 1) \
- F(ThrowArrayNotSubclassableError, 0, 1) \
- F(ThrowStaticPrototypeError, 0, 1) \
- F(ThrowIfStaticPrototype, 1, 1) \
- F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 4, 1) \
- F(LoadFromSuper, 3, 1) \
- F(LoadKeyedFromSuper, 3, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1) \
- F(GetSuperConstructor, 1, 1)
+#define FOR_EACH_INTRINSIC_CLASSES(F) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(ThrowConstructorNonCallableError, 1, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowSuperAlreadyCalledError, 0, 1) \
+ F(ThrowNotSuperConstructor, 2, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(DefineClass, 4, 1) \
+ F(InstallClassNameAccessor, 1, 1) \
+ F(InstallClassNameAccessorWithCheck, 1, 1) \
+ F(LoadFromSuper, 3, 1) \
+ F(LoadKeyedFromSuper, 3, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreKeyedToSuper_Sloppy, 4, 1) \
+ F(GetSuperConstructor, 1, 1) \
+ F(NewWithSpread, -1, 1)
#define FOR_EACH_INTRINSIC_COLLECTIONS(F) \
F(StringGetRawHashField, 1, 1) \
@@ -147,7 +148,7 @@ namespace internal {
F(DebugGetInternalProperties, 1, 1) \
F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetProperty, 2, 1) \
- F(DebugPropertyTypeFromDetails, 1, 1) \
+ F(DebugPropertyKindFromDetails, 1, 1) \
F(DebugPropertyAttributesFromDetails, 1, 1) \
F(CheckExecutionState, 1, 1) \
F(GetFrameCount, 1, 1) \
@@ -169,9 +170,10 @@ namespace internal {
F(ChangeBreakOnException, 2, 1) \
F(IsBreakOnException, 1, 1) \
F(PrepareStep, 2, 1) \
+ F(PrepareStepFrame, 0, 1) \
F(ClearStepping, 0, 1) \
- F(DebugEvaluate, 6, 1) \
- F(DebugEvaluateGlobal, 4, 1) \
+ F(DebugEvaluate, 4, 1) \
+ F(DebugEvaluateGlobal, 2, 1) \
F(DebugGetLoadedScripts, 0, 1) \
F(DebugReferencedBy, 3, 1) \
F(DebugConstructedBy, 2, 1) \
@@ -190,14 +192,17 @@ namespace internal {
F(ScriptLocationFromLine, 4, 1) \
F(ScriptLocationFromLine2, 4, 1) \
F(ScriptPositionInfo, 3, 1) \
+ F(ScriptPositionInfo2, 3, 1) \
F(ScriptSourceLine, 2, 1) \
- F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(DebugOnFunctionCall, 1, 1) \
F(DebugPrepareStepInSuspendedGenerator, 0, 1) \
- F(DebugRecordAsyncFunction, 1, 1) \
+ F(DebugRecordGenerator, 1, 1) \
F(DebugPushPromise, 1, 1) \
F(DebugPopPromise, 0, 1) \
- F(DebugNextMicrotaskId, 0, 1) \
- F(DebugAsyncTaskEvent, 3, 1) \
+ F(DebugPromiseReject, 2, 1) \
+ F(DebugNextAsyncTaskId, 1, 1) \
+ F(DebugAsyncEventEnqueueRecurring, 2, 1) \
+ F(DebugAsyncFunctionPromiseCreated, 1, 1) \
F(DebugIsActive, 0, 1) \
F(DebugBreakInOptimizedCode, 0, 1)
@@ -210,11 +215,9 @@ namespace internal {
F(ForInNext, 4, 1)
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
- F(InterpreterNewClosure, 2, 1) \
+ F(InterpreterNewClosure, 4, 1) \
F(InterpreterTraceBytecodeEntry, 3, 1) \
F(InterpreterTraceBytecodeExit, 3, 1) \
- F(InterpreterClearPendingMessage, 0, 1) \
- F(InterpreterSetPendingMessage, 1, 1) \
F(InterpreterAdvanceBytecodeOffset, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
@@ -241,10 +244,10 @@ namespace internal {
#define FOR_EACH_INTRINSIC_GENERATOR(F) \
F(CreateJSGeneratorObject, 2, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
F(GeneratorClose, 1, 1) \
F(GeneratorGetFunction, 1, 1) \
F(GeneratorGetReceiver, 1, 1) \
+ F(GeneratorGetContext, 1, 1) \
F(GeneratorGetInputOrDebugPos, 1, 1) \
F(GeneratorGetContinuation, 1, 1) \
F(GeneratorGetSourcePosition, 1, 1) \
@@ -258,15 +261,12 @@ namespace internal {
F(GetLanguageTagVariants, 1, 1) \
F(IsInitializedIntlObject, 1, 1) \
F(IsInitializedIntlObjectOfType, 2, 1) \
- F(MarkAsInitializedIntlObjectOfType, 3, 1) \
- F(GetImplFromInitializedIntlObject, 1, 1) \
+ F(MarkAsInitializedIntlObjectOfType, 2, 1) \
F(CreateDateTimeFormat, 3, 1) \
F(InternalDateFormat, 2, 1) \
F(InternalDateFormatToParts, 2, 1) \
- F(InternalDateParse, 2, 1) \
F(CreateNumberFormat, 3, 1) \
F(InternalNumberFormat, 2, 1) \
- F(InternalNumberParse, 2, 1) \
F(CreateCollator, 3, 1) \
F(InternalCompare, 3, 1) \
F(StringNormalize, 2, 1) \
@@ -292,8 +292,8 @@ namespace internal {
F(CheckIsBootstrapping, 0, 1) \
F(CreateListFromArrayLike, 1, 1) \
F(EnqueueMicrotask, 1, 1) \
- F(EnqueuePromiseReactionJob, 4, 1) \
- F(EnqueuePromiseResolveThenableJob, 3, 1) \
+ F(EnqueuePromiseReactionJob, 3, 1) \
+ F(EnqueuePromiseResolveThenableJob, 1, 1) \
F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
F(ExportExperimentalFromRuntime, 1, 1) \
F(ExportFromRuntime, 1, 1) \
@@ -301,15 +301,21 @@ namespace internal {
F(InstallToContext, 1, 1) \
F(Interrupt, 0, 1) \
F(IS_VAR, 1, 1) \
- F(IsWasmInstance, 1, 1) \
F(NewReferenceError, 2, 1) \
F(NewSyntaxError, 2, 1) \
F(NewTypeError, 2, 1) \
F(OrdinaryHasInstance, 2, 1) \
- F(PromiseReject, 3, 1) \
- F(PromiseFulfill, 4, 1) \
+ F(ReportPromiseReject, 2, 1) \
+ F(PromiseHookInit, 2, 1) \
+ F(PromiseHookResolve, 1, 1) \
+ F(PromiseHookBefore, 1, 1) \
+ F(PromiseHookAfter, 1, 1) \
+ F(PromiseMarkAsHandled, 1, 1) \
+ F(PromiseMarkHandledHint, 1, 1) \
F(PromiseRejectEventFromStack, 2, 1) \
F(PromiseRevokeReject, 1, 1) \
+ F(PromiseResult, 1, 1) \
+ F(PromiseStatus, 1, 1) \
F(PromoteScheduledException, 0, 1) \
F(ReThrow, 1, 1) \
F(RunMicrotasks, 0, 1) \
@@ -324,16 +330,18 @@ namespace internal {
F(ThrowGeneratorRunning, 0, 1) \
F(ThrowIllegalInvocation, 0, 1) \
F(ThrowIncompatibleMethodReceiver, 2, 1) \
+ F(ThrowInvalidHint, 1, 1) \
F(ThrowInvalidStringLength, 0, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
F(ThrowNotGeneric, 1, 1) \
F(ThrowReferenceError, 1, 1) \
F(ThrowStackOverflow, 0, 1) \
F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowWasmError, 2, 1) \
F(ThrowUndefinedOrNullToObject, 1, 1) \
F(Typeof, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1)
+ F(UnwindAndFindExceptionHandler, 0, 1) \
+ F(AllowDynamicFunction, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
F(CreateRegExpLiteral, 4, 1) \
@@ -341,13 +349,13 @@ namespace internal {
F(CreateArrayLiteral, 4, 1) \
F(CreateArrayLiteralStubBailout, 3, 1)
-
#define FOR_EACH_INTRINSIC_LIVEEDIT(F) \
F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
F(LiveEditGatherCompileInfo, 2, 1) \
F(LiveEditReplaceScript, 3, 1) \
- F(LiveEditFunctionSourceUpdated, 1, 1) \
+ F(LiveEditFunctionSourceUpdated, 2, 1) \
F(LiveEditReplaceFunctionCode, 2, 1) \
+ F(LiveEditFixupScript, 2, 1) \
F(LiveEditFunctionSetScript, 2, 1) \
F(LiveEditReplaceRefToNestedFunction, 3, 1) \
F(LiveEditPatchFunctionPositions, 2, 1) \
@@ -402,14 +410,14 @@ namespace internal {
F(TryMigrateInstance, 1, 1) \
F(IsJSGlobalProxy, 1, 1) \
F(DefineAccessorPropertyUnchecked, 5, 1) \
- F(DefineDataPropertyInLiteral, 5, 1) \
- F(DefineDataProperty, 5, 1) \
+ F(DefineDataPropertyInLiteral, 6, 1) \
F(GetDataProperty, 2, 1) \
F(GetConstructorName, 1, 1) \
F(HasFastPackedElements, 1, 1) \
F(ValueOf, 1, 1) \
F(IsJSReceiver, 1, 1) \
F(ClassOf, 1, 1) \
+ F(CopyDataProperties, 2, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
F(ToObject, 1, 1) \
@@ -465,8 +473,10 @@ namespace internal {
F(RegExpExec, 4, 1) \
F(RegExpExecMultiple, 4, 1) \
F(RegExpExecReThrow, 4, 1) \
+ F(RegExpInitializeAndCompile, 3, 1) \
F(RegExpInternalReplace, 3, 1) \
F(RegExpReplace, 3, 1) \
+ F(RegExpSplit, 3, 1) \
F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceNonGlobalRegExpWithFunction, 3, 1) \
F(StringSplit, 3, 1)
@@ -482,10 +492,11 @@ namespace internal {
F(NewStrictArguments, 1, 1) \
F(NewRestParameter, 1, 1) \
F(NewSloppyArguments, 3, 1) \
- F(NewClosure, 1, 1) \
- F(NewClosure_Tenured, 1, 1) \
+ F(NewArgumentsElements, 2, 1) \
+ F(NewClosure, 3, 1) \
+ F(NewClosure_Tenured, 3, 1) \
F(NewScriptContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
+ F(NewFunctionContext, 2, 1) \
F(PushModuleContext, 3, 1) \
F(PushWithContext, 3, 1) \
F(PushCatchContext, 4, 1) \
@@ -809,6 +820,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_STRINGS(F) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
+ F(StringIndexOfUnchecked, 3, 1) \
F(StringLastIndexOf, 2, 1) \
F(SubString, 3, 1) \
F(StringAdd, 2, 1) \
@@ -837,7 +849,6 @@ namespace internal {
F(CreatePrivateSymbol, 1, 1) \
F(SymbolDescription, 1, 1) \
F(SymbolDescriptiveString, 1, 1) \
- F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1)
#define FOR_EACH_INTRINSIC_TEST(F) \
@@ -898,10 +909,14 @@ namespace internal {
F(SerializeWasmModule, 1, 1) \
F(DeserializeWasmModule, 2, 1) \
F(IsAsmWasmCode, 1, 1) \
- F(IsNotAsmWasmCode, 1, 1) \
+ F(IsWasmCode, 1, 1) \
+ F(DisallowCodegenFromStrings, 0, 1) \
F(ValidateWasmInstancesChain, 2, 1) \
F(ValidateWasmModuleState, 1, 1) \
- F(ValidateWasmOrphanedInstance, 1, 1)
+ F(ValidateWasmOrphanedInstance, 1, 1) \
+ F(SetWasmCompileControls, 2, 1) \
+ F(SetWasmInstantiateControls, 0, 1) \
+ F(Verify, 1, 1)
#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
F(ArrayBufferGetByteLength, 1, 1) \
@@ -920,12 +935,22 @@ namespace internal {
F(IsSharedIntegerTypedArray, 1, 1) \
F(IsSharedInteger32TypedArray, 1, 1)
-#define FOR_EACH_INTRINSIC_WASM(F) \
- F(WasmGrowMemory, 1, 1) \
- F(WasmMemorySize, 0, 1) \
- F(WasmThrowTypeError, 0, 1) \
- F(WasmThrow, 2, 1) \
- F(WasmGetCaughtExceptionValue, 1, 1)
+#define FOR_EACH_INTRINSIC_WASM(F) \
+ F(WasmGrowMemory, 1, 1) \
+ F(WasmMemorySize, 0, 1) \
+ F(ThrowWasmError, 2, 1) \
+ F(WasmThrowTypeError, 0, 1) \
+ F(WasmThrow, 2, 1) \
+ F(WasmGetCaughtExceptionValue, 1, 1) \
+ F(ThrowWasmTrapUnreachable, 0, 1) \
+ F(ThrowWasmTrapMemOutOfBounds, 0, 1) \
+ F(ThrowWasmTrapDivByZero, 0, 1) \
+ F(ThrowWasmTrapDivUnrepresentable, 0, 1) \
+ F(ThrowWasmTrapRemByZero, 0, 1) \
+ F(ThrowWasmTrapFloatUnrepresentable, 0, 1) \
+ F(ThrowWasmTrapFuncInvalid, 0, 1) \
+ F(ThrowWasmTrapFuncSigMismatch, 0, 1) \
+ F(WasmRunInterpreter, 3, 1)
#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlotForCall, 1, 2)
@@ -946,8 +971,8 @@ namespace internal {
F(KeyedStoreIC_Miss, 5, 1) \
F(KeyedStoreIC_Slow, 5, 1) \
F(LoadElementWithInterceptor, 2, 1) \
- F(LoadGlobalIC_Miss, 2, 1) \
- F(LoadGlobalIC_Slow, 2, 1) \
+ F(LoadGlobalIC_Miss, 3, 1) \
+ F(LoadGlobalIC_Slow, 1, 1) \
F(LoadIC_Miss, 4, 1) \
F(LoadPropertyWithInterceptor, 3, 1) \
F(LoadPropertyWithInterceptorOnly, 3, 1) \
@@ -1009,14 +1034,13 @@ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
class Runtime : public AllStatic {
public:
- enum FunctionId {
+ enum FunctionId : int32_t {
#define F(name, nargs, ressize) k##name,
#define I(name, nargs, ressize) kInline##name,
- FOR_EACH_INTRINSIC(F)
- FOR_EACH_INTRINSIC(I)
+ FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)
#undef I
#undef F
- kNumFunctions,
+ kNumFunctions,
};
enum IntrinsicType { RUNTIME, INLINE };
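// A minimal sketch of the X-macro expansion that builds FunctionId above,
// using a hypothetical two-entry list (FOR_EACH_TOY stands in for the real
// FOR_EACH_INTRINSIC):
#define FOR_EACH_TOY(F) \
  F(Alpha, 1, 1)        \
  F(Beta, 2, 1)

enum ToyFunctionId : int32_t {
#define F(name, nargs, ressize) k##name,        // -> kAlpha, kBeta,
#define I(name, nargs, ressize) kInline##name,  // -> kInlineAlpha, kInlineBeta,
  FOR_EACH_TOY(F) FOR_EACH_TOY(I)
#undef I
#undef F
  kToyNumFunctions,  // == 4: one plain and one inline id per entry
};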
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index a448947307..448acb34ad 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -138,39 +138,48 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
// The facilities we are checking for are:
// Bit 45 - Distinct Operands for instructions like ARK, SRK, etc.
// Bit 129 - Vector Facility
// As such, we require 3 double words to cover facility bits up to 129.
- int64_t facilities[1];
- facilities[0] = 0;
+ int64_t facilities[3] = {0L};
// LHI sets GPR0 to 2, so STFLE stores up to three double words.
// STFLE is specified as .insn, as its opcode is not recognized.
// We register that the instructions kill r0 (LHI) and the CC (STFLE).
asm volatile(
- "lhi 0,0\n"
+ "lhi 0,2\n"
".insn s,0xb2b00000,%0\n"
: "=Q"(facilities)
:
: "cc", "r0");
+ uint64_t one = static_cast<uint64_t>(1);
// Test for Distinct Operands Facility - Bit 45
- if (facilities[0] & (1lu << (63 - 45))) {
+ if (facilities[0] & (one << (63 - 45))) {
supported_ |= (1u << DISTINCT_OPS);
}
// Test for General Instruction Extension Facility - Bit 34
- if (facilities[0] & (1lu << (63 - 34))) {
+ if (facilities[0] & (one << (63 - 34))) {
supported_ |= (1u << GENERAL_INSTR_EXT);
}
// Test for Floating Point Extension Facility - Bit 37
- if (facilities[0] & (1lu << (63 - 37))) {
+ if (facilities[0] & (one << (63 - 37))) {
supported_ |= (1u << FLOATING_POINT_EXT);
}
+ // Test for Vector Facility - Bit 129
+ if (facilities[2] & (one << (63 - (129 - 128)))) {
+ supported_ |= (1u << VECTOR_FACILITY);
+ }
+ // Test for Miscellaneous Instruction Extension Facility - Bit 58
+ if (facilities[0] & (one << (63 - 58))) {
+ supported_ |= (1u << MISC_INSTR_EXT2);
+ }
}
#else
// All distinct ops instructions can be simulated
supported_ |= (1u << DISTINCT_OPS);
// RISBG can be simulated
supported_ |= (1u << GENERAL_INSTR_EXT);
-
supported_ |= (1u << FLOATING_POINT_EXT);
+ supported_ |= (1u << MISC_INSTR_EXT2);
USE(performSTFLE); // To avoid assert
+ supported_ |= (1u << VECTOR_FACILITY);
#endif
supported_ |= (1u << FPU);
}
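// A minimal sketch of the facility-bit tests above, assuming the STFLE
// layout: facility bit N sits in double word N / 64 at big-endian position
// N % 64, i.e. host bit (63 - N % 64). HasFacility is a hypothetical helper.
inline bool HasFacility(const int64_t facilities[3], int bit) {
  uint64_t one = 1;
  return (static_cast<uint64_t>(facilities[bit / 64]) >>
          (63 - (bit % 64))) & one;
}
// HasFacility(facilities, 45)  -> DISTINCT_OPS
// HasFacility(facilities, 129) -> VECTOR_FACILITY (third double word)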
@@ -192,6 +201,8 @@ void CpuFeatures::PrintFeatures() {
printf("FPU_EXT=%d\n", CpuFeatures::IsSupported(FLOATING_POINT_EXT));
printf("GENERAL_INSTR=%d\n", CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
printf("DISTINCT_OPS=%d\n", CpuFeatures::IsSupported(DISTINCT_OPS));
+ printf("VECTOR_FACILITY=%d\n", CpuFeatures::IsSupported(VECTOR_FACILITY));
+ printf("MISC_INSTR_EXT2=%d\n", CpuFeatures::IsSupported(MISC_INSTR_EXT2));
}
Register ToRegister(int num) {
@@ -233,13 +244,19 @@ Address RelocInfo::wasm_global_reference() {
return Assembler::target_address_at(pc_, host_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return static_cast<uint32_t>(
+ reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_,
reinterpret_cast<Address>(size), flush_mode);
}
@@ -522,11 +539,11 @@ void Assembler::load_label_offset(Register r1, Label* L) {
// Pseudo op - branch on condition
void Assembler::branchOnCond(Condition c, int branch_offset, bool is_bound) {
- int offset = branch_offset;
- if (is_bound && is_int16(offset)) {
- brc(c, Operand(offset & 0xFFFF)); // short jump
+ int offset_in_halfwords = branch_offset / 2;
+ if (is_bound && is_int16(offset_in_halfwords)) {
+ brc(c, Operand(offset_in_halfwords & 0xFFFF)); // short jump
} else {
- brcl(c, Operand(offset)); // long jump
+ brcl(c, Operand(offset_in_halfwords)); // long jump
}
}
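// A minimal sketch of the halfword adjustment above: BRC's 16-bit and BRCL's
// 32-bit immediates count 2-byte halfwords, so the byte offset is halved
// before the range check and encoding (the helper below is illustrative).
inline bool FitsShortBranch(int branch_offset_in_bytes) {
  int offset_in_halfwords = branch_offset_in_bytes / 2;
  return offset_in_halfwords >= -32768 && offset_in_halfwords <= 32767;
}
// FitsShortBranch(-8) is true, so brc encodes -4 halfwords; offsets outside
// +/-32K halfwords fall back to brcl's 32-bit halfword immediate.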
@@ -580,38 +597,6 @@ void Assembler::nop(int type) {
}
}
-// RR format: <insn> R1,R2
-// +--------+----+----+
-// | OpCode | R1 | R2 |
-// +--------+----+----+
-// 0 8 12 15
-#define RR_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r2) { rr_form(op, r1, r2); }
-
-void Assembler::rr_form(Opcode op, Register r1, Register r2) {
- DCHECK(is_uint8(op));
- emit2bytes(op * B8 | r1.code() * B4 | r2.code());
-}
-
-void Assembler::rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
- DCHECK(is_uint8(op));
- emit2bytes(op * B8 | r1.code() * B4 | r2.code());
-}
-
-// RR2 format: <insn> M1,R2
-// +--------+----+----+
-// | OpCode | M1 | R2 |
-// +--------+----+----+
-// 0 8 12 15
-#define RR2_FORM_EMIT(name, op) \
- void Assembler::name(Condition m1, Register r2) { rr_form(op, m1, r2); }
-
-void Assembler::rr_form(Opcode op, Condition m1, Register r2) {
- DCHECK(is_uint8(op));
- DCHECK(is_uint4(m1));
- emit2bytes(op * B8 | m1 * B4 | r2.code());
-}
-
// RX format: <insn> R1,D2(X2,B2)
// +--------+----+----+----+-------------+
// | OpCode | R1 | X2 | B2 | D2 |
@@ -716,75 +701,6 @@ void Assembler::rie_form(Opcode op, Register r1, Register r3,
emit6bytes(code);
}
-// RIL1 format: <insn> R1,I2
-// +--------+----+----+------------------------------------+
-// | OpCode | R1 |OpCd| I2 |
-// +--------+----+----+------------------------------------+
-// 0 8 12 16 47
-#define RIL1_FORM_EMIT(name, op) \
- void Assembler::name(Register r, const Operand& i2) { ril_form(op, r, i2); }
-
-void Assembler::ril_form(Opcode op, Register r1, const Operand& i2) {
- DCHECK(is_uint12(op));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
- (static_cast<uint64_t>(r1.code())) * B36 |
- (static_cast<uint64_t>(op & 0x00F)) * B32 |
- (static_cast<uint64_t>(i2.imm_) & 0xFFFFFFFF);
- emit6bytes(code);
-}
-
-// RIL2 format: <insn> M1,I2
-// +--------+----+----+------------------------------------+
-// | OpCode | M1 |OpCd| I2 |
-// +--------+----+----+------------------------------------+
-// 0 8 12 16 47
-#define RIL2_FORM_EMIT(name, op) \
- void Assembler::name(Condition m1, const Operand& i2) { \
- ril_form(op, m1, i2); \
- }
-
-void Assembler::ril_form(Opcode op, Condition m1, const Operand& i2) {
- DCHECK(is_uint12(op));
- DCHECK(is_uint4(m1));
- uint64_t code = (static_cast<uint64_t>(op & 0xFF0)) * B36 |
- (static_cast<uint64_t>(m1)) * B36 |
- (static_cast<uint64_t>(op & 0x00F)) * B32 |
- (static_cast<uint64_t>(i2.imm_ & 0xFFFFFFFF));
- emit6bytes(code);
-}
-
-// RRE format: <insn> R1,R2
-// +------------------+--------+----+----+
-// | OpCode |////////| R1 | R2 |
-// +------------------+--------+----+----+
-// 0 16 24 28 31
-#define RRE_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r2) { rre_form(op, r1, r2); }
-
-void Assembler::rre_form(Opcode op, Register r1, Register r2) {
- DCHECK(is_uint16(op));
- emit4bytes(op << 16 | r1.code() * B4 | r2.code());
-}
-
-void Assembler::rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2) {
- DCHECK(is_uint16(op));
- emit4bytes(op << 16 | r1.code() * B4 | r2.code());
-}
-
-// RRD format: <insn> R1,R3, R2
-// +------------------+----+----+----+----+
-// | OpCode | R1 |////| R3 | R2 |
-// +------------------+----+----+----+----+
-// 0 16 20 24 28 31
-#define RRD_FORM_EMIT(name, op) \
- void Assembler::name(Register r1, Register r3, Register r2) { \
- rrd_form(op, r1, r3, r2); \
- }
-
-void Assembler::rrd_form(Opcode op, Register r1, Register r3, Register r2) {
- emit4bytes(op << 16 | r1.code() * B12 | r3.code() * B4 | r2.code());
-}
-
// RS1 format: <insn> R1,R3,D2(B2)
// +--------+----+----+----+-------------+
// | OpCode | R1 | R3 | B2 | D2 |
@@ -1104,7 +1020,7 @@ void Assembler::si_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
}
void Assembler::siy_form(Opcode op, const Operand& i2, Register b1, Disp d1) {
- DCHECK(is_uint20(d1));
+ DCHECK(is_uint20(d1) || is_int20(d1));
DCHECK(is_uint16(op));
DCHECK(is_uint8(i2.imm_));
uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
@@ -1418,46 +1334,28 @@ void Assembler::rrfe_form(Opcode op, Condition m3, Condition m4, Register r1,
// start of S390 instruction
RX_FORM_EMIT(bc, BC)
-RR_FORM_EMIT(bctr, BCTR)
RXE_FORM_EMIT(ceb, CEB)
SS1_FORM_EMIT(ed, ED)
RX_FORM_EMIT(ex, EX)
-RRE_FORM_EMIT(flogr, FLOGR)
-RRE_FORM_EMIT(lcgr, LCGR)
-RR_FORM_EMIT(lcr, LCR)
RX_FORM_EMIT(le_z, LE)
RXY_FORM_EMIT(ley, LEY)
-RIL1_FORM_EMIT(llihf, LLIHF)
-RIL1_FORM_EMIT(llilf, LLILF)
-RRE_FORM_EMIT(lngr, LNGR)
-RR_FORM_EMIT(lnr, LNR)
-RRE_FORM_EMIT(lrvr, LRVR)
-RRE_FORM_EMIT(lrvgr, LRVGR)
RXY_FORM_EMIT(lrv, LRV)
RXY_FORM_EMIT(lrvg, LRVG)
RXY_FORM_EMIT(lrvh, LRVH)
SS1_FORM_EMIT(mvn, MVN)
SS1_FORM_EMIT(nc, NC)
SI_FORM_EMIT(ni, NI)
-RIL1_FORM_EMIT(nihf, NIHF)
-RIL1_FORM_EMIT(nilf, NILF)
RI1_FORM_EMIT(nilh, NILH)
RI1_FORM_EMIT(nill, NILL)
-RIL1_FORM_EMIT(oihf, OIHF)
-RIL1_FORM_EMIT(oilf, OILF)
RI1_FORM_EMIT(oill, OILL)
-RRE_FORM_EMIT(popcnt, POPCNT_Z)
-RIL1_FORM_EMIT(slfi, SLFI)
+RXY_FORM_EMIT(pfd, PFD)
RXY_FORM_EMIT(slgf, SLGF)
-RIL1_FORM_EMIT(slgfi, SLGFI)
RXY_FORM_EMIT(strvh, STRVH)
RXY_FORM_EMIT(strv, STRV)
RXY_FORM_EMIT(strvg, STRVG)
RI1_FORM_EMIT(tmll, TMLL)
SS1_FORM_EMIT(tr, TR)
S_FORM_EMIT(ts, TS)
-RIL1_FORM_EMIT(xihf, XIHF)
-RIL1_FORM_EMIT(xilf, XILF)
// -------------------------
// Load Address Instructions
@@ -1473,11 +1371,6 @@ void Assembler::lay(Register r1, const MemOperand& opnd) {
}
// Load Address Relative Long
-void Assembler::larl(Register r1, const Operand& opnd) {
- ril_form(LARL, r1, opnd);
-}
-
-// Load Address Relative Long
void Assembler::larl(Register r1, Label* l) {
larl(r1, Operand(branch_offset(l)));
}
@@ -1490,17 +1383,11 @@ void Assembler::lb(Register r, const MemOperand& src) {
rxy_form(LB, r, src.rx(), src.rb(), src.offset());
}
-// Load Byte Register-Register (32<-8)
-void Assembler::lbr(Register r1, Register r2) { rre_form(LBR, r1, r2); }
-
// Load Byte Register-Storage (64<-8)
void Assembler::lgb(Register r, const MemOperand& src) {
rxy_form(LGB, r, src.rx(), src.rb(), src.offset());
}
-// Load Byte Register-Register (64<-8)
-void Assembler::lgbr(Register r1, Register r2) { rre_form(LGBR, r1, r2); }
-
// Load Halfword Register-Storage (32<-16)
void Assembler::lh(Register r, const MemOperand& src) {
rx_form(LH, r, src.rx(), src.rb(), src.offset());
@@ -1511,17 +1398,11 @@ void Assembler::lhy(Register r, const MemOperand& src) {
rxy_form(LHY, r, src.rx(), src.rb(), src.offset());
}
-// Load Halfword Register-Register (32<-16)
-void Assembler::lhr(Register r1, Register r2) { rre_form(LHR, r1, r2); }
-
// Load Halfword Register-Storage (64<-16)
void Assembler::lgh(Register r, const MemOperand& src) {
rxy_form(LGH, r, src.rx(), src.rb(), src.offset());
}
-// Load Halfword Register-Register (64<-16)
-void Assembler::lghr(Register r1, Register r2) { rre_form(LGHR, r1, r2); }
-
// Load Register-Storage (32)
void Assembler::l(Register r, const MemOperand& src) {
rx_form(L, r, src.rx(), src.rb(), src.offset());
@@ -1532,25 +1413,16 @@ void Assembler::ly(Register r, const MemOperand& src) {
rxy_form(LY, r, src.rx(), src.rb(), src.offset());
}
-// Load Register-Register (32)
-void Assembler::lr(Register r1, Register r2) { rr_form(LR, r1, r2); }
-
// Load Register-Storage (64)
void Assembler::lg(Register r, const MemOperand& src) {
rxy_form(LG, r, src.rx(), src.rb(), src.offset());
}
-// Load Register-Register (64)
-void Assembler::lgr(Register r1, Register r2) { rre_form(LGR, r1, r2); }
-
// Load Register-Storage (64<-32)
void Assembler::lgf(Register r, const MemOperand& src) {
rxy_form(LGF, r, src.rx(), src.rb(), src.offset());
}
-// Load Sign Extended Register-Register (64<-32)
-void Assembler::lgfr(Register r1, Register r2) { rre_form(LGFR, r1, r2); }
-
// Load Halfword Immediate (32)
void Assembler::lhi(Register r, const Operand& imm) { ri_form(LHI, r, imm); }
@@ -1570,15 +1442,6 @@ void Assembler::ltg(Register r1, const MemOperand& opnd) {
rxy_form(LTG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Load and Test Register-Register (32)
-void Assembler::ltr(Register r1, Register r2) { rr_form(LTR, r1, r2); }
-
-// Load and Test Register-Register (64)
-void Assembler::ltgr(Register r1, Register r2) { rre_form(LTGR, r1, r2); }
-
-// Load and Test Register-Register (64<-32)
-void Assembler::ltgfr(Register r1, Register r2) { rre_form(LTGFR, r1, r2); }
-
// -------------------------
// Load Logical Instructions
// -------------------------
@@ -1597,9 +1460,6 @@ void Assembler::llgf(Register r1, const MemOperand& opnd) {
rxy_form(LLGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Load Logical Register-Register (64<-32)
-void Assembler::llgfr(Register r1, Register r2) { rre_form(LLGFR, r1, r2); }
-
// Load Logical halfword Register-Storage (32)
void Assembler::llh(Register r1, const MemOperand& opnd) {
rxy_form(LLH, r1, opnd.rx(), opnd.rb(), opnd.offset());
@@ -1610,12 +1470,6 @@ void Assembler::llgh(Register r1, const MemOperand& opnd) {
rxy_form(LLGH, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Load Logical halfword Register-Register (32)
-void Assembler::llhr(Register r1, Register r2) { rre_form(LLHR, r1, r2); }
-
-// Load Logical halfword Register-Register (64)
-void Assembler::llghr(Register r1, Register r2) { rre_form(LLGHR, r1, r2); }
-
// Load On Condition R-R (32)
void Assembler::locr(Condition m3, Register r1, Register r2) {
rrf2_form(LOCR << 16 | m3 * B12 | r1.code() * B4 | r2.code());
@@ -1639,11 +1493,6 @@ void Assembler::locg(Condition m3, Register r1, const MemOperand& src) {
// -------------------
// Branch Instructions
// -------------------
-// Branch and Save
-void Assembler::basr(Register r1, Register r2) { rr_form(BASR, r1, r2); }
-
-// Indirect Conditional Branch via register
-void Assembler::bcr(Condition m, Register target) { rr_form(BCR, m, target); }
// Branch on Count (32)
void Assembler::bct(Register r, const MemOperand& opnd) {
@@ -1660,31 +1509,8 @@ void Assembler::bras(Register r, const Operand& opnd) {
ri_form(BRAS, r, opnd);
}
-// Branch Relative and Save (64)
-void Assembler::brasl(Register r, const Operand& opnd) {
- ril_form(BRASL, r, opnd);
-}
-
// Branch relative on Condition (32)
-void Assembler::brc(Condition c, const Operand& opnd) {
- // BRC actually encodes # of halfwords, so divide by 2.
- int16_t numHalfwords = static_cast<int16_t>(opnd.immediate()) / 2;
- Operand halfwordOp = Operand(numHalfwords);
- halfwordOp.setBits(16);
- ri_form(BRC, c, halfwordOp);
-}
-
-// Branch Relative on Condition (64)
-void Assembler::brcl(Condition c, const Operand& opnd, bool isCodeTarget) {
- Operand halfwordOp = opnd;
- // Operand for code targets will be index to code_targets_
- if (!isCodeTarget) {
- // BRCL actually encodes # of halfwords, so divide by 2.
- int32_t numHalfwords = static_cast<int32_t>(opnd.immediate()) / 2;
- halfwordOp = Operand(numHalfwords);
- }
- ril_form(BRCL, c, halfwordOp);
-}
+void Assembler::brc(Condition c, const Operand& opnd) { ri_form(BRC, c, opnd); }
// Branch On Count (32)
void Assembler::brct(Register r1, const Operand& imm) {
@@ -1717,17 +1543,11 @@ void Assembler::cy(Register r, const MemOperand& opnd) {
rxy_form(CY, r, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Compare Register-Register (32)
-void Assembler::cr_z(Register r1, Register r2) { rr_form(CR, r1, r2); }
-
// Compare Register-Storage (64)
void Assembler::cg(Register r, const MemOperand& opnd) {
rxy_form(CG, r, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Compare Register-Register (64)
-void Assembler::cgr(Register r1, Register r2) { rre_form(CGR, r1, r2); }
-
// Compare Halfword Register-Storage (32)
void Assembler::ch(Register r, const MemOperand& opnd) {
rx_form(CH, r, opnd.rx(), opnd.rb(), opnd.offset());
@@ -1746,14 +1566,6 @@ void Assembler::cghi(Register r, const Operand& opnd) {
ri_form(CGHI, r, opnd);
}
-// Compare Immediate (32)
-void Assembler::cfi(Register r, const Operand& opnd) { ril_form(CFI, r, opnd); }
-
-// Compare Immediate (64)
-void Assembler::cgfi(Register r, const Operand& opnd) {
- ril_form(CGFI, r, opnd);
-}
-
// ----------------------------
// Compare Logical Instructions
// ----------------------------
@@ -1767,25 +1579,11 @@ void Assembler::cly(Register r, const MemOperand& opnd) {
rxy_form(CLY, r, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Compare Logical Register-Register (32)
-void Assembler::clr(Register r1, Register r2) { rr_form(CLR, r1, r2); }
-
// Compare Logical Register-Storage (64)
void Assembler::clg(Register r, const MemOperand& opnd) {
rxy_form(CLG, r, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Compare Logical Register-Register (64)
-void Assembler::clgr(Register r1, Register r2) { rre_form(CLGR, r1, r2); }
-
-// Compare Logical Immediate (32)
-void Assembler::clfi(Register r1, const Operand& i2) { ril_form(CLFI, r1, i2); }
-
-// Compare Logical Immediate (64<32)
-void Assembler::clgfi(Register r1, const Operand& i2) {
- ril_form(CLGFI, r1, i2);
-}
-
// Compare Immediate (Mem - Imm) (8)
void Assembler::cli(const MemOperand& opnd, const Operand& imm) {
si_form(CLI, imm, opnd.rb(), opnd.offset());
@@ -1866,11 +1664,6 @@ void Assembler::ay(Register r1, const MemOperand& opnd) {
rxy_form(AY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Add Immediate (32)
-void Assembler::afi(Register r1, const Operand& opnd) {
- ril_form(AFI, r1, opnd);
-}
-
// Add Halfword Register-Storage (32)
void Assembler::ah(Register r1, const MemOperand& opnd) {
rx_form(AH, r1, opnd.rx(), opnd.rb(), opnd.offset());
@@ -1889,9 +1682,6 @@ void Assembler::ahik(Register r1, Register r3, const Operand& i2) {
rie_form(AHIK, r1, r3, i2);
}
-// Add Register (32)
-void Assembler::ar(Register r1, Register r2) { rr_form(AR, r1, r2); }
-
// Add Register-Register-Register (32)
void Assembler::ark(Register r1, Register r2, Register r3) {
rrf1_form(ARK, r1, r2, r3);
@@ -1917,14 +1707,6 @@ void Assembler::agf(Register r1, const MemOperand& opnd) {
rxy_form(AGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Add Immediate (64)
-void Assembler::agfi(Register r1, const Operand& opnd) {
- ril_form(AGFI, r1, opnd);
-}
-
-// Add Register-Register (64<-32)
-void Assembler::agfr(Register r1, Register r2) { rre_form(AGFR, r1, r2); }
-
// Add Halfword Immediate (64)
void Assembler::aghi(Register r1, const Operand& i2) { ri_form(AGHI, r1, i2); }
@@ -1933,9 +1715,6 @@ void Assembler::aghik(Register r1, Register r3, const Operand& i2) {
rie_form(AGHIK, r1, r3, i2);
}
-// Add Register (64)
-void Assembler::agr(Register r1, Register r2) { rre_form(AGR, r1, r2); }
-
// Add Register-Register-Register (64)
void Assembler::agrk(Register r1, Register r2, Register r3) {
rrf1_form(AGRK, r1, r2, r3);
@@ -1961,17 +1740,6 @@ void Assembler::aly(Register r1, const MemOperand& opnd) {
rxy_form(ALY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Add Logical Immediate (32)
-void Assembler::alfi(Register r1, const Operand& opnd) {
- ril_form(ALFI, r1, opnd);
-}
-
-// Add Logical Register-Register (32)
-void Assembler::alr(Register r1, Register r2) { rr_form(ALR, r1, r2); }
-
-// Add Logical With Carry Register-Register (32)
-void Assembler::alcr(Register r1, Register r2) { rre_form(ALCR, r1, r2); }
-
// Add Logical Register-Register-Register (32)
void Assembler::alrk(Register r1, Register r2, Register r3) {
rrf1_form(ALRK, r1, r2, r3);
@@ -1985,14 +1753,6 @@ void Assembler::alg(Register r1, const MemOperand& opnd) {
rxy_form(ALG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Add Logical Immediate (64)
-void Assembler::algfi(Register r1, const Operand& opnd) {
- ril_form(ALGFI, r1, opnd);
-}
-
-// Add Logical Register-Register (64)
-void Assembler::algr(Register r1, Register r2) { rre_form(ALGR, r1, r2); }
-
// Add Logical Register-Register-Register (64)
void Assembler::algrk(Register r1, Register r2, Register r3) {
rrf1_form(ALGRK, r1, r2, r3);
@@ -2021,9 +1781,6 @@ void Assembler::shy(Register r1, const MemOperand& opnd) {
rxy_form(SHY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Subtract Register (32)
-void Assembler::sr(Register r1, Register r2) { rr_form(SR, r1, r2); }
-
// Subtract Register-Register-Register (32)
void Assembler::srk(Register r1, Register r2, Register r3) {
rrf1_form(SRK, r1, r2, r3);
@@ -2042,12 +1799,6 @@ void Assembler::sgf(Register r1, const MemOperand& opnd) {
rxy_form(SGF, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Subtract Register (64)
-void Assembler::sgr(Register r1, Register r2) { rre_form(SGR, r1, r2); }
-
-// Subtract Register (64<-32)
-void Assembler::sgfr(Register r1, Register r2) { rre_form(SGFR, r1, r2); }
-
// Subtract Register-Register-Register (64)
void Assembler::sgrk(Register r1, Register r2, Register r3) {
rrf1_form(SGRK, r1, r2, r3);
@@ -2066,12 +1817,6 @@ void Assembler::sly(Register r1, const MemOperand& opnd) {
rxy_form(SLY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Subtract Logical Register-Register (32)
-void Assembler::slr(Register r1, Register r2) { rr_form(SLR, r1, r2); }
-
-// Subtract Logical With Borrow Register-Register (32)
-void Assembler::slbr(Register r1, Register r2) { rre_form(SLBR, r1, r2); }
-
// Subtract Logical Register-Register-Register (32)
void Assembler::slrk(Register r1, Register r2, Register r3) {
rrf1_form(SLRK, r1, r2, r3);
@@ -2085,9 +1830,6 @@ void Assembler::slg(Register r1, const MemOperand& opnd) {
rxy_form(SLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Subtract Logical Register-Register (64)
-void Assembler::slgr(Register r1, Register r2) { rre_form(SLGR, r1, r2); }
-
// Subtract Logical Register-Register-Register (64)
void Assembler::slgrk(Register r1, Register r2, Register r3) {
rrf1_form(SLGRK, r1, r2, r3);
@@ -2107,23 +1849,11 @@ void Assembler::mfy(Register r1, const MemOperand& opnd) {
rxy_form(MFY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Multiply Register (64<32)
-void Assembler::mr_z(Register r1, Register r2) {
- DCHECK(r1.code() % 2 == 0);
- rr_form(MR, r1, r2);
-}
-
// Multiply Logical Register-Storage (64<32)
void Assembler::ml(Register r1, const MemOperand& opnd) {
rxy_form(ML, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Multiply Logical Register (64<32)
-void Assembler::mlr(Register r1, Register r2) {
- DCHECK(r1.code() % 2 == 0);
- rre_form(MLR, r1, r2);
-}
-
// Multiply Single Register-Storage (32)
void Assembler::ms(Register r1, const MemOperand& opnd) {
rx_form(MS, r1, opnd.rx(), opnd.rb(), opnd.offset());
@@ -2134,14 +1864,6 @@ void Assembler::msy(Register r1, const MemOperand& opnd) {
rxy_form(MSY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Multiply Single Immediate (32)
-void Assembler::msfi(Register r1, const Operand& opnd) {
- ril_form(MSFI, r1, opnd);
-}
-
-// Multiply Single Register (64<32)
-void Assembler::msr(Register r1, Register r2) { rre_form(MSR, r1, r2); }
-
// Multiply Halfword Register-Storage (32)
void Assembler::mh(Register r1, const MemOperand& opnd) {
rx_form(MH, r1, opnd.rx(), opnd.rb(), opnd.offset());
@@ -2157,6 +1879,16 @@ void Assembler::mhi(Register r1, const Operand& opnd) {
ri_form(MHI, r1, opnd);
}
+// Multiply Single Register-Register-Register (32)
+void Assembler::msrkc(Register r1, Register r2, Register r3) {
+ rrf1_form(MSRKC, r1, r2, r3);
+}
+
+// Multiply Single Register-Register-Register (64)
+void Assembler::msgrkc(Register r1, Register r2, Register r3) {
+ rrf1_form(MSGRKC, r1, r2, r3);
+}
+
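// A hedged usage sketch (registers illustrative): MSRKC/MSGRKC belong to the
// Miscellaneous Instruction Extensions Facility 2 probed earlier, so callers
// would be expected to guard on the feature bit.
void EmitMultiplySketch(Assembler* masm, Register r1, Register r2,
                        Register r3) {
  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
    masm->msrkc(r1, r2, r3);  // r1 = r2 * r3 (32-bit), following the
  }                           // ark/srk distinct-operands pattern; sets CC
}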
// ----------------------------
// 64-bit Multiply Instructions
// ----------------------------
@@ -2165,27 +1897,16 @@ void Assembler::mlg(Register r1, const MemOperand& opnd) {
rxy_form(MLG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Multiply Register (128<64)
-void Assembler::mlgr(Register r1, Register r2) { rre_form(MLGR, r1, r2); }
-
// Multiply Halfword Immediate (64)
void Assembler::mghi(Register r1, const Operand& opnd) {
ri_form(MGHI, r1, opnd);
}
-// Multiply Single Immediate (64)
-void Assembler::msgfi(Register r1, const Operand& opnd) {
- ril_form(MSGFI, r1, opnd);
-}
-
// Multiply Single Register-Storage (64)
void Assembler::msg(Register r1, const MemOperand& opnd) {
rxy_form(MSG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Multiply Single Register-Register (64)
-void Assembler::msgr(Register r1, Register r2) { rre_form(MSGR, r1, r2); }
-
// --------------------------
// 32-bit Divide Instructions
// --------------------------
@@ -2194,29 +1915,11 @@ void Assembler::d(Register r1, const MemOperand& opnd) {
rx_form(D, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Divide Register (32<-64)
-void Assembler::dr(Register r1, Register r2) {
- DCHECK(r1.code() % 2 == 0);
- rr_form(DR, r1, r2);
-}
-
// Divide Logical Register-Storage (32<-64)
void Assembler::dl(Register r1, const MemOperand& opnd) {
rx_form(DL, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Divide Logical Register (32<-64)
-void Assembler::dlr(Register r1, Register r2) { rre_form(DLR, r1, r2); }
-
-// --------------------------
-// 64-bit Divide Instructions
-// --------------------------
-// Divide Logical Register (64<-128)
-void Assembler::dlgr(Register r1, Register r2) { rre_form(DLGR, r1, r2); }
-
-// Divide Single Register (64<-32)
-void Assembler::dsgr(Register r1, Register r2) { rre_form(DSGR, r1, r2); }
-
// --------------------
// Bitwise Instructions
// --------------------
@@ -2230,9 +1933,6 @@ void Assembler::ny(Register r1, const MemOperand& opnd) {
rxy_form(NY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// AND Register (32)
-void Assembler::nr(Register r1, Register r2) { rr_form(NR, r1, r2); }
-
// AND Register-Register-Register (32)
void Assembler::nrk(Register r1, Register r2, Register r3) {
rrf1_form(NRK, r1, r2, r3);
@@ -2243,9 +1943,6 @@ void Assembler::ng(Register r1, const MemOperand& opnd) {
rxy_form(NG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// AND Register (64)
-void Assembler::ngr(Register r1, Register r2) { rre_form(NGR, r1, r2); }
-
// AND Register-Register-Register (64)
void Assembler::ngrk(Register r1, Register r2, Register r3) {
rrf1_form(NGRK, r1, r2, r3);
@@ -2261,9 +1958,6 @@ void Assembler::oy(Register r1, const MemOperand& opnd) {
rxy_form(OY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// OR Register (32)
-void Assembler::or_z(Register r1, Register r2) { rr_form(OR, r1, r2); }
-
// OR Register-Register-Register (32)
void Assembler::ork(Register r1, Register r2, Register r3) {
rrf1_form(ORK, r1, r2, r3);
@@ -2274,9 +1968,6 @@ void Assembler::og(Register r1, const MemOperand& opnd) {
rxy_form(OG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// OR Register (64)
-void Assembler::ogr(Register r1, Register r2) { rre_form(OGR, r1, r2); }
-
// OR Register-Register-Register (64)
void Assembler::ogrk(Register r1, Register r2, Register r3) {
rrf1_form(OGRK, r1, r2, r3);
@@ -2292,9 +1983,6 @@ void Assembler::xy(Register r1, const MemOperand& opnd) {
rxy_form(XY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// XOR Register (32)
-void Assembler::xr(Register r1, Register r2) { rr_form(XR, r1, r2); }
-
// XOR Register-Register-Register (32)
void Assembler::xrk(Register r1, Register r2, Register r3) {
rrf1_form(XRK, r1, r2, r3);
@@ -2305,9 +1993,6 @@ void Assembler::xg(Register r1, const MemOperand& opnd) {
rxy_form(XG, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// XOR Register (64)
-void Assembler::xgr(Register r1, Register r2) { rre_form(XGR, r1, r2); }
-
// XOR Register-Register-Register (64)
void Assembler::xgrk(Register r1, Register r2, Register r3) {
rrf1_form(XGRK, r1, r2, r3);
@@ -2320,19 +2005,6 @@ void Assembler::xc(const MemOperand& opnd1, const MemOperand& opnd2,
opnd2.getBaseRegister(), opnd2.getDisplacement());
}
-// -------------------------------------------
-// Bitwise GPR <-> FPR Conversion Instructions
-// -------------------------------------------
-// Load GR from FPR (64 <- L)
-void Assembler::lgdr(Register r1, DoubleRegister f2) {
- rre_form(LGDR, r1, Register::from_code(f2.code()));
-}
-
-// Load FPR from FR (L <- 64)
-void Assembler::ldgr(DoubleRegister f1, Register r2) {
- rre_form(LDGR, Register::from_code(f1.code()), r2);
-}
-
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
GrowBuffer(space_needed);
@@ -2547,7 +2219,7 @@ void Assembler::jump(Handle<Code> target, RelocInfo::Mode rmode,
EnsureSpace ensure_space(this);
int32_t target_index = emit_code_target(target, rmode);
- brcl(cond, Operand(target_index), true);
+ brcl(cond, Operand(target_index));
}
// Store (32)
@@ -2621,16 +2293,6 @@ void Assembler::icy(Register r1, const MemOperand& opnd) {
rxy_form(ICY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Insert Immediate (High)
-void Assembler::iihf(Register r1, const Operand& opnd) {
- ril_form(IIHF, r1, opnd);
-}
-
-// Insert Immediate (low)
-void Assembler::iilf(Register r1, const Operand& opnd) {
- ril_form(IILF, r1, opnd);
-}
-
// Insert Immediate (high high)
void Assembler::iihh(Register r1, const Operand& opnd) {
ri_form(IIHH, r1, opnd);
@@ -2651,164 +2313,46 @@ void Assembler::iill(Register r1, const Operand& opnd) {
ri_form(IILL, r1, opnd);
}
-// Load Immediate 32->64
-void Assembler::lgfi(Register r1, const Operand& opnd) {
- ril_form(LGFI, r1, opnd);
-}
-
// GPR <-> FPR Instructions
// Floating point instructions
//
-// Load zero Register (64)
-void Assembler::lzdr(DoubleRegister r1) {
- rre_form(LZDR, Register::from_code(r1.code()), Register::from_code(0));
-}
-
-// Add Register-Register (LB)
-void Assembler::aebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(AEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Add Register-Storage (LB)
void Assembler::adb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(ADB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Add Register-Register (LB)
-void Assembler::adbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(ADBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Compare Register-Register (LB)
-void Assembler::cebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(CEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Compare Register-Storage (LB)
void Assembler::cdb(DoubleRegister r1, const MemOperand& opnd) {
rx_form(CD, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Compare Register-Register (LB)
-void Assembler::cdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(CDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Divide Register-Register (LB)
-void Assembler::debr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(DEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Divide Register-Storage (LB)
void Assembler::ddb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(DDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Divide Register-Register (LB)
-void Assembler::ddbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(DDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Multiply Register-Register (LB)
-void Assembler::meebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(MEEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Multiply Register-Storage (LB)
void Assembler::mdb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(MDB, Register::from_code(r1.code()), opnd.rb(), opnd.rx(),
opnd.offset());
}
-// Multiply Register-Register (LB)
-void Assembler::mdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(MDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Subtract Register-Register (LB)
-void Assembler::sebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(SEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Subtract Register-Storage (LB)
void Assembler::sdb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(SDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Subtract Register-Register (LB)
-void Assembler::sdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(SDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Square Root (LB)
void Assembler::sqdb(DoubleRegister r1, const MemOperand& opnd) {
rxe_form(SQDB, Register::from_code(r1.code()), opnd.rx(), opnd.rb(),
opnd.offset());
}
-// Square Root Register-Register (LB)
-void Assembler::sqebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(SQEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Square Root Register-Register (LB)
-void Assembler::sqdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(SQDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Rounded (double -> float)
-void Assembler::ledbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LEDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Lengthen (float -> double)
-void Assembler::ldebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LDEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Complement Register-Register (LB)
-void Assembler::lcdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LCDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Complement Register-Register (LB)
-void Assembler::lcebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LCEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Positive Register-Register (LB)
-void Assembler::lpebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LPEBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
-// Load Positive Register-Register (LB)
-void Assembler::lpdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LPDBR, Register::from_code(r1.code()),
- Register::from_code(r2.code()));
-}
-
// Store Double (64)
void Assembler::std(DoubleRegister r1, const MemOperand& opnd) {
rx_form(STD, r1, opnd.rx(), opnd.rb(), opnd.offset());
@@ -2855,21 +2399,6 @@ void Assembler::ley(DoubleRegister r1, const MemOperand& opnd) {
rxy_form(LEY, r1, opnd.rx(), opnd.rb(), opnd.offset());
}
-// Load Double Register-Register (64)
-void Assembler::ldr(DoubleRegister r1, DoubleRegister r2) {
- rr_form(LDR, r1, r2);
-}
-
-// Load And Test Register-Register (L)
-void Assembler::ltebr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LTEBR, r1, r2);
-}
-
-// Load And Test Register-Register (L)
-void Assembler::ltdbr(DoubleRegister r1, DoubleRegister r2) {
- rre_form(LTDBR, r1, r2);
-}
-
// Convert to Fixed point (64<-S)
void Assembler::cgebr(Condition m, Register r1, DoubleRegister r2) {
rrfe_form(CGEBR, m, Condition(0), r1, Register::from_code(r2.code()));
@@ -2885,21 +2414,6 @@ void Assembler::cfdbr(Condition m, Register r1, DoubleRegister r2) {
rrfe_form(CFDBR, m, Condition(0), r1, Register::from_code(r2.code()));
}
-// Convert from Fixed point (L<-64)
-void Assembler::cegbr(DoubleRegister r1, Register r2) {
- rre_form(CEGBR, Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed point (L<-64)
-void Assembler::cdgbr(DoubleRegister r1, Register r2) {
- rre_form(CDGBR, Register::from_code(r1.code()), r2);
-}
-
-// Convert from Fixed point (L<-32)
-void Assembler::cdfbr(DoubleRegister r1, Register r2) {
- rre_form(CDFBR, Register::from_code(r1.code()), r2);
-}
-
// Convert to Fixed Logical (64<-L)
void Assembler::clgdbr(Condition m3, Condition m4, Register r1,
DoubleRegister r2) {
@@ -2988,20 +2502,6 @@ void Assembler::fidbra(DoubleRegister d1, DoubleRegister d2, FIDBRA_MASK3 m3) {
rrf2_form(FIDBRA << 16 | m3 * B12 | d1.code() * B4 | d2.code());
}
-// Multiply and Add - MADBR R1, R3, R2
-// R1 = R3 * R2 + R1
-void Assembler::madbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
- rrd_form(MADBR, Register::from_code(d1.code()),
- Register::from_code(d3.code()), Register::from_code(d2.code()));
-}
-
-// Multiply and Subtract - MSDBR R1, R3, R2
-// R1 = R3 * R2 - R1
-void Assembler::msdbr(DoubleRegister d1, DoubleRegister d3, DoubleRegister d2) {
- rrd_form(MSDBR, Register::from_code(d1.code()),
- Register::from_code(d3.code()), Register::from_code(d2.code()));
-}
-
// end of S390instructions
bool Assembler::IsNop(SixByteInstr instr, int type) {
@@ -3012,6 +2512,21 @@ bool Assembler::IsNop(SixByteInstr instr, int type) {
return ((instr & 0xffff) == 0x1800); // lr r0,r0
}
+// Dummy instruction reserved for special use; emitted only under the simulator.
+void Assembler::dumy(int r1, int x2, int b2, int d2) {
+#if defined(USE_SIMULATOR)
+ int op = 0xE353;
+ uint64_t code = (static_cast<uint64_t>(op & 0xFF00)) * B32 |
+ (static_cast<uint64_t>(r1) & 0xF) * B36 |
+ (static_cast<uint64_t>(x2) & 0xF) * B32 |
+ (static_cast<uint64_t>(b2) & 0xF) * B28 |
+ (static_cast<uint64_t>(d2 & 0x0FFF)) * B16 |
+ (static_cast<uint64_t>(d2 & 0x0FF000)) >> 4 |
+ (static_cast<uint64_t>(op & 0x00FF));
+ emit6bytes(code);
+#endif
+}
+
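// A minimal sketch of the 20-bit displacement split used above: RXY-format
// encodings carry DL2 (low 12 bits) at instruction bits 20..31 and DH2
// (high 8 bits) at bits 32..39, which is why d2 is split into (d2 & 0x0FFF)
// and (d2 & 0x0FF000) >> 12. SplitDisp20 is a hypothetical helper.
inline void SplitDisp20(int d2, int* dl2, int* dh2) {
  *dl2 = d2 & 0x0FFF;            // low 12 bits -> DL2
  *dh2 = (d2 & 0x0FF000) >> 12;  // high 8 bits -> DH2
}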
void Assembler::GrowBuffer(int needed) {
if (!own_buffer_) FATAL("external code buffer is too small");
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 65f0126580..89a3182f1a 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -545,6 +545,163 @@ class Assembler : public AssemblerBase {
// ---------------------------------------------------------------------------
// Code generation
+ template <class T, int size, int lo, int hi>
+ inline T getfield(T value) {
+ DCHECK(lo < hi);
+ DCHECK(size > 0);
+ int mask = hi - lo;
+ int shift = size * 8 - hi;
+ uint32_t mask_value = (mask == 32) ? 0xffffffff : (1 << mask) - 1;
+ return (value & mask_value) << shift;
+ }
+
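// A worked example of getfield<> (values computed by hand, illustrative):
//   getfield<uint32_t, 4, 8, 12>(0xB)    == 0xB    << (32 - 12) == 0x00B00000
//   getfield<uint32_t, 4, 0, 16>(0xB9E8) == 0xB9E8 << (32 - 16) == 0xB9E80000
// i.e. the field is masked to (hi - lo) bits, then shifted so it occupies
// big-endian bit positions [lo, hi) of a size-byte encoding.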
+ // Declare generic instruction formats by fields
+ inline void e_format(Opcode opcode) {
+ emit2bytes(getfield<uint16_t, 2, 0, 16>(opcode));
+ }
+
+ inline void i_format(Opcode opcode, int f1) {
+ emit2bytes(getfield<uint16_t, 2, 0, 8>(opcode) |
+ getfield<uint16_t, 2, 8, 16>(f1));
+ }
+
+ inline void ie_format(Opcode opcode, int f1, int f2) {
+ emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+ getfield<uint32_t, 4, 24, 28>(f1) |
+ getfield<uint32_t, 4, 28, 32>(f2));
+ }
+ inline void mii_format(Opcode opcode, int f1, int f2, int f3) {
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(opcode) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 24>(f2) | getfield<uint64_t, 6, 24, 48>(f3));
+ }
+
+ inline void ri_format(Opcode opcode, int f1, int f2) {
+ uint32_t op1 = opcode >> 4;
+ uint32_t op2 = opcode & 0xf;
+ emit4bytes(
+ getfield<uint32_t, 4, 0, 8>(op1) | getfield<uint32_t, 4, 8, 12>(f1) |
+ getfield<uint32_t, 4, 12, 16>(op2) | getfield<uint32_t, 4, 16, 32>(f2));
+ }
+
+ inline void rie_1_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 32, 36>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
+ }
+
+ inline void rie_2_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 32>(f3) |
+ getfield<uint64_t, 6, 32, 40>(f4) | getfield<uint64_t, 6, 40, 48>(op2));
+ }
+
+ inline void rie_3_format(Opcode opcode, int f1, int f2, int f3, int f4,
+ int f5) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 24>(f3) |
+ getfield<uint64_t, 6, 24, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op2));
+ }
+
+#define DECLARE_S390_RIL_AB_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1> \
+ inline void name(R1 r1, const Operand& i2) { \
+ ril_format(op_name, r1.code(), i2.immediate()); \
+ }
+#define DECLARE_S390_RIL_C_INSTRUCTIONS(name, op_name, op_value) \
+ inline void name(Condition m1, const Operand& i2) { \
+ ril_format(op_name, m1, i2.immediate()); \
+ }
+
+ inline void ril_format(Opcode opcode, int f1, int f2) {
+ uint32_t op1 = opcode >> 4;
+ uint32_t op2 = opcode & 0xf;
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(op2) | getfield<uint64_t, 6, 16, 48>(f2));
+ }
+ S390_RIL_A_OPCODE_LIST(DECLARE_S390_RIL_AB_INSTRUCTIONS)
+ S390_RIL_B_OPCODE_LIST(DECLARE_S390_RIL_AB_INSTRUCTIONS)
+ S390_RIL_C_OPCODE_LIST(DECLARE_S390_RIL_C_INSTRUCTIONS)
+#undef DECLARE_S390_RIL_AB_INSTRUCTIONS
+#undef DECLARE_S390_RIL_C_INSTRUCTIONS
+
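// A minimal sketch of the expansion for a hypothetical RIL list entry
// X(iihf, IIHF, 0xC08):
//   template <class R1>
//   inline void iihf(R1 r1, const Operand& i2) {
//     ril_format(IIHF, r1.code(), i2.immediate());
//   }
// ril_format then splits the 12-bit opcode into its high byte (bits 0..7)
// and low nibble (bits 12..15) around the 4-bit R1 field.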
+ inline void ris_format(Opcode opcode, int f1, int f2, int f3, int f4,
+ int f5) {
+ uint32_t op1 = opcode >> 8;
+ uint32_t op2 = opcode & 0xff;
+ emit6bytes(
+ getfield<uint64_t, 6, 0, 8>(op1) | getfield<uint64_t, 6, 8, 12>(f1) |
+ getfield<uint64_t, 6, 12, 16>(f2) | getfield<uint64_t, 6, 16, 20>(f3) |
+ getfield<uint64_t, 6, 20, 32>(f4) | getfield<uint64_t, 6, 32, 40>(f5) |
+ getfield<uint64_t, 6, 40, 48>(op2));
+ }
+
+#define DECLARE_S390_RR_INSTRUCTIONS(name, op_name, op_value) \
+ inline void name(Register r1, Register r2) { \
+ rr_format(op_name, r1.code(), r2.code()); \
+ } \
+ inline void name(DoubleRegister r1, DoubleRegister r2) { \
+ rr_format(op_name, r1.code(), r2.code()); \
+ } \
+ inline void name(Condition m1, Register r2) { \
+ rr_format(op_name, m1, r2.code()); \
+ }
+
+ inline void rr_format(Opcode opcode, int f1, int f2) {
+ emit2bytes(getfield<uint16_t, 2, 0, 8>(opcode) |
+ getfield<uint16_t, 2, 8, 12>(f1) |
+ getfield<uint16_t, 2, 12, 16>(f2));
+ }
+ S390_RR_OPCODE_LIST(DECLARE_S390_RR_INSTRUCTIONS)
+#undef DECLARE_S390_RR_INSTRUCTIONS
+
+#define DECLARE_S390_RRD_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2, class R3> \
+ inline void name(R1 r1, R3 r3, R2 r2) { \
+ rrd_format(op_name, r1.code(), r3.code(), r2.code()); \
+ }
+ inline void rrd_format(Opcode opcode, int f1, int f2, int f3) {
+ emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+ getfield<uint32_t, 4, 16, 20>(f1) |
+ getfield<uint32_t, 4, 24, 28>(f2) |
+ getfield<uint32_t, 4, 28, 32>(f3));
+ }
+ S390_RRD_OPCODE_LIST(DECLARE_S390_RRD_INSTRUCTIONS)
+#undef DECLARE_S390_RRD_INSTRUCTIONS
+
+#define DECLARE_S390_RRE_INSTRUCTIONS(name, op_name, op_value) \
+ template <class R1, class R2> \
+ inline void name(R1 r1, R2 r2) { \
+ rre_format(op_name, r1.code(), r2.code()); \
+ }
+ inline void rre_format(Opcode opcode, int f1, int f2) {
+ emit4bytes(getfield<uint32_t, 4, 0, 16>(opcode) |
+ getfield<uint32_t, 4, 24, 28>(f1) |
+ getfield<uint32_t, 4, 28, 32>(f2));
+ }
+ S390_RRE_OPCODE_LIST(DECLARE_S390_RRE_INSTRUCTIONS)
+ // Special format: LZDR has no second operand, so 0 fills that field.
+ void lzdr(DoubleRegister r1) { rre_format(LZDR, r1.code(), 0); }
+#undef DECLARE_S390_RRE_INSTRUCTIONS
+
+ inline void rrf_format(Opcode opcode, int f1, int f2, int f3, int f4) {
+ emit4bytes(
+ getfield<uint32_t, 4, 0, 16>(opcode) |
+ getfield<uint32_t, 4, 16, 20>(f1) | getfield<uint32_t, 4, 20, 24>(f2) |
+ getfield<uint32_t, 4, 24, 28>(f3) | getfield<uint32_t, 4, 28, 32>(f4));
+ }
+
// Helper for unconditional branch to Label that also updates the save register
void b(Register r, Label* l) {
int32_t halfwords = branch_offset(l) / 2;
@@ -647,10 +804,6 @@ class Assembler : public AssemblerBase {
void name(Register r1, Register r2, const Operand& i3, const Operand& i4, \
const Operand& i5)
-#define RIL1_FORM(name) void name(Register r1, const Operand& i2)
-
-#define RIL2_FORM(name) void name(Condition m1, const Operand& i2)
-
#define RXE_FORM(name) \
void name(Register r1, const MemOperand& opnd); \
void name(Register r1, Register b2, Register x2, Disp d2)
@@ -679,8 +832,6 @@ class Assembler : public AssemblerBase {
void name(Register b1, Disp d1, const Operand& i2); \
void name(const MemOperand& opnd, const Operand& i2)
-#define RRE_FORM(name) void name(Register r1, Register r2)
-
#define RRF1_FORM(name) void name(Register r1, Register r2, Register r3)
#define RRF2_FORM(name) void name(Condition m1, Register r1, Register r2)
@@ -712,8 +863,6 @@ class Assembler : public AssemblerBase {
void name(Register r1, Condition m3, Register b2, Disp d2); \
void name(Register r1, Condition m3, const MemOperand& opnd)
-#define RRD_FORM(name) void name(Register r1, Register r3, Register r2)
-
#define RRS_FORM(name) \
void name(Register r1, Register r2, Register b4, Disp d4, Condition m3); \
void name(Register r1, Register r2, Condition m3, const MemOperand& opnd)
@@ -758,64 +907,89 @@ class Assembler : public AssemblerBase {
void name(Register r3, Register b1, Disp d1, Register b2, Disp d2); \
void name(Register r3, const MemOperand& opnd1, const MemOperand& opnd2)
+#define DECLARE_VRR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, Condition m5, Condition m4, \
+ Condition m3) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint64_t>(m5 & 0xF)) * B20 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B16 | \
+ (static_cast<uint64_t>(m3 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRR_A_OPCODE_LIST(DECLARE_VRR_A_INSTRUCTIONS)
+#undef DECLARE_VRR_A_INSTRUCTIONS
+
+#define DECLARE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ void name(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3, \
+ Condition m6, Condition m5, Condition m4) { \
+ uint64_t code = (static_cast<uint64_t>(opcode_value & 0xFF00)) * B32 | \
+ (static_cast<uint64_t>(v1.code())) * B36 | \
+ (static_cast<uint64_t>(v2.code())) * B32 | \
+ (static_cast<uint64_t>(v3.code())) * B28 | \
+ (static_cast<uint64_t>(m6 & 0xF)) * B20 | \
+ (static_cast<uint64_t>(m5 & 0xF)) * B16 | \
+ (static_cast<uint64_t>(m4 & 0xF)) * B12 | \
+ (static_cast<uint64_t>(opcode_value & 0x00FF)); \
+ emit6bytes(code); \
+ }
+ S390_VRR_C_OPCODE_LIST(DECLARE_VRR_C_INSTRUCTIONS)
+#undef DECLARE_VRR_C_INSTRUCTIONS
+
+ // Single Element format
+ void vfa(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+ vfa(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+ static_cast<Condition>(3));
+ }
+ void vfs(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+ vfs(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+ static_cast<Condition>(3));
+ }
+ void vfm(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+ vfm(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+ static_cast<Condition>(3));
+ }
+ void vfd(DoubleRegister v1, DoubleRegister v2, DoubleRegister v3) {
+ vfd(v1, v2, v3, static_cast<Condition>(0), static_cast<Condition>(8),
+ static_cast<Condition>(3));
+ }
+
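// A note on the single-element wrappers above: with the VRR-C field layout
// used here, the trailing arguments land in the M6/M5/M4 mask fields, so
// (0, 8, 3) selects 64-bit elements (m4 = 3) and sets the single-element
// bit (m5 = 8). vfa(v1, v2, v3) therefore behaves as a scalar
// double-precision add; this reading follows the encoding above.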
// S390 instruction sets
RX_FORM(bc);
- RR_FORM(bctr);
RX_FORM(cd);
- RRE_FORM(cdr);
RXE_FORM(cdb);
RXE_FORM(ceb);
RXE_FORM(ddb);
- RRE_FORM(ddbr);
SS1_FORM(ed);
- RRE_FORM(epair);
RX_FORM(ex);
RRF2_FORM(fidbr);
- RRE_FORM(flogr);
RX_FORM(ic_z);
RXY_FORM(icy);
- RIL1_FORM(iihf);
RI1_FORM(iihh);
RI1_FORM(iihl);
- RIL1_FORM(iilf);
- RIL1_FORM(lgfi);
RI1_FORM(iilh);
RI1_FORM(iill);
- RRE_FORM(lcgr);
- RR_FORM(lcr);
RX_FORM(le_z);
RXY_FORM(ley);
- RIL1_FORM(llihf);
- RIL1_FORM(llilf);
- RRE_FORM(lngr);
- RR_FORM(lnr);
RSY1_FORM(loc);
RXY_FORM(lrv);
- RRE_FORM(lrvr);
- RRE_FORM(lrvgr);
RXY_FORM(lrvh);
RXY_FORM(lrvg);
RXE_FORM(mdb);
- RRE_FORM(mdbr);
SS4_FORM(mvck);
SSF_FORM(mvcos);
SS4_FORM(mvcs);
SS1_FORM(mvn);
SS1_FORM(nc);
SI_FORM(ni);
- RIL1_FORM(nihf);
- RIL1_FORM(nilf);
RI1_FORM(nilh);
RI1_FORM(nill);
- RIL1_FORM(oihf);
- RIL1_FORM(oilf);
RI1_FORM(oill);
- RRE_FORM(popcnt);
+ RXY_FORM(pfd);
RXE_FORM(sdb);
- RRE_FORM(sdbr);
- RIL1_FORM(slfi);
RXY_FORM(slgf);
- RIL1_FORM(slgfi);
RS1_FORM(srdl);
RX_FORM(ste);
RXY_FORM(stey);
@@ -825,51 +999,35 @@ class Assembler : public AssemblerBase {
RI1_FORM(tmll);
SS1_FORM(tr);
S_FORM(ts);
- RIL1_FORM(xihf);
- RIL1_FORM(xilf);
// Load Address Instructions
void la(Register r, const MemOperand& opnd);
void lay(Register r, const MemOperand& opnd);
- void larl(Register r1, const Operand& opnd);
void larl(Register r, Label* l);
// Load Instructions
void lb(Register r, const MemOperand& src);
- void lbr(Register r1, Register r2);
void lgb(Register r, const MemOperand& src);
- void lgbr(Register r1, Register r2);
void lh(Register r, const MemOperand& src);
void lhy(Register r, const MemOperand& src);
- void lhr(Register r1, Register r2);
void lgh(Register r, const MemOperand& src);
- void lghr(Register r1, Register r2);
void l(Register r, const MemOperand& src);
void ly(Register r, const MemOperand& src);
- void lr(Register r1, Register r2);
void lg(Register r, const MemOperand& src);
- void lgr(Register r1, Register r2);
void lgf(Register r, const MemOperand& src);
- void lgfr(Register r1, Register r2);
void lhi(Register r, const Operand& imm);
void lghi(Register r, const Operand& imm);
// Load And Test Instructions
void lt_z(Register r, const MemOperand& src);
void ltg(Register r, const MemOperand& src);
- void ltr(Register r1, Register r2);
- void ltgr(Register r1, Register r2);
- void ltgfr(Register r1, Register r2);
// Load Logical Instructions
void llc(Register r, const MemOperand& src);
void llgc(Register r, const MemOperand& src);
void llgf(Register r, const MemOperand& src);
- void llgfr(Register r1, Register r2);
void llh(Register r, const MemOperand& src);
void llgh(Register r, const MemOperand& src);
- void llhr(Register r1, Register r2);
- void llghr(Register r1, Register r2);
// Load Multiple Instructions
void lm(Register r1, Register r2, const MemOperand& src);
@@ -899,24 +1057,16 @@ class Assembler : public AssemblerBase {
// Compare Instructions
void c(Register r, const MemOperand& opnd);
void cy(Register r, const MemOperand& opnd);
- void cr_z(Register r1, Register r2);
void cg(Register r, const MemOperand& opnd);
- void cgr(Register r1, Register r2);
void ch(Register r, const MemOperand& opnd);
void chy(Register r, const MemOperand& opnd);
void chi(Register r, const Operand& opnd);
void cghi(Register r, const Operand& opnd);
- void cfi(Register r, const Operand& opnd);
- void cgfi(Register r, const Operand& opnd);
// Compare Logical Instructions
void cl(Register r, const MemOperand& opnd);
void cly(Register r, const MemOperand& opnd);
- void clr(Register r1, Register r2);
void clg(Register r, const MemOperand& opnd);
- void clgr(Register r1, Register r2);
- void clfi(Register r, const Operand& opnd);
- void clgfi(Register r, const Operand& opnd);
void cli(const MemOperand& mem, const Operand& imm);
void cliy(const MemOperand& mem, const Operand& imm);
void clc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
@@ -978,52 +1128,38 @@ class Assembler : public AssemblerBase {
void mvc(const MemOperand& opnd1, const MemOperand& opnd2, uint32_t length);
// Branch Instructions
- void basr(Register r1, Register r2);
- void bcr(Condition m, Register target);
void bct(Register r, const MemOperand& opnd);
void bctg(Register r, const MemOperand& opnd);
void bras(Register r, const Operand& opnd);
- void brasl(Register r, const Operand& opnd);
void brc(Condition c, const Operand& opnd);
- void brcl(Condition m, const Operand& opnd, bool isCodeTarget = false);
void brct(Register r1, const Operand& opnd);
void brctg(Register r1, const Operand& opnd);
// 32-bit Add Instructions
void a(Register r1, const MemOperand& opnd);
void ay(Register r1, const MemOperand& opnd);
- void afi(Register r1, const Operand& opnd);
void ah(Register r1, const MemOperand& opnd);
void ahy(Register r1, const MemOperand& opnd);
void ahi(Register r1, const Operand& opnd);
void ahik(Register r1, Register r3, const Operand& opnd);
- void ar(Register r1, Register r2);
void ark(Register r1, Register r2, Register r3);
void asi(const MemOperand&, const Operand&);
// 64-bit Add Instructions
void ag(Register r1, const MemOperand& opnd);
void agf(Register r1, const MemOperand& opnd);
- void agfi(Register r1, const Operand& opnd);
- void agfr(Register r1, Register r2);
void aghi(Register r1, const Operand& opnd);
void aghik(Register r1, Register r3, const Operand& opnd);
- void agr(Register r1, Register r2);
void agrk(Register r1, Register r2, Register r3);
void agsi(const MemOperand&, const Operand&);
// 32-bit Add Logical Instructions
void al_z(Register r1, const MemOperand& opnd);
void aly(Register r1, const MemOperand& opnd);
- void alfi(Register r1, const Operand& opnd);
- void alr(Register r1, Register r2);
- void alcr(Register r1, Register r2);
void alrk(Register r1, Register r2, Register r3);
// 64-bit Add Logical Instructions
void alg(Register r1, const MemOperand& opnd);
- void algfi(Register r1, const Operand& opnd);
- void algr(Register r1, Register r2);
void algrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Instructions
@@ -1031,107 +1167,71 @@ class Assembler : public AssemblerBase {
void sy(Register r1, const MemOperand& opnd);
void sh(Register r1, const MemOperand& opnd);
void shy(Register r1, const MemOperand& opnd);
- void sr(Register r1, Register r2);
void srk(Register r1, Register r2, Register r3);
// 64-bit Subtract Instructions
void sg(Register r1, const MemOperand& opnd);
void sgf(Register r1, const MemOperand& opnd);
- void sgr(Register r1, Register r2);
- void sgfr(Register r1, Register r2);
void sgrk(Register r1, Register r2, Register r3);
// 32-bit Subtract Logical Instructions
void sl(Register r1, const MemOperand& opnd);
void sly(Register r1, const MemOperand& opnd);
- void slr(Register r1, Register r2);
void slrk(Register r1, Register r2, Register r3);
- void slbr(Register r1, Register r2);
// 64-bit Subtract Logical Instructions
void slg(Register r1, const MemOperand& opnd);
- void slgr(Register r1, Register r2);
void slgrk(Register r1, Register r2, Register r3);
// 32-bit Multiply Instructions
void m(Register r1, const MemOperand& opnd);
void mfy(Register r1, const MemOperand& opnd);
- void mr_z(Register r1, Register r2);
void ml(Register r1, const MemOperand& opnd);
- void mlr(Register r1, Register r2);
void ms(Register r1, const MemOperand& opnd);
void msy(Register r1, const MemOperand& opnd);
- void msfi(Register r1, const Operand& opnd);
- void msr(Register r1, Register r2);
void mh(Register r1, const MemOperand& opnd);
void mhy(Register r1, const MemOperand& opnd);
void mhi(Register r1, const Operand& opnd);
+ void msrkc(Register r1, Register r2, Register r3);
+ void msgrkc(Register r1, Register r2, Register r3);
// 64-bit Multiply Instructions
void mlg(Register r1, const MemOperand& opnd);
- void mlgr(Register r1, Register r2);
void mghi(Register r1, const Operand& opnd);
- void msgfi(Register r1, const Operand& opnd);
void msg(Register r1, const MemOperand& opnd);
- void msgr(Register r1, Register r2);
// 32-bit Divide Instructions
void d(Register r1, const MemOperand& opnd);
- void dr(Register r1, Register r2);
void dl(Register r1, const MemOperand& opnd);
- void dlr(Register r1, Register r2);
-
- // 64-bit Divide Instructions
- void dlgr(Register r1, Register r2);
- void dsgr(Register r1, Register r2);
// Bitwise Instructions (AND / OR / XOR)
void n(Register r1, const MemOperand& opnd);
void ny(Register r1, const MemOperand& opnd);
- void nr(Register r1, Register r2);
void nrk(Register r1, Register r2, Register r3);
void ng(Register r1, const MemOperand& opnd);
- void ngr(Register r1, Register r2);
void ngrk(Register r1, Register r2, Register r3);
void o(Register r1, const MemOperand& opnd);
void oy(Register r1, const MemOperand& opnd);
- void or_z(Register r1, Register r2);
void ork(Register r1, Register r2, Register r3);
void og(Register r1, const MemOperand& opnd);
- void ogr(Register r1, Register r2);
void ogrk(Register r1, Register r2, Register r3);
void x(Register r1, const MemOperand& opnd);
void xy(Register r1, const MemOperand& opnd);
- void xr(Register r1, Register r2);
void xrk(Register r1, Register r2, Register r3);
void xg(Register r1, const MemOperand& opnd);
- void xgr(Register r1, Register r2);
void xgrk(Register r1, Register r2, Register r3);
void xc(const MemOperand& opnd1, const MemOperand& opnd2, Length length);
- // Bitwise GPR <-> FPR Conversion Instructions
- void lgdr(Register r1, DoubleRegister f2);
- void ldgr(DoubleRegister f1, Register r2);
-
// Floating Point Load / Store Instructions
void ld(DoubleRegister r1, const MemOperand& opnd);
void ldy(DoubleRegister r1, const MemOperand& opnd);
void le_z(DoubleRegister r1, const MemOperand& opnd);
void ley(DoubleRegister r1, const MemOperand& opnd);
- void ldr(DoubleRegister r1, DoubleRegister r2);
- void ltdbr(DoubleRegister r1, DoubleRegister r2);
- void ltebr(DoubleRegister r1, DoubleRegister r2);
void std(DoubleRegister r1, const MemOperand& opnd);
void stdy(DoubleRegister r1, const MemOperand& opnd);
void ste(DoubleRegister r1, const MemOperand& opnd);
void stey(DoubleRegister r1, const MemOperand& opnd);
- // Floating Point Load Rounded/Positive Instructions
- void ledbr(DoubleRegister r1, DoubleRegister r2);
- void ldebr(DoubleRegister r1, DoubleRegister r2);
- void lpebr(DoubleRegister r1, DoubleRegister r2);
- void lpdbr(DoubleRegister r1, DoubleRegister r2);
-
// Floating <-> Fixed Point Conversion Instructions
void cdlfbr(Condition m3, Condition m4, DoubleRegister fltReg,
Register fixReg);
@@ -1150,40 +1250,20 @@ class Assembler : public AssemblerBase {
void clgebr(Condition m3, Condition m4, Register fixReg,
DoubleRegister fltReg);
void cfdbr(Condition m, Register fixReg, DoubleRegister fltReg);
- void cdfbr(DoubleRegister fltReg, Register fixReg);
void cgebr(Condition m, Register fixReg, DoubleRegister fltReg);
void cgdbr(Condition m, Register fixReg, DoubleRegister fltReg);
- void cegbr(DoubleRegister fltReg, Register fixReg);
- void cdgbr(DoubleRegister fltReg, Register fixReg);
void cfebr(Condition m3, Register fixReg, DoubleRegister fltReg);
void cefbr(Condition m3, DoubleRegister fltReg, Register fixReg);
// Floating Point Compare Instructions
- void cebr(DoubleRegister r1, DoubleRegister r2);
void cdb(DoubleRegister r1, const MemOperand& opnd);
- void cdbr(DoubleRegister r1, DoubleRegister r2);
// Floating Point Arithmetic Instructions
- void aebr(DoubleRegister r1, DoubleRegister r2);
void adb(DoubleRegister r1, const MemOperand& opnd);
- void adbr(DoubleRegister r1, DoubleRegister r2);
- void lzdr(DoubleRegister r1);
- void sebr(DoubleRegister r1, DoubleRegister r2);
void sdb(DoubleRegister r1, const MemOperand& opnd);
- void sdbr(DoubleRegister r1, DoubleRegister r2);
- void meebr(DoubleRegister r1, DoubleRegister r2);
void mdb(DoubleRegister r1, const MemOperand& opnd);
- void mdbr(DoubleRegister r1, DoubleRegister r2);
- void debr(DoubleRegister r1, DoubleRegister r2);
void ddb(DoubleRegister r1, const MemOperand& opnd);
- void ddbr(DoubleRegister r1, DoubleRegister r2);
- void madbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
- void msdbr(DoubleRegister r1, DoubleRegister r2, DoubleRegister r3);
- void sqebr(DoubleRegister r1, DoubleRegister r2);
void sqdb(DoubleRegister r1, const MemOperand& opnd);
- void sqdbr(DoubleRegister r1, DoubleRegister r2);
- void lcdbr(DoubleRegister r1, DoubleRegister r2);
- void lcebr(DoubleRegister r1, DoubleRegister r2);
void ldeb(DoubleRegister r1, const MemOperand& opnd);
enum FIDBRA_MASK3 {
@@ -1224,6 +1304,8 @@ class Assembler : public AssemblerBase {
void nop(int type = 0); // 0 is the default non-marking type.
+ void dumy(int r1, int x2, int b2, int d2);
+
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
return pc_offset() - label->pos();
@@ -1231,9 +1313,6 @@ class Assembler : public AssemblerBase {
// Debugging
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1367,9 +1446,6 @@ class Assembler : public AssemblerBase {
// Helpers to emit binary encoding for various instruction formats.
- inline void rr_form(Opcode op, Register r1, Register r2);
- inline void rr_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
- inline void rr_form(Opcode op, Condition m1, Register r2);
inline void rr2_form(uint8_t op, Condition m1, Register r2);
inline void rx_form(Opcode op, Register r1, Register x2, Register b2,
@@ -1384,17 +1460,9 @@ class Assembler : public AssemblerBase {
inline void rie_f_form(Opcode op, Register r1, Register r2, const Operand& i3,
const Operand& i4, const Operand& i5);
- inline void ril_form(Opcode op, Register r1, const Operand& i2);
- inline void ril_form(Opcode op, Condition m1, const Operand& i2);
-
inline void ris_form(Opcode op, Register r1, Condition m3, Register b4,
Disp d4, const Operand& i2);
- inline void rrd_form(Opcode op, Register r1, Register r3, Register r2);
-
- inline void rre_form(Opcode op, Register r1, Register r2);
- inline void rre_form(Opcode op, DoubleRegister r1, DoubleRegister r2);
-
inline void rrf1_form(Opcode op, Register r1, Register r2, Register r3);
inline void rrf1_form(uint32_t x);
inline void rrf2_form(uint32_t x);
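The header deletions above remove dozens of hand-written two-operand emitters (lr, ar, sr, and friends) together with their rr/rre/ril form helpers. Read alongside the S390_*_OPCODE_LIST tables added to constants-s390.h later in this patch, these emitters are plausibly regenerated from opcode tables rather than spelled out one by one. A minimal sketch of that pattern, with hypothetical macro, list, and emit-helper names (not the actual V8 code):

// Hypothetical sketch: stamp out RR-format (register-register) emitters
// from an opcode table; emit2bytes() and S390_RR_OPCODE_LIST are assumed.
#define DECLARE_RR_EMITTER(name, op_name, op_value)               \
  void name(Register r1, Register r2) {                           \
    /* RR encoding: 1-byte opcode, then the r1 and r2 nibbles. */ \
    emit2bytes((op_value << 8) | (r1.code() << 4) | r2.code());   \
  }
// S390_RR_OPCODE_LIST(DECLARE_RR_EMITTER)  // would declare lr, ar, sr, ...
#undef DECLARE_RR_EMITTER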
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 553d6d8ce4..5fdddc6e32 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -32,17 +32,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(r2, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
@@ -647,8 +636,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == eq) {
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(lhs, rhs);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(cp);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -847,7 +839,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
- isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
@@ -2166,44 +2157,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 };
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
- Register src, Register count,
- Register scratch,
- String::Encoding encoding) {
- if (FLAG_debug_code) {
- // Check that destination is word aligned.
- __ mov(r0, Operand(kPointerAlignmentMask));
- __ AndP(r0, dest);
- __ Check(eq, kDestinationOfCopyNotAligned, cr0);
- }
-
- // Nothing to do for zero characters.
- Label done;
- if (encoding == String::TWO_BYTE_ENCODING) {
- // double the length
- __ AddP(count, count, count);
- __ beq(&done, Label::kNear);
- } else {
- __ CmpP(count, Operand::Zero());
- __ beq(&done, Label::kNear);
- }
-
- // Copy count bytes from src to dst.
- Label byte_loop;
- // TODO(joransiu): Convert into MVC loop
- __ bind(&byte_loop);
- __ LoadlB(scratch, MemOperand(src));
- __ la(src, MemOperand(src, 1));
- __ stc(scratch, MemOperand(dest));
- __ la(dest, MemOperand(dest, 1));
- __ BranchOnCount(count, &byte_loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2792,83 +2745,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(
__ bne(miss);
}
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found. Jump to
-// the |miss| label otherwise.
-// If lookup was successful |scratch2| will be equal to elements + 4 * index.
-void NameDictionaryLookupStub::GeneratePositiveLookup(
- MacroAssembler* masm, Label* miss, Label* done, Register elements,
- Register name, Register scratch1, Register scratch2) {
- DCHECK(!elements.is(scratch1));
- DCHECK(!elements.is(scratch2));
- DCHECK(!name.is(scratch1));
- DCHECK(!name.is(scratch2));
-
- __ AssertName(name);
-
- // Compute the capacity mask.
- __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
- __ SmiUntag(scratch1); // convert smi to int
- __ SubP(scratch1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset));
- if (i > 0) {
- // Add the probe offset (i + i * i) left shifted to avoid right shifting
- // the hash in a separate instruction. The value hash + i + i * i is right
- // shifted in the following AND instruction.
- DCHECK(NameDictionary::GetProbeOffset(i) <
- 1 << (32 - Name::kHashFieldOffset));
- __ AddP(scratch2,
- Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
- }
- __ srl(scratch2, Operand(String::kHashShift));
- __ AndP(scratch2, scratch1);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- // scratch2 = scratch2 * 3.
- __ ShiftLeftP(ip, scratch2, Operand(1));
- __ AddP(scratch2, ip);
-
- // Check if the key is identical to the name.
- __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2));
- __ AddP(scratch2, elements, ip);
- __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
- __ CmpP(name, ip);
- __ beq(done);
- }
-
- const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() |
- r4.bit() | r3.bit() | r2.bit()) &
- ~(scratch1.bit() | scratch2.bit());
-
- __ LoadRR(r0, r14);
- __ MultiPush(spill_mask);
- if (name.is(r2)) {
- DCHECK(!elements.is(r3));
- __ LoadRR(r3, name);
- __ LoadRR(r2, elements);
- } else {
- __ LoadRR(r2, elements);
- __ LoadRR(r3, name);
- }
- NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
- __ CallStub(&stub);
- __ LoadRR(r1, r2);
- __ LoadRR(scratch2, r4);
- __ MultiPop(spill_mask);
- __ LoadRR(r14, r0);
-
- __ CmpP(r1, Operand::Zero());
- __ bne(done);
- __ beq(miss);
-}
-
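The removed GeneratePositiveLookup above probes an open-addressed NameDictionary: for probe i it computes the masked index (hash + i + i*i) & mask and scales it by the three-pointer entry size (per the STATIC_ASSERT in the body). A standalone sketch of that arithmetic in plain C++, with the helper name and pointer size treated as assumptions:

// Sketch of the probe arithmetic used by the removed helper.
inline int ProbeEntryByteOffset(uint32_t hash, int probe, uint32_t mask) {
  uint32_t index = (hash + probe + probe * probe) & mask;  // quadratic probing
  const int kEntrySize = 3;    // pointers per dictionary entry (asserted above)
  const int kPointerSize = 8;  // 64-bit s390x pointers (assumption)
  return static_cast<int>(index) * kEntrySize * kPointerSize;
}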
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -3145,240 +3021,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, bool is_polymorphic,
- Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register cached_map = scratch1;
-
- __ LoadP(cached_map,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&start_polymorphic, Label::kNear);
- // found, now call handler.
- Register handler = feedback;
- __ LoadP(handler,
- FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- Register length = scratch2;
- __ bind(&start_polymorphic);
- __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
- __ beq(miss);
- }
-
- Register too_far = length;
- Register pointer_reg = feedback;
-
- // +-----+------+------+-----+-----+ ... ----+
- // | map | len | wm0 | h0 | wm1 | hN |
- // +-----+------+------+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, length);
- __ AddP(too_far, feedback, r0);
- __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&prepare_next, Label::kNear);
- __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ AddP(pointer_reg, Operand(kPointerSize * 2));
- __ CmpP(pointer_reg, too_far);
- __ blt(&next_loop, Label::kNear);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register slot,
- Register scratch, Label* compare_map,
- Label* load_smi_map, Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ bind(compare_map);
- Register cached_map = scratch;
- // Move the weak map into the weak_cell register.
- __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
- __ CmpP(cached_map, receiver_map);
- __ bne(try_array);
- Register handler = feedback;
- __ SmiToPtrArrayOffset(r1, slot);
- __ LoadP(handler,
- FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize));
- __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
- Label transition_call;
-
- Register cached_map = scratch1;
- Register too_far = scratch2;
- Register pointer_reg = feedback;
- __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
-
- // +-----+------+------+-----+-----+-----+ ... ----+
- // | map | len | wm0 | wt0 | h0 | wm1 | hN |
- //   +-----+------+------+-----+-----+-----+ ... ----+
- // 0 1 2 len-1
- // ^ ^
- // | |
- // pointer_reg too_far
- // aka feedback scratch2
- // also need receiver_map
- // use cached_map (scratch1) to look in the weak map values.
- __ SmiToPtrArrayOffset(r0, too_far);
- __ AddP(too_far, feedback, r0);
- __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(pointer_reg, feedback,
- Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
-
- __ bind(&next_loop);
- __ LoadP(cached_map, MemOperand(pointer_reg));
- __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
- __ CmpP(receiver_map, cached_map);
- __ bne(&prepare_next);
- // Is it a transitioning store?
- __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
- __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
- __ bne(&transition_call);
- __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
- __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&transition_call);
- __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
- __ JumpIfSmi(too_far, miss);
-
- __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
-
- // Load the map into the correct register.
- DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
- __ LoadRR(feedback, too_far);
-
- __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(ip);
-
- __ bind(&prepare_next);
- __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
- __ CmpLogicalP(pointer_reg, too_far);
- __ blt(&next_loop);
-
- // We exhausted our array of map handler pairs.
- __ b(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r3
- Register key = StoreWithVectorDescriptor::NameRegister(); // r4
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // r5
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // r6
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r2)); // r2
- Register feedback = r7;
- Register receiver_map = r8;
- Register scratch1 = r9;
-
- __ SmiToPtrArrayOffset(r0, slot);
- __ AddP(feedback, vector, r0);
- __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
- scratch1, &compare_map, &load_smi_map, &try_array);
-
- __ bind(&try_array);
- // Is it a fixed array?
- __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
- __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
- __ bne(&not_array);
-
- // We have a polymorphic element handler.
- Label polymorphic, try_poly_name;
- __ bind(&polymorphic);
-
- Register scratch2 = ip;
-
- HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
- &miss);
-
- __ bind(&not_array);
- // Is it generic?
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ bne(&try_poly_name);
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ CmpP(key, feedback);
- __ bne(&miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ SmiToPtrArrayOffset(r0, slot);
- __ AddP(feedback, vector, r0);
- __ LoadP(feedback,
- FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ b(&compare_map);
-}
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -3754,124 +3396,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r3 : target
- // -- r5 : new target
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
- __ AssertFunction(r3);
- __ AssertReceiver(r5);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE);
- __ bne(&new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(r4, &new_object);
- __ CompareObjectType(r4, r2, r2, MAP_TYPE);
- __ bne(&new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset));
- __ CmpP(r2, r3);
- __ bne(&new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset));
- __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ StoreP(r4, FieldMemOperand(r2, JSObject::kMapOffset));
- __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r5, FieldMemOperand(r2, JSObject::kPropertiesOffset));
- __ StoreP(r5, FieldMemOperand(r2, JSObject::kElementsOffset));
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ AddP(r3, r2, Operand(JSObject::kHeaderSize - kHeapObjectTag));
-
- // ----------- S t a t e -------------
- // -- r2 : result (tagged)
- // -- r3 : result fields (untagged)
- // -- r7 : result end (untagged)
- // -- r4 : initial map
- // -- cp : context
- // -- lr : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
- __ DecodeField<Map::ConstructionCounter>(r9, r5);
- __ LoadAndTestP(r9, r9);
- __ bne(&slack_tracking);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(r3, r7, r8);
-
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift)));
- __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset));
-
- // Initialize the in-object fields with undefined.
- __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset));
- __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2));
- __ SubP(r6, r7, r6);
- __ InitializeFieldsWithFiller(r3, r6, r8);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(r3, r7, r8);
-
- // Check if we can finalize the instance size.
- __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd));
- __ Ret(ne);
-
- // Finalize the instance size.
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r2, r4);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(r2);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- STATIC_ASSERT(kSmiTag == 0);
- __ ShiftLeftP(r6, r6,
- Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
- __ Push(r4, r6);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(r4);
- }
- __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset));
- __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2));
- __ AddP(r7, r2, r7);
- __ SubP(r7, r7, Operand(kHeapObjectTag));
- __ b(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ Push(r3, r5);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
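The slack-tracking path in the removed FastNewObjectStub decrements Map::ConstructionCounter, a bit field stored in the upper bits of the map's bit field 3 (the STATIC_ASSERT pins ConstructionCounter::kNext to 32), so the Add32 with -(1 << kShift) is a field decrement. A small plain-C++ sketch of the same update, with the helper name hypothetical:

// Decrement the ConstructionCounter field inside bit_field3.
// The counter occupies bits [shift, 32), so subtracting 1 << shift
// lowers the counter by one without disturbing the low bits.
inline uint32_t DecrementConstructionCounter(uint32_t bit_field3, int shift) {
  return bit_field3 - (1u << shift);
}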
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : function
@@ -3902,7 +3426,8 @@ void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
Label no_rest_parameters;
__ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(ip, r0);
__ bne(&no_rest_parameters);
// Check if the arguments adaptor frame contains more arguments than
@@ -4076,7 +3601,8 @@ void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
Label adaptor_frame, try_allocate, runtime;
__ LoadP(r6, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ LoadP(r2, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(r2, r0);
__ beq(&adaptor_frame);
// No adaptor, parameter count = argument count.
@@ -4311,7 +3837,8 @@ void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
Label arguments_adaptor, arguments_done;
__ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset));
__ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ CmpP(ip, r0);
__ beq(&arguments_adaptor);
{
__ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
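The three hunks above replace a single CmpSmiLiteral with an explicit LoadSmiLiteral/CmpP pair when testing for an arguments-adaptor frame. A hedged sketch of that repeated sequence factored into one helper; the helper itself is hypothetical, while LoadSmiLiteral and CmpP are the MacroAssembler calls visible in the diff:

// Hypothetical helper: compare a frame-marker register against a
// Smi-encoded StackFrame::Type, using r0 as scratch as the hunks do.
static void CompareFrameMarker(MacroAssembler* masm, Register marker,
                               StackFrame::Type type) {
  masm->LoadSmiLiteral(r0, Smi::FromInt(type));  // materialize the Smi constant
  masm->CmpP(marker, r0);                        // full pointer-width compare
}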
diff --git a/deps/v8/src/s390/code-stubs-s390.h b/deps/v8/src/s390/code-stubs-s390.h
index 461e569023..c599308792 100644
--- a/deps/v8/src/s390/code-stubs-s390.h
+++ b/deps/v8/src/s390/code-stubs-s390.h
@@ -14,15 +14,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
- Register src, Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
@@ -321,10 +312,6 @@ class NameDictionaryLookupStub : public PlatformCodeStub {
Register properties, Handle<Name> name,
Register scratch0);
- static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
- Label* done, Register elements,
- Register name, Register r0, Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index d92cc54ab2..02cc8c206c 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -66,306 +66,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch_elements = r6;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
- allocation_memento_found);
- }
-
- // Set transitioned map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // lr contains the return address
- Label loop, entry, convert_hole, gc_required, only_change_map, done;
- Register elements = r6;
- Register length = r7;
- Register array = r8;
- Register array_end = array;
-
- // target_map parameter can be clobbered.
- Register scratch1 = target_map;
- Register scratch2 = r1;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
- scratch2));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map, Label::kNear);
-
- // Preserve lr and use r14 as a temporary register.
- __ push(r14);
-
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(r14, length);
- __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
- __ SubP(array, array, Operand(kHeapObjectTag));
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
- __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
- __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
-
- // Update receiver's map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ AddP(scratch1, array, Operand(kHeapObjectTag));
- __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Prepare for conversion loop.
- __ AddP(target_map, elements,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array, length);
- __ AddP(array_end, r9, array);
-// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_S390X
- Register hole_int64 = elements;
-#else
- Register hole_lower = elements;
- Register hole_upper = length;
-#endif
- // scratch1: begin of source FixedArray element fields, not tagged
- // hole_lower: kHoleNanLower32 OR hole_int64
- // hole_upper: kHoleNanUpper32
- // array_end: end of destination FixedDoubleArray, not tagged
- // scratch2: begin of FixedDoubleArray element fields, not tagged
-
- __ b(&entry, Label::kNear);
-
- __ bind(&only_change_map);
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ b(&done, Label::kNear);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(r14);
- __ b(fail);
-
- // Convert and copy elements.
- __ bind(&loop);
- __ LoadP(r14, MemOperand(scratch1));
- __ la(scratch1, MemOperand(scratch1, kPointerSize));
- // r14: current element
- __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
-
- // Normal smi, convert to double and store.
- __ ConvertIntToDouble(r14, d0);
- __ StoreDouble(d0, MemOperand(r9, 0));
- __ la(r9, MemOperand(r9, 8));
-
- __ b(&entry, Label::kNear);
-
- // Hole found, store the-hole NaN.
- __ bind(&convert_hole);
- if (FLAG_debug_code) {
- // Restore a "smi-untagged" heap object.
- __ LoadP(r1, MemOperand(r5, -kPointerSize));
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, kObjectFoundInSmiOnlyArray);
- }
-#if V8_TARGET_ARCH_S390X
- __ stg(hole_int64, MemOperand(r9, 0));
-#else
- __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
- __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
-#endif
- __ AddP(r9, Operand(8));
-
- __ bind(&entry);
- __ CmpP(r9, array_end);
- __ blt(&loop);
-
- __ pop(r14);
- __ bind(&done);
-}
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Register receiver, Register key, Register value,
- Register target_map, AllocationSiteMode mode, Label* fail) {
- // Register lr contains the return address.
- Label loop, convert_hole, gc_required, only_change_map;
- Register elements = r6;
- Register array = r8;
- Register length = r7;
- Register scratch = r1;
- Register scratch3 = r9;
- Register hole_value = r9;
-
- // Verify input registers don't conflict with locals.
- DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
- scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map);
-
- __ Push(target_map, receiver, key, value);
- __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
- // elements: source FixedDoubleArray
- // length: number of elements (smi-tagged)
-
- // Allocate new FixedArray.
- // Re-use value and target_map registers, as they have been saved on the
- // stack.
- Register array_size = value;
- Register allocate_scratch = target_map;
- __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToPtrArrayOffset(r0, length);
- __ AddP(array_size, r0);
- __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
- NO_ALLOCATION_FLAGS);
- // array: destination FixedArray, tagged as heap object
- // Set destination FixedDoubleArray's length and map.
- __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
- __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
- r0);
- __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
-
- // Prepare for conversion loop.
- Register src_elements = elements;
- Register dst_elements = target_map;
- Register dst_end = length;
- Register heap_number_map = scratch;
- __ AddP(src_elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
-
- Label initialization_loop, loop_done;
- __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
- __ beq(&loop_done, Label::kNear);
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- __ AddP(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ bind(&initialization_loop);
- __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
- __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
- __ BranchOnCount(scratch, &initialization_loop);
-
- __ AddP(dst_elements, array,
- Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ AddP(dst_end, dst_elements, length);
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- // Using offset addresses in src_elements to fully take advantage of
- // post-indexing.
- // dst_elements: begin of destination FixedArray element fields, not tagged
- // src_elements: begin of source FixedDoubleArray element fields,
- // not tagged, +4
- // dst_end: end of destination FixedArray, not tagged
- // array: destination FixedArray
- // hole_value: the-hole pointer
- // heap_number_map: heap number map
- __ b(&loop, Label::kNear);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(target_map, receiver, key, value);
- __ b(fail);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ StoreP(hole_value, MemOperand(dst_elements));
- __ AddP(dst_elements, Operand(kPointerSize));
- __ CmpLogicalP(dst_elements, dst_end);
- __ bge(&loop_done);
-
- __ bind(&loop);
- Register upper_bits = key;
- __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
- __ AddP(src_elements, Operand(kDoubleSize));
- // upper_bits: current element's upper 32 bit
- // src_elements: address of next element's upper 32 bit
- __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
- __ beq(&convert_hole, Label::kNear);
-
- // Non-hole double, copy value into a heap number.
- Register heap_number = receiver;
- Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
- &gc_required);
-// heap_number: new heap number
-#if V8_TARGET_ARCH_S390X
- __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
- // subtract tag for std
- __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
- __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
-#else
- __ LoadlW(scratch2,
- MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
- __ LoadlW(upper_bits,
- MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
- __ StoreW(scratch2,
- FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ StoreW(upper_bits,
- FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
-#endif
- __ LoadRR(scratch2, dst_elements);
- __ StoreP(heap_number, MemOperand(dst_elements));
- __ AddP(dst_elements, Operand(kPointerSize));
- __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ CmpLogicalP(dst_elements, dst_end);
- __ blt(&loop);
- __ bind(&loop_done);
-
- __ Pop(target_map, receiver, key, value);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- __ bind(&only_change_map);
- // Update receiver's map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
@@ -487,29 +187,25 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Code* code = NULL;
- Address target_address =
- Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
+
+ Code* code = NULL;
+ Address target_address =
+ Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
// We need to push lr on stack so that GenerateMakeCodeYoungAgainCommon
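The hunks above drop MarkingParity from the code-aging interface, so querying and patching reduce to a single Age value. A minimal usage sketch under that reading; the driver code is assumed, while GetCodeAge and PatchPlatformCodeAge are declared as shown in the diff:

// Hypothetical driver: rejuvenate an aged code sequence.
Code::Age age = Code::GetCodeAge(isolate, sequence);
if (age != Code::kNoAgeCodeAge) {
  Code::PatchPlatformCodeAge(isolate, sequence, Code::kNoAgeCodeAge);
}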
diff --git a/deps/v8/src/s390/constants-s390.h b/deps/v8/src/s390/constants-s390.h
index 9dfb32c7e7..c1cace2634 100644
--- a/deps/v8/src/s390/constants-s390.h
+++ b/deps/v8/src/s390/constants-s390.h
@@ -157,765 +157,1565 @@ typedef uint16_t TwoByteInstr;
typedef uint32_t FourByteInstr;
typedef uint64_t SixByteInstr;
+#define S390_RSY_A_OPCODE_LIST(V) \
+ V(lmg, LMG, 0xEB04) /* type = RSY_A LOAD MULTIPLE (64) */ \
+ V(srag, SRAG, 0xEB0A) /* type = RSY_A SHIFT RIGHT SINGLE (64) */ \
+ V(slag, SLAG, 0xEB0B) /* type = RSY_A SHIFT LEFT SINGLE (64) */ \
+ V(srlg, SRLG, 0xEB0C) /* type = RSY_A SHIFT RIGHT SINGLE LOGICAL (64) */ \
+ V(sllg, SLLG, 0xEB0D) /* type = RSY_A SHIFT LEFT SINGLE LOGICAL (64) */ \
+ V(tracg, TRACG, 0xEB0F) /* type = RSY_A TRACE (64) */ \
+ V(csy, CSY, 0xEB14) /* type = RSY_A COMPARE AND SWAP (32) */ \
+ V(rllg, RLLG, 0xEB1C) /* type = RSY_A ROTATE LEFT SINGLE LOGICAL (64) */ \
+ V(rll, RLL, 0xEB1D) /* type = RSY_A ROTATE LEFT SINGLE LOGICAL (32) */ \
+ V(stmg, STMG, 0xEB24) /* type = RSY_A STORE MULTIPLE (64) */ \
+ V(stctg, STCTG, 0xEB25) /* type = RSY_A STORE CONTROL (64) */ \
+ V(stmh, STMH, 0xEB26) /* type = RSY_A STORE MULTIPLE HIGH (32) */ \
+ V(lctlg, LCTLG, 0xEB2F) /* type = RSY_A LOAD CONTROL (64) */ \
+ V(csg, CSG, 0xEB30) /* type = RSY_A COMPARE AND SWAP (64) */ \
+ V(cdsy, CDSY, 0xEB31) /* type = RSY_A COMPARE DOUBLE AND SWAP (32) */ \
+ V(cdsg, CDSG, 0xEB3E) /* type = RSY_A COMPARE DOUBLE AND SWAP (64) */ \
+ V(bxhg, BXHG, 0xEB44) /* type = RSY_A BRANCH ON INDEX HIGH (64) */ \
+ V(bxleg, BXLEG, 0xEB45) /* type = RSY_A BRANCH ON INDEX LOW OR EQUAL (64) */ \
+ V(ecag, ECAG, 0xEB4C) /* type = RSY_A EXTRACT CPU ATTRIBUTE */ \
+ V(mvclu, MVCLU, 0xEB8E) /* type = RSY_A MOVE LONG UNICODE */ \
+ V(clclu, CLCLU, 0xEB8F) /* type = RSY_A COMPARE LOGICAL LONG UNICODE */ \
+ V(stmy, STMY, 0xEB90) /* type = RSY_A STORE MULTIPLE (32) */ \
+ V(lmh, LMH, 0xEB96) /* type = RSY_A LOAD MULTIPLE HIGH (32) */ \
+ V(lmy, LMY, 0xEB98) /* type = RSY_A LOAD MULTIPLE (32) */ \
+ V(lamy, LAMY, 0xEB9A) /* type = RSY_A LOAD ACCESS MULTIPLE */ \
+ V(stamy, STAMY, 0xEB9B) /* type = RSY_A STORE ACCESS MULTIPLE */ \
+ V(srak, SRAK, 0xEBDC) /* type = RSY_A SHIFT RIGHT SINGLE (32) */ \
+ V(slak, SLAK, 0xEBDD) /* type = RSY_A SHIFT LEFT SINGLE (32) */ \
+ V(srlk, SRLK, 0xEBDE) /* type = RSY_A SHIFT RIGHT SINGLE LOGICAL (32) */ \
+ V(sllk, SLLK, 0xEBDF) /* type = RSY_A SHIFT LEFT SINGLE LOGICAL (32) */ \
+ V(lang, LANG, 0xEBE4) /* type = RSY_A LOAD AND AND (64) */ \
+ V(laog, LAOG, 0xEBE6) /* type = RSY_A LOAD AND OR (64) */ \
+ V(laxg, LAXG, 0xEBE7) /* type = RSY_A LOAD AND EXCLUSIVE OR (64) */ \
+ V(laag, LAAG, 0xEBE8) /* type = RSY_A LOAD AND ADD (64) */ \
+ V(laalg, LAALG, 0xEBEA) /* type = RSY_A LOAD AND ADD LOGICAL (64) */ \
+ V(lan, LAN, 0xEBF4) /* type = RSY_A LOAD AND AND (32) */ \
+ V(lao, LAO, 0xEBF6) /* type = RSY_A LOAD AND OR (32) */ \
+ V(lax, LAX, 0xEBF7) /* type = RSY_A LOAD AND EXCLUSIVE OR (32) */ \
+ V(laa, LAA, 0xEBF8) /* type = RSY_A LOAD AND ADD (32) */ \
+ V(laal, LAAL, 0xEBFA) /* type = RSY_A LOAD AND ADD LOGICAL (32) */
+
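Each S390_*_OPCODE_LIST table added here is an X-macro: every entry invokes the caller-supplied V with a lowercase mnemonic, an uppercase name, and the opcode value. One common expansion is an opcode enum; the DECLARE_OPCODES wrapper below is illustrative rather than quoted from the patch:

#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
  opcode_name = opcode_value,
enum Opcode {
  S390_RSY_A_OPCODE_LIST(DECLARE_OPCODES)  // LMG = 0xEB04, SRAG = 0xEB0A, ...
};
#undef DECLARE_OPCODES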
+#define S390_RSY_B_OPCODE_LIST(V) \
+ V(clmh, CLMH, \
+ 0xEB20) /* type = RSY_B COMPARE LOGICAL CHAR. UNDER MASK (high) */ \
+ V(clmy, CLMY, \
+ 0xEB21) /* type = RSY_B COMPARE LOGICAL CHAR. UNDER MASK (low) */ \
+ V(clt, CLT, 0xEB23) /* type = RSY_B COMPARE LOGICAL AND TRAP (32) */ \
+ V(clgt, CLGT, 0xEB2B) /* type = RSY_B COMPARE LOGICAL AND TRAP (64) */ \
+ V(stcmh, STCMH, \
+ 0xEB2C) /* type = RSY_B STORE CHARACTERS UNDER MASK (high) */ \
+ V(stcmy, STCMY, 0xEB2D) /* type = RSY_B STORE CHARACTERS UNDER MASK (low) */ \
+ V(icmh, ICMH, 0xEB80) /* type = RSY_B INSERT CHARACTERS UNDER MASK (high) */ \
+ V(icmy, ICMY, 0xEB81) /* type = RSY_B INSERT CHARACTERS UNDER MASK (low) */ \
+ V(locfh, LOCFH, 0xEBE0) /* type = RSY_B LOAD HIGH ON CONDITION (32) */ \
+ V(stocfh, STOCFH, 0xEBE1) /* type = RSY_B STORE HIGH ON CONDITION */ \
+ V(locg, LOCG, 0xEBE2) /* type = RSY_B LOAD ON CONDITION (64) */ \
+ V(stocg, STOCG, 0xEBE3) /* type = RSY_B STORE ON CONDITION (64) */ \
+ V(loc, LOC, 0xEBF2) /* type = RSY_B LOAD ON CONDITION (32) */ \
+ V(stoc, STOC, 0xEBF3) /* type = RSY_B STORE ON CONDITION (32) */
+
+#define S390_RXE_OPCODE_LIST(V) \
+ V(lcbb, LCBB, 0xE727) /* type = RXE LOAD COUNT TO BLOCK BOUNDARY */ \
+ V(ldeb, LDEB, 0xED04) /* type = RXE LOAD LENGTHENED (short to long BFP) */ \
+ V(lxdb, LXDB, \
+ 0xED05) /* type = RXE LOAD LENGTHENED (long to extended BFP) */ \
+ V(lxeb, LXEB, \
+ 0xED06) /* type = RXE LOAD LENGTHENED (short to extended BFP) */ \
+ V(mxdb, MXDB, 0xED07) /* type = RXE MULTIPLY (long to extended BFP) */ \
+ V(keb, KEB, 0xED08) /* type = RXE COMPARE AND SIGNAL (short BFP) */ \
+ V(ceb, CEB, 0xED09) /* type = RXE COMPARE (short BFP) */ \
+ V(aeb, AEB, 0xED0A) /* type = RXE ADD (short BFP) */ \
+ V(seb, SEB, 0xED0B) /* type = RXE SUBTRACT (short BFP) */ \
+ V(mdeb, MDEB, 0xED0C) /* type = RXE MULTIPLY (short to long BFP) */ \
+ V(deb, DEB, 0xED0D) /* type = RXE DIVIDE (short BFP) */ \
+ V(tceb, TCEB, 0xED10) /* type = RXE TEST DATA CLASS (short BFP) */ \
+ V(tcdb, TCDB, 0xED11) /* type = RXE TEST DATA CLASS (long BFP) */ \
+ V(tcxb, TCXB, 0xED12) /* type = RXE TEST DATA CLASS (extended BFP) */ \
+ V(sqeb, SQEB, 0xED14) /* type = RXE SQUARE ROOT (short BFP) */ \
+ V(sqdb, SQDB, 0xED15) /* type = RXE SQUARE ROOT (long BFP) */ \
+ V(meeb, MEEB, 0xED17) /* type = RXE MULTIPLY (short BFP) */ \
+ V(kdb, KDB, 0xED18) /* type = RXE COMPARE AND SIGNAL (long BFP) */ \
+ V(cdb, CDB, 0xED19) /* type = RXE COMPARE (long BFP) */ \
+ V(adb, ADB, 0xED1A) /* type = RXE ADD (long BFP) */ \
+ V(sdb, SDB, 0xED1B) /* type = RXE SUBTRACT (long BFP) */ \
+ V(mdb, MDB, 0xED1C) /* type = RXE MULTIPLY (long BFP) */ \
+ V(ddb, DDB, 0xED1D) /* type = RXE DIVIDE (long BFP) */ \
+ V(lde, LDE, 0xED24) /* type = RXE LOAD LENGTHENED (short to long HFP) */ \
+ V(lxd, LXD, \
+ 0xED25) /* type = RXE LOAD LENGTHENED (long to extended HFP) */ \
+ V(lxe, LXE, \
+ 0xED26) /* type = RXE LOAD LENGTHENED (short to extended HFP) */ \
+ V(sqe, SQE, 0xED34) /* type = RXE SQUARE ROOT (short HFP) */ \
+ V(sqd, SQD, 0xED35) /* type = RXE SQUARE ROOT (long HFP) */ \
+ V(mee, MEE, 0xED37) /* type = RXE MULTIPLY (short HFP) */ \
+ V(tdcet, TDCET, 0xED50) /* type = RXE TEST DATA CLASS (short DFP) */ \
+ V(tdget, TDGET, 0xED51) /* type = RXE TEST DATA GROUP (short DFP) */ \
+ V(tdcdt, TDCDT, 0xED54) /* type = RXE TEST DATA CLASS (long DFP) */ \
+ V(tdgdt, TDGDT, 0xED55) /* type = RXE TEST DATA GROUP (long DFP) */ \
+ V(tdcxt, TDCXT, 0xED58) /* type = RXE TEST DATA CLASS (extended DFP) */ \
+ V(tdgxt, TDGXT, 0xED59) /* type = RXE TEST DATA GROUP (extended DFP) */
+
+#define S390_RRF_A_OPCODE_LIST(V) \
+ V(ipte, IPTE, 0xB221) /* type = RRF_A INVALIDATE PAGE TABLE ENTRY */ \
+ V(mdtr, MDTR, 0xB3D0) /* type = RRF_A MULTIPLY (long DFP) */ \
+ V(mdtra, MDTRA, 0xB3D0) /* type = RRF_A MULTIPLY (long DFP) */ \
+ V(ddtr, DDTR, 0xB3D1) /* type = RRF_A DIVIDE (long DFP) */ \
+ V(ddtra, DDTRA, 0xB3D1) /* type = RRF_A DIVIDE (long DFP) */ \
+ V(adtr, ADTR, 0xB3D2) /* type = RRF_A ADD (long DFP) */ \
+ V(adtra, ADTRA, 0xB3D2) /* type = RRF_A ADD (long DFP) */ \
+ V(sdtr, SDTR, 0xB3D3) /* type = RRF_A SUBTRACT (long DFP) */ \
+ V(sdtra, SDTRA, 0xB3D3) /* type = RRF_A SUBTRACT (long DFP) */ \
+ V(mxtr, MXTR, 0xB3D8) /* type = RRF_A MULTIPLY (extended DFP) */ \
+ V(mxtra, MXTRA, 0xB3D8) /* type = RRF_A MULTIPLY (extended DFP) */ \
+ V(msrkc, MSRKC, 0xB9FD) /* type = RRF_A MULTIPLY (32) */ \
+ V(msgrkc, MSGRKC, 0xB9ED) /* type = RRF_A MULTIPLY (64) */ \
+ V(dxtr, DXTR, 0xB3D9) /* type = RRF_A DIVIDE (extended DFP) */ \
+ V(dxtra, DXTRA, 0xB3D9) /* type = RRF_A DIVIDE (extended DFP) */ \
+ V(axtr, AXTR, 0xB3DA) /* type = RRF_A ADD (extended DFP) */ \
+ V(axtra, AXTRA, 0xB3DA) /* type = RRF_A ADD (extended DFP) */ \
+ V(sxtr, SXTR, 0xB3DB) /* type = RRF_A SUBTRACT (extended DFP) */ \
+ V(sxtra, SXTRA, 0xB3DB) /* type = RRF_A SUBTRACT (extended DFP) */ \
+ V(ahhhr, AHHHR, 0xB9C8) /* type = RRF_A ADD HIGH (32) */ \
+ V(shhhr, SHHHR, 0xB9C9) /* type = RRF_A SUBTRACT HIGH (32) */ \
+ V(alhhhr, ALHHHR, 0xB9CA) /* type = RRF_A ADD LOGICAL HIGH (32) */ \
+ V(slhhhr, SLHHHR, 0xB9CB) /* type = RRF_A SUBTRACT LOGICAL HIGH (32) */ \
+ V(ahhlr, AHHLR, 0xB9D8) /* type = RRF_A ADD HIGH (32) */ \
+ V(shhlr, SHHLR, 0xB9D9) /* type = RRF_A SUBTRACT HIGH (32) */ \
+ V(alhhlr, ALHHLR, 0xB9DA) /* type = RRF_A ADD LOGICAL HIGH (32) */ \
+ V(slhhlr, SLHHLR, 0xB9DB) /* type = RRF_A SUBTRACT LOGICAL HIGH (32) */ \
+ V(ngrk, NGRK, 0xB9E4) /* type = RRF_A AND (64) */ \
+ V(ogrk, OGRK, 0xB9E6) /* type = RRF_A OR (64) */ \
+ V(xgrk, XGRK, 0xB9E7) /* type = RRF_A EXCLUSIVE OR (64) */ \
+ V(agrk, AGRK, 0xB9E8) /* type = RRF_A ADD (64) */ \
+ V(sgrk, SGRK, 0xB9E9) /* type = RRF_A SUBTRACT (64) */ \
+ V(algrk, ALGRK, 0xB9EA) /* type = RRF_A ADD LOGICAL (64) */ \
+ V(slgrk, SLGRK, 0xB9EB) /* type = RRF_A SUBTRACT LOGICAL (64) */ \
+ V(nrk, NRK, 0xB9F4) /* type = RRF_A AND (32) */ \
+ V(ork, ORK, 0xB9F6) /* type = RRF_A OR (32) */ \
+ V(xrk, XRK, 0xB9F7) /* type = RRF_A EXCLUSIVE OR (32) */ \
+ V(ark, ARK, 0xB9F8) /* type = RRF_A ADD (32) */ \
+ V(srk, SRK, 0xB9F9) /* type = RRF_A SUBTRACT (32) */ \
+ V(alrk, ALRK, 0xB9FA) /* type = RRF_A ADD LOGICAL (32) */ \
+ V(slrk, SLRK, 0xB9FB) /* type = RRF_A SUBTRACT LOGICAL (32) */
+
+#define S390_RXF_OPCODE_LIST(V) \
+ V(maeb, MAEB, 0xED0E) /* type = RXF MULTIPLY AND ADD (short BFP) */ \
+ V(mseb, MSEB, 0xED0F) /* type = RXF MULTIPLY AND SUBTRACT (short BFP) */ \
+ V(madb, MADB, 0xED1E) /* type = RXF MULTIPLY AND ADD (long BFP) */ \
+ V(msdb, MSDB, 0xED1F) /* type = RXF MULTIPLY AND SUBTRACT (long BFP) */ \
+ V(mae, MAE, 0xED2E) /* type = RXF MULTIPLY AND ADD (short HFP) */ \
+ V(mse, MSE, 0xED2F) /* type = RXF MULTIPLY AND SUBTRACT (short HFP) */ \
+ V(mayl, MAYL, \
+ 0xED38) /* type = RXF MULTIPLY AND ADD UNNRM. (long to ext. low HFP) */ \
+ V(myl, MYL, \
+ 0xED39) /* type = RXF MULTIPLY UNNORM. (long to ext. low HFP) */ \
+ V(may, MAY, \
+ 0xED3A) /* type = RXF MULTIPLY & ADD UNNORMALIZED (long to ext. HFP) */ \
+ V(my, MY, \
+ 0xED3B) /* type = RXF MULTIPLY UNNORMALIZED (long to ext. HFP) */ \
+ V(mayh, MAYH, \
+ 0xED3C) /* type = RXF MULTIPLY AND ADD UNNRM. (long to ext. high HFP) */ \
+ V(myh, MYH, \
+ 0xED3D) /* type = RXF MULTIPLY UNNORM. (long to ext. high HFP) */ \
+ V(mad, MAD, 0xED3E) /* type = RXF MULTIPLY AND ADD (long HFP) */ \
+ V(msd, MSD, 0xED3F) /* type = RXF MULTIPLY AND SUBTRACT (long HFP) */ \
+ V(sldt, SLDT, 0xED40) /* type = RXF SHIFT SIGNIFICAND LEFT (long DFP) */ \
+ V(srdt, SRDT, 0xED41) /* type = RXF SHIFT SIGNIFICAND RIGHT (long DFP) */ \
+ V(slxt, SLXT, \
+ 0xED48) /* type = RXF SHIFT SIGNIFICAND LEFT (extended DFP) */ \
+ V(srxt, SRXT, \
+ 0xED49) /* type = RXF SHIFT SIGNIFICAND RIGHT (extended DFP) */
+
+#define S390_IE_OPCODE_LIST(V) \
+ V(niai, NIAI, 0xB2FA) /* type = IE NEXT INSTRUCTION ACCESS INTENT */
+
+#define S390_RRF_B_OPCODE_LIST(V) \
+ V(diebr, DIEBR, 0xB353) /* type = RRF_B DIVIDE TO INTEGER (short BFP) */ \
+ V(didbr, DIDBR, 0xB35B) /* type = RRF_B DIVIDE TO INTEGER (long BFP) */ \
+ V(cpsdr, CPSDR, 0xB372) /* type = RRF_B COPY SIGN (long) */ \
+ V(qadtr, QADTR, 0xB3F5) /* type = RRF_B QUANTIZE (long DFP) */ \
+ V(iedtr, IEDTR, \
+ 0xB3F6) /* type = RRF_B INSERT BIASED EXPONENT (64 to long DFP) */ \
+ V(rrdtr, RRDTR, 0xB3F7) /* type = RRF_B REROUND (long DFP) */ \
+ V(qaxtr, QAXTR, 0xB3FD) /* type = RRF_B QUANTIZE (extended DFP) */ \
+ V(iextr, IEXTR, \
+ 0xB3FE) /* type = RRF_B INSERT BIASED EXPONENT (64 to extended DFP) */ \
+ V(rrxtr, RRXTR, 0xB3FF) /* type = RRF_B REROUND (extended DFP) */ \
+ V(kmctr, KMCTR, 0xB92D) /* type = RRF_B CIPHER MESSAGE WITH COUNTER */ \
+ V(idte, IDTE, 0xB98E) /* type = RRF_B INVALIDATE DAT TABLE ENTRY */ \
+ V(crdte, CRDTE, \
+ 0xB98F) /* type = RRF_B COMPARE AND REPLACE DAT TABLE ENTRY */ \
+ V(lptea, LPTEA, 0xB9AA) /* type = RRF_B LOAD PAGE TABLE ENTRY ADDRESS */
+
+#define S390_RRF_C_OPCODE_LIST(V) \
+ V(sske, SSKE, 0xB22B) /* type = RRF_C SET STORAGE KEY EXTENDED */ \
+ V(cuutf, CUUTF, 0xB2A6) /* type = RRF_C CONVERT UNICODE TO UTF-8 */ \
+ V(cu21, CU21, 0xB2A6) /* type = RRF_C CONVERT UTF-16 TO UTF-8 */ \
+ V(cutfu, CUTFU, 0xB2A7) /* type = RRF_C CONVERT UTF-8 TO UNICODE */ \
+ V(cu12, CU12, 0xB2A7) /* type = RRF_C CONVERT UTF-8 TO UTF-16 */ \
+ V(ppa, PPA, 0xB2E8) /* type = RRF_C PERFORM PROCESSOR ASSIST */ \
+ V(cgrt, CGRT, 0xB960) /* type = RRF_C COMPARE AND TRAP (64) */ \
+ V(clgrt, CLGRT, 0xB961) /* type = RRF_C COMPARE LOGICAL AND TRAP (64) */ \
+ V(crt, CRT, 0xB972) /* type = RRF_C COMPARE AND TRAP (32) */ \
+ V(clrt, CLRT, 0xB973) /* type = RRF_C COMPARE LOGICAL AND TRAP (32) */ \
+ V(trtt, TRTT, 0xB990) /* type = RRF_C TRANSLATE TWO TO TWO */ \
+ V(trto, TRTO, 0xB991) /* type = RRF_C TRANSLATE TWO TO ONE */ \
+ V(trot, TROT, 0xB992) /* type = RRF_C TRANSLATE ONE TO TWO */ \
+ V(troo, TROO, 0xB993) /* type = RRF_C TRANSLATE ONE TO ONE */ \
+ V(cu14, CU14, 0xB9B0) /* type = RRF_C CONVERT UTF-8 TO UTF-32 */ \
+ V(cu24, CU24, 0xB9B1) /* type = RRF_C CONVERT UTF-16 TO UTF-32 */ \
+ V(trtre, TRTRE, \
+ 0xB9BD) /* type = RRF_C TRANSLATE AND TEST REVERSE EXTENDED */ \
+ V(trte, TRTE, 0xB9BF) /* type = RRF_C TRANSLATE AND TEST EXTENDED */ \
+ V(locfhr, LOCFHR, 0xB9E0) /* type = RRF_C LOAD HIGH ON CONDITION (32) */ \
+ V(locgr, LOCGR, 0xB9E2) /* type = RRF_C LOAD ON CONDITION (64) */ \
+ V(locr, LOCR, 0xB9F2) /* type = RRF_C LOAD ON CONDITION (32) */
+
+#define S390_MII_OPCODE_LIST(V) \
+ V(bprp, BPRP, 0xC5) /* type = MII BRANCH PREDICTION RELATIVE PRELOAD */
+
+#define S390_RRF_D_OPCODE_LIST(V) \
+ V(ldetr, LDETR, \
+ 0xB3D4) /* type = RRF_D LOAD LENGTHENED (short to long DFP) */ \
+ V(lxdtr, LXDTR, \
+ 0xB3DC) /* type = RRF_D LOAD LENGTHENED (long to extended DFP) */ \
+ V(csdtr, CSDTR, \
+ 0xB3E3) /* type = RRF_D CONVERT TO SIGNED PACKED (long DFP to 64) */ \
+ V(csxtr, CSXTR, \
+ 0xB3EB) /* type = RRF_D CONVERT TO SIGNED PACKED (extended DFP to 128) */
+
+#define S390_RRF_E_OPCODE_LIST(V) \
+ V(ledbra, LEDBRA, \
+ 0xB344) /* type = RRF_E LOAD ROUNDED (long to short BFP) */ \
+ V(ldxbra, LDXBRA, \
+ 0xB345) /* type = RRF_E LOAD ROUNDED (extended to long BFP) */ \
+ V(lexbra, LEXBRA, \
+ 0xB346) /* type = RRF_E LOAD ROUNDED (extended to short BFP) */ \
+ V(fixbr, FIXBR, 0xB347) /* type = RRF_E LOAD FP INTEGER (extended BFP) */ \
+ V(fixbra, FIXBRA, 0xB347) /* type = RRF_E LOAD FP INTEGER (extended BFP) */ \
+ V(tbedr, TBEDR, \
+ 0xB350) /* type = RRF_E CONVERT HFP TO BFP (long to short) */ \
+ V(tbdr, TBDR, 0xB351) /* type = RRF_E CONVERT HFP TO BFP (long) */ \
+ V(fiebr, FIEBR, 0xB357) /* type = RRF_E LOAD FP INTEGER (short BFP) */ \
+ V(fiebra, FIEBRA, 0xB357) /* type = RRF_E LOAD FP INTEGER (short BFP) */ \
+ V(fidbr, FIDBR, 0xB35F) /* type = RRF_E LOAD FP INTEGER (long BFP) */ \
+ V(fidbra, FIDBRA, 0xB35F) /* type = RRF_E LOAD FP INTEGER (long BFP) */ \
+ V(celfbr, CELFBR, \
+ 0xB390) /* type = RRF_E CONVERT FROM LOGICAL (32 to short BFP) */ \
+ V(cdlfbr, CDLFBR, \
+ 0xB391) /* type = RRF_E CONVERT FROM LOGICAL (32 to long BFP) */ \
+ V(cxlfbr, CXLFBR, \
+ 0xB392) /* type = RRF_E CONVERT FROM LOGICAL (32 to extended BFP) */ \
+ V(cefbra, CEFBRA, \
+ 0xB394) /* type = RRF_E CONVERT FROM FIXED (32 to short BFP) */ \
+ V(cdfbra, CDFBRA, \
+ 0xB395) /* type = RRF_E CONVERT FROM FIXED (32 to long BFP) */ \
+ V(cxfbra, CXFBRA, \
+ 0xB396) /* type = RRF_E CONVERT FROM FIXED (32 to extended BFP) */ \
+ V(cfebr, CFEBR, \
+ 0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32) */ \
+ V(cfebra, CFEBRA, \
+ 0xB398) /* type = RRF_E CONVERT TO FIXED (short BFP to 32) */ \
+ V(cfdbr, CFDBR, 0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32) */ \
+ V(cfdbra, CFDBRA, \
+ 0xB399) /* type = RRF_E CONVERT TO FIXED (long BFP to 32) */ \
+ V(cfxbr, CFXBR, \
+ 0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32) */ \
+ V(cfxbra, CFXBRA, \
+ 0xB39A) /* type = RRF_E CONVERT TO FIXED (extended BFP to 32) */ \
+ V(clfebr, CLFEBR, \
+ 0xB39C) /* type = RRF_E CONVERT TO LOGICAL (short BFP to 32) */ \
+ V(clfdbr, CLFDBR, \
+ 0xB39D) /* type = RRF_E CONVERT TO LOGICAL (long BFP to 32) */ \
+ V(clfxbr, CLFXBR, \
+ 0xB39E) /* type = RRF_E CONVERT TO LOGICAL (extended BFP to 32) */ \
+ V(celgbr, CELGBR, \
+ 0xB3A0) /* type = RRF_E CONVERT FROM LOGICAL (64 to short BFP) */ \
+ V(cdlgbr, CDLGBR, \
+ 0xB3A1) /* type = RRF_E CONVERT FROM LOGICAL (64 to long BFP) */ \
+ V(cxlgbr, CXLGBR, \
+ 0xB3A2) /* type = RRF_E CONVERT FROM LOGICAL (64 to extended BFP) */ \
+ V(cegbra, CEGBRA, \
+ 0xB3A4) /* type = RRF_E CONVERT FROM FIXED (64 to short BFP) */ \
+ V(cdgbra, CDGBRA, \
+ 0xB3A5) /* type = RRF_E CONVERT FROM FIXED (64 to long BFP) */ \
+ V(cxgbra, CXGBRA, \
+ 0xB3A6) /* type = RRF_E CONVERT FROM FIXED (64 to extended BFP) */ \
+ V(cgebr, CGEBR, \
+ 0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64) */ \
+ V(cgebra, CGEBRA, \
+ 0xB3A8) /* type = RRF_E CONVERT TO FIXED (short BFP to 64) */ \
+ V(cgdbr, CGDBR, 0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64) */ \
+ V(cgdbra, CGDBRA, \
+ 0xB3A9) /* type = RRF_E CONVERT TO FIXED (long BFP to 64) */ \
+ V(cgxbr, CGXBR, \
+ 0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64) */ \
+ V(cgxbra, CGXBRA, \
+ 0xB3AA) /* type = RRF_E CONVERT TO FIXED (extended BFP to 64) */ \
+ V(clgebr, CLGEBR, \
+ 0xB3AC) /* type = RRF_E CONVERT TO LOGICAL (short BFP to 64) */ \
+ V(clgdbr, CLGDBR, \
+ 0xB3AD) /* type = RRF_E CONVERT TO LOGICAL (long BFP to 64) */ \
+ V(clgxbr, CLGXBR, \
+ 0xB3AE) /* type = RRF_E CONVERT TO LOGICAL (extended BFP to 64) */ \
+ V(cfer, CFER, 0xB3B8) /* type = RRF_E CONVERT TO FIXED (short HFP to 32) */ \
+ V(cfdr, CFDR, 0xB3B9) /* type = RRF_E CONVERT TO FIXED (long HFP to 32) */ \
+ V(cfxr, CFXR, \
+ 0xB3BA) /* type = RRF_E CONVERT TO FIXED (extended HFP to 32) */ \
+ V(cger, CGER, 0xB3C8) /* type = RRF_E CONVERT TO FIXED (short HFP to 64) */ \
+ V(cgdr, CGDR, 0xB3C9) /* type = RRF_E CONVERT TO FIXED (long HFP to 64) */ \
+ V(cgxr, CGXR, \
+ 0xB3CA) /* type = RRF_E CONVERT TO FIXED (extended HFP to 64) */ \
+ V(ledtr, LEDTR, 0xB3D5) /* type = RRF_E LOAD ROUNDED (long to short DFP) */ \
+ V(fidtr, FIDTR, 0xB3D7) /* type = RRF_E LOAD FP INTEGER (long DFP) */ \
+ V(ldxtr, LDXTR, \
+ 0xB3DD) /* type = RRF_E LOAD ROUNDED (extended to long DFP) */ \
+ V(fixtr, FIXTR, 0xB3DF) /* type = RRF_E LOAD FP INTEGER (extended DFP) */ \
+ V(cgdtr, CGDTR, 0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64) */ \
+ V(cgdtra, CGDTRA, \
+ 0xB3E1) /* type = RRF_E CONVERT TO FIXED (long DFP to 64) */ \
+ V(cgxtr, CGXTR, \
+ 0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64) */ \
+ V(cgxtra, CGXTRA, \
+ 0xB3E9) /* type = RRF_E CONVERT TO FIXED (extended DFP to 64) */ \
+ V(cdgtra, CDGTRA, \
+ 0xB3F1) /* type = RRF_E CONVERT FROM FIXED (64 to long DFP) */ \
+ V(cxgtra, CXGTRA, \
+ 0xB3F9) /* type = RRF_E CONVERT FROM FIXED (64 to extended DFP) */ \
+ V(cfdtr, CFDTR, 0xB941) /* type = RRF_E CONVERT TO FIXED (long DFP to 32) */ \
+ V(clgdtr, CLGDTR, \
+ 0xB942) /* type = RRF_E CONVERT TO LOGICAL (long DFP to 64) */ \
+ V(clfdtr, CLFDTR, \
+ 0xB943) /* type = RRF_E CONVERT TO LOGICAL (long DFP to 32) */ \
+ V(cfxtr, CFXTR, \
+ 0xB949) /* type = RRF_E CONVERT TO FIXED (extended DFP to 32) */ \
+ V(clgxtr, CLGXTR, \
+ 0xB94A) /* type = RRF_E CONVERT TO LOGICAL (extended DFP to 64) */ \
+ V(clfxtr, CLFXTR, \
+ 0xB94B) /* type = RRF_E CONVERT TO LOGICAL (extended DFP to 32) */ \
+ V(cdlgtr, CDLGTR, \
+ 0xB952) /* type = RRF_E CONVERT FROM LOGICAL (64 to long DFP) */ \
+ V(cdlftr, CDLFTR, \
+ 0xB953) /* type = RRF_E CONVERT FROM LOGICAL (32 to long DFP) */ \
+ V(cxlgtr, CXLGTR, \
+ 0xB95A) /* type = RRF_E CONVERT FROM LOGICAL (64 to extended DFP) */ \
+ V(cxlftr, CXLFTR, \
+ 0xB95B) /* type = RRF_E CONVERT FROM LOGICAL (32 to extended DFP) */
+
+#define S390_VRR_A_OPCODE_LIST(V) \
+ V(vpopct, VPOPCT, 0xE750) /* type = VRR_A VECTOR POPULATION COUNT */ \
+ V(vctz, VCTZ, 0xE752) /* type = VRR_A VECTOR COUNT TRAILING ZEROS */ \
+ V(vclz, VCLZ, 0xE753) /* type = VRR_A VECTOR COUNT LEADING ZEROS */ \
+ V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
+ V(vistr, VISTR, 0xE75C) /* type = VRR_A VECTOR ISOLATE STRING */ \
+ V(vseg, VSEG, 0xE75F) /* type = VRR_A VECTOR SIGN EXTEND TO DOUBLEWORD */ \
+ V(vclgd, VCLGD, \
+ 0xE7C0) /* type = VRR_A VECTOR FP CONVERT TO LOGICAL 64-BIT */ \
+ V(vcdlg, VCDLG, \
+ 0xE7C1) /* type = VRR_A VECTOR FP CONVERT FROM LOGICAL 64-BIT */ \
+ V(vcgd, VCGD, 0xE7C2) /* type = VRR_A VECTOR FP CONVERT TO FIXED 64-BIT */ \
+ V(vcdg, VCDG, 0xE7C3) /* type = VRR_A VECTOR FP CONVERT FROM FIXED 64-BIT */ \
+ V(vlde, VLDE, 0xE7C4) /* type = VRR_A VECTOR FP LOAD LENGTHENED */ \
+ V(vled, VLED, 0xE7C5) /* type = VRR_A VECTOR FP LOAD ROUNDED */ \
+ V(vfi, VFI, 0xE7C7) /* type = VRR_A VECTOR LOAD FP INTEGER */ \
+ V(wfk, WFK, 0xE7CA) /* type = VRR_A VECTOR FP COMPARE AND SIGNAL SCALAR */ \
+ V(wfc, WFC, 0xE7CB) /* type = VRR_A VECTOR FP COMPARE SCALAR */ \
+ V(vfpso, VFPSO, 0xE7CC) /* type = VRR_A VECTOR FP PERFORM SIGN OPERATION */ \
+ V(vfsq, VFSQ, 0xE7CE) /* type = VRR_A VECTOR FP SQUARE ROOT */ \
+ V(vupll, VUPLL, 0xE7D4) /* type = VRR_A VECTOR UNPACK LOGICAL LOW */ \
+ V(vuplh, VUPLH, 0xE7D5) /* type = VRR_A VECTOR UNPACK LOGICAL HIGH */ \
+ V(vupl, VUPL, 0xE7D6) /* type = VRR_A VECTOR UNPACK LOW */ \
+ V(vuph, VUPH, 0xE7D7) /* type = VRR_A VECTOR UNPACK HIGH */ \
+ V(vtm, VTM, 0xE7D8) /* type = VRR_A VECTOR TEST UNDER MASK */ \
+ V(vecl, VECL, 0xE7D9) /* type = VRR_A VECTOR ELEMENT COMPARE LOGICAL */ \
+ V(vec, VEC, 0xE7DB) /* type = VRR_A VECTOR ELEMENT COMPARE */ \
+ V(vlc, VLC, 0xE7DE) /* type = VRR_A VECTOR LOAD COMPLEMENT */ \
+ V(vlp, VLP, 0xE7DF) /* type = VRR_A VECTOR LOAD POSITIVE */
+
+#define S390_VRR_B_OPCODE_LIST(V) \
+ V(vfee, VFEE, 0xE780) /* type = VRR_B VECTOR FIND ELEMENT EQUAL */ \
+ V(vfene, VFENE, 0xE781) /* type = VRR_B VECTOR FIND ELEMENT NOT EQUAL */ \
+ V(vfae, VFAE, 0xE782) /* type = VRR_B VECTOR FIND ANY ELEMENT EQUAL */ \
+ V(vpkls, VPKLS, 0xE795) /* type = VRR_B VECTOR PACK LOGICAL SATURATE */ \
+ V(vpks, VPKS, 0xE797) /* type = VRR_B VECTOR PACK SATURATE */ \
+ V(vceq, VCEQ, 0xE7F8) /* type = VRR_B VECTOR COMPARE EQUAL */ \
+ V(vchl, VCHL, 0xE7F9) /* type = VRR_B VECTOR COMPARE HIGH LOGICAL */ \
+ V(vch, VCH, 0xE7FB) /* type = VRR_B VECTOR COMPARE HIGH */
+
+#define S390_VRR_C_OPCODE_LIST(V) \
+ V(vmrl, VMRL, 0xE760) /* type = VRR_C VECTOR MERGE LOW */ \
+ V(vmrh, VMRH, 0xE761) /* type = VRR_C VECTOR MERGE HIGH */ \
+ V(vsum, VSUM, 0xE764) /* type = VRR_C VECTOR SUM ACROSS WORD */ \
+ V(vsumg, VSUMG, 0xE765) /* type = VRR_C VECTOR SUM ACROSS DOUBLEWORD */ \
+ V(vcksm, VCKSM, 0xE766) /* type = VRR_C VECTOR CHECKSUM */ \
+ V(vsumq, VSUMQ, 0xE767) /* type = VRR_C VECTOR SUM ACROSS QUADWORD */ \
+ V(vn, VN, 0xE768) /* type = VRR_C VECTOR AND */ \
+ V(vnc, VNC, 0xE769) /* type = VRR_C VECTOR AND WITH COMPLEMENT */ \
+ V(vo, VO, 0xE76A) /* type = VRR_C VECTOR OR */ \
+ V(vno, VNO, 0xE76B) /* type = VRR_C VECTOR NOR */ \
+ V(vx, VX, 0xE76D) /* type = VRR_C VECTOR EXCLUSIVE OR */ \
+ V(veslv, VESLV, 0xE770) /* type = VRR_C VECTOR ELEMENT SHIFT LEFT */ \
+ V(verllv, VERLLV, \
+ 0xE773) /* type = VRR_C VECTOR ELEMENT ROTATE LEFT LOGICAL */ \
+ V(vsl, VSL, 0xE774) /* type = VRR_C VECTOR SHIFT LEFT */ \
+ V(vslb, VSLB, 0xE775) /* type = VRR_C VECTOR SHIFT LEFT BY BYTE */ \
+ V(vesrlv, VESRLV, \
+ 0xE778) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
+ V(vesrav, VESRAV, \
+ 0xE77A) /* type = VRR_C VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ \
+ V(vsrl, VSRL, 0xE77C) /* type = VRR_C VECTOR SHIFT RIGHT LOGICAL */ \
+ V(vsrlb, VSRLB, \
+ 0xE77D) /* type = VRR_C VECTOR SHIFT RIGHT LOGICAL BY BYTE */ \
+ V(vsra, VSRA, 0xE77E) /* type = VRR_C VECTOR SHIFT RIGHT ARITHMETIC */ \
+ V(vsrab, VSRAB, \
+ 0xE77F) /* type = VRR_C VECTOR SHIFT RIGHT ARITHMETIC BY BYTE */ \
+ V(vpdi, VPDI, 0xE784) /* type = VRR_C VECTOR PERMUTE DOUBLEWORD IMMEDIATE */ \
+ V(vpk, VPK, 0xE794) /* type = VRR_C VECTOR PACK */ \
+ V(vmlh, VMLH, 0xE7A1) /* type = VRR_C VECTOR MULTIPLY LOGICAL HIGH */ \
+ V(vml, VML, 0xE7A2) /* type = VRR_C VECTOR MULTIPLY LOW */ \
+ V(vmh, VMH, 0xE7A3) /* type = VRR_C VECTOR MULTIPLY HIGH */ \
+ V(vmle, VMLE, 0xE7A4) /* type = VRR_C VECTOR MULTIPLY LOGICAL EVEN */ \
+ V(vmlo, VMLO, 0xE7A5) /* type = VRR_C VECTOR MULTIPLY LOGICAL ODD */ \
+ V(vme, VME, 0xE7A6) /* type = VRR_C VECTOR MULTIPLY EVEN */ \
+ V(vmo, VMO, 0xE7A7) /* type = VRR_C VECTOR MULTIPLY ODD */ \
+ V(vgfm, VGFM, 0xE7B4) /* type = VRR_C VECTOR GALOIS FIELD MULTIPLY SUM */ \
+ V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT */ \
+ V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD */ \
+ V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE */ \
+ V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */ \
+ V(vfce, VFCE, 0xE7E8) /* type = VRR_C VECTOR FP COMPARE EQUAL */ \
+ V(vfche, VFCHE, 0xE7EA) /* type = VRR_C VECTOR FP COMPARE HIGH OR EQUAL */ \
+ V(vfch, VFCH, 0xE7EB) /* type = VRR_C VECTOR FP COMPARE HIGH */ \
+ V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
+ V(vacc, VACC, 0xE7F1) /* type = VRR_C VECTOR ADD COMPUTE CARRY */ \
+ V(vavg, VAVG, 0xE7F2) /* type = VRR_C VECTOR AVERAGE */ \
+ V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
+ V(vscbi, VSCBI, \
+ 0xE7F5) /* type = VRR_C VECTOR SUBTRACT COMPUTE BORROW INDICATION */ \
+ V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
+ V(vmnl, VMNL, 0xE7FC) /* type = VRR_C VECTOR MINIMUM LOGICAL */ \
+ V(vmxl, VMXL, 0xE7FD) /* type = VRR_C VECTOR MAXIMUM LOGICAL */ \
+ V(vmn, VMN, 0xE7FE) /* type = VRR_C VECTOR MINIMUM */ \
+ V(vmx, VMX, 0xE7FF) /* type = VRR_C VECTOR MAXIMUM */
+
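+// Encoding note (z/Architecture vector formats, summarized for
+// orientation): the VRR/VRI/VRS instructions in these lists occupy the
+// 0xE7 primary-opcode space. Each 16-bit value packs the first opcode
+// byte (instruction bits 0-7) with the trailing opcode byte (bits 40-47),
+// and the RXB field (bits 36-39) carries the high-order bit of each
+// vector register number.
+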
+#define S390_VRI_A_OPCODE_LIST(V) \
+ V(vleib, VLEIB, 0xE740) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (8) */ \
+ V(vleih, VLEIH, \
+ 0xE741) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (16) */ \
+ V(vleig, VLEIG, \
+ 0xE742) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (64) */ \
+ V(vleif, VLEIF, \
+ 0xE743) /* type = VRI_A VECTOR LOAD ELEMENT IMMEDIATE (32) */ \
+ V(vgbm, VGBM, 0xE744) /* type = VRI_A VECTOR GENERATE BYTE MASK */ \
+ V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */
+
+#define S390_VRR_D_OPCODE_LIST(V) \
+ V(vstrc, VSTRC, 0xE78A) /* type = VRR_D VECTOR STRING RANGE COMPARE */ \
+ V(vmalh, VMALH, \
+ 0xE7A9) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL HIGH */ \
+ V(vmal, VMAL, 0xE7AA) /* type = VRR_D VECTOR MULTIPLY AND ADD LOW */ \
+ V(vmah, VMAH, 0xE7AB) /* type = VRR_D VECTOR MULTIPLY AND ADD HIGH */ \
+ V(vmale, VMALE, \
+ 0xE7AC) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL EVEN */ \
+ V(vmalo, VMALO, \
+ 0xE7AD) /* type = VRR_D VECTOR MULTIPLY AND ADD LOGICAL ODD */ \
+ V(vmae, VMAE, 0xE7AE) /* type = VRR_D VECTOR MULTIPLY AND ADD EVEN */ \
+ V(vmao, VMAO, 0xE7AF) /* type = VRR_D VECTOR MULTIPLY AND ADD ODD */ \
+ V(vaccc, VACCC, \
+ 0xE7B9) /* type = VRR_D VECTOR ADD WITH CARRY COMPUTE CARRY */ \
+ V(vac, VAC, 0xE7BB) /* type = VRR_D VECTOR ADD WITH CARRY */ \
+ V(vgfma, VGFMA, \
+ 0xE7BC) /* type = VRR_D VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE */ \
+ V(vsbcbi, VSBCBI, 0xE7BD) /* type = VRR_D VECTOR SUBTRACT WITH BORROW */ \
+ /* COMPUTE BORROW INDICATION */ \
+ V(vsbi, VSBI, \
+ 0xE7BF) /* type = VRR_D VECTOR SUBTRACT WITH BORROW INDICATION */
+
+#define S390_VRI_B_OPCODE_LIST(V) \
+ V(vgm, VGM, 0xE746) /* type = VRI_B VECTOR GENERATE MASK */
+
+#define S390_VRR_E_OPCODE_LIST(V) \
+ V(vperm, VPERM, 0xE78C) /* type = VRR_E VECTOR PERMUTE */ \
+ V(vsel, VSEL, 0xE78D) /* type = VRR_E VECTOR SELECT */ \
+ V(vfms, VFMS, 0xE78E) /* type = VRR_E VECTOR FP MULTIPLY AND SUBTRACT */ \
+ V(vfma, VFMA, 0xE78F) /* type = VRR_E VECTOR FP MULTIPLY AND ADD */
+
+#define S390_VRI_C_OPCODE_LIST(V) \
+ V(vrep, VREP, 0xE74D) /* type = VRI_C VECTOR REPLICATE */
+
+#define S390_VRI_D_OPCODE_LIST(V) \
+ V(verim, VERIM, \
+ 0xE772) /* type = VRI_D VECTOR ELEMENT ROTATE AND INSERT UNDER MASK */ \
+ V(vsldb, VSLDB, 0xE777) /* type = VRI_D VECTOR SHIFT LEFT DOUBLE BY BYTE */
+
+#define S390_VRR_F_OPCODE_LIST(V) \
+ V(vlvgp, VLVGP, 0xE762) /* type = VRR_F VECTOR LOAD VR FROM GRS DISJOINT */
+
+#define S390_RIS_OPCODE_LIST(V) \
+ V(cgib, CGIB, \
+ 0xECFC) /* type = RIS COMPARE IMMEDIATE AND BRANCH (64<-8) */ \
+ V(clgib, CLGIB, \
+ 0xECFD) /* type = RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (64<-8) */ \
+ V(cib, CIB, 0xECFE) /* type = RIS COMPARE IMMEDIATE AND BRANCH (32<-8) */ \
+ V(clib, CLIB, \
+ 0xECFF) /* type = RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (32<-8) */
+
+#define S390_VRI_E_OPCODE_LIST(V) \
+ V(vftci, VFTCI, \
+ 0xE74A) /* type = VRI_E VECTOR FP TEST DATA CLASS IMMEDIATE */
+
+#define S390_RSL_A_OPCODE_LIST(V) \
+ V(tp, TP, 0xEBC0) /* type = RSL_A TEST DECIMAL */
+
+#define S390_RSL_B_OPCODE_LIST(V) \
+ V(cpdt, CPDT, 0xEDAC) /* type = RSL_B CONVERT TO PACKED (from long DFP) */ \
+ V(cpxt, CPXT, \
+ 0xEDAD) /* type = RSL_B CONVERT TO PACKED (from extended DFP) */ \
+ V(cdpt, CDPT, 0xEDAE) /* type = RSL_B CONVERT FROM PACKED (to long DFP) */ \
+ V(cxpt, CXPT, \
+ 0xEDAF) /* type = RSL_B CONVERT FROM PACKED (to extended DFP) */
+
+#define S390_SI_OPCODE_LIST(V) \
+ V(tm, TM, 0x91) /* type = SI TEST UNDER MASK */ \
+ V(mvi, MVI, 0x92) /* type = SI MOVE (immediate) */ \
+ V(ni, NI, 0x94) /* type = SI AND (immediate) */ \
+ V(cli, CLI, 0x95) /* type = SI COMPARE LOGICAL (immediate) */ \
+ V(oi, OI, 0x96) /* type = SI OR (immediate) */ \
+ V(xi, XI, 0x97) /* type = SI EXCLUSIVE OR (immediate) */ \
+ V(stnsm, STNSM, 0xAC) /* type = SI STORE THEN AND SYSTEM MASK */ \
+ V(stosm, STOSM, 0xAD) /* type = SI STORE THEN OR SYSTEM MASK */ \
+ V(mc, MC, 0xAF) /* type = SI MONITOR CALL */
+
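+// Encoding note: SI instructions are 4 bytes, with the opcode byte in
+// bits 0-7, the I2 immediate in bits 8-15, and B1/D1 in bits 16-31,
+// which is why the opcode values above are a single byte.
+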
+#define S390_SIL_OPCODE_LIST(V) \
+ V(mvhhi, MVHHI, 0xE544) /* type = SIL MOVE (16<-16) */ \
+ V(mvghi, MVGHI, 0xE548) /* type = SIL MOVE (64<-16) */ \
+ V(mvhi, MVHI, 0xE54C) /* type = SIL MOVE (32<-16) */ \
+ V(chhsi, CHHSI, \
+ 0xE554) /* type = SIL COMPARE HALFWORD IMMEDIATE (16<-16) */ \
+ V(clhhsi, CLHHSI, \
+ 0xE555) /* type = SIL COMPARE LOGICAL IMMEDIATE (16<-16) */ \
+ V(cghsi, CGHSI, \
+ 0xE558) /* type = SIL COMPARE HALFWORD IMMEDIATE (64<-16) */ \
+ V(clghsi, CLGHSI, \
+ 0xE559) /* type = SIL COMPARE LOGICAL IMMEDIATE (64<-16) */ \
+ V(chsi, CHSI, 0xE55C) /* type = SIL COMPARE HALFWORD IMMEDIATE (32<-16) */ \
+ V(clfhsi, CLFHSI, \
+ 0xE55D) /* type = SIL COMPARE LOGICAL IMMEDIATE (32<-16) */ \
+ V(tbegin, TBEGIN, \
+ 0xE560) /* type = SIL TRANSACTION BEGIN (nonconstrained) */ \
+ V(tbeginc, TBEGINC, \
+ 0xE561) /* type = SIL TRANSACTION BEGIN (constrained) */
+
+#define S390_VRS_A_OPCODE_LIST(V) \
+ V(vesl, VESL, 0xE730) /* type = VRS_A VECTOR ELEMENT SHIFT LEFT */ \
+ V(verll, VERLL, \
+ 0xE733) /* type = VRS_A VECTOR ELEMENT ROTATE LEFT LOGICAL */ \
+ V(vlm, VLM, 0xE736) /* type = VRS_A VECTOR LOAD MULTIPLE */ \
+ V(vesrl, VESRL, \
+ 0xE738) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT LOGICAL */ \
+ V(vesra, VESRA, \
+ 0xE73A) /* type = VRS_A VECTOR ELEMENT SHIFT RIGHT ARITHMETIC */ \
+ V(vstm, VSTM, 0xE73E) /* type = VRS_A VECTOR STORE MULTIPLE */
+
+#define S390_RIL_A_OPCODE_LIST(V) \
+ V(lgfi, LGFI, 0xC01) /* type = RIL_A LOAD IMMEDIATE (64<-32) */ \
+ V(xihf, XIHF, 0xC06) /* type = RIL_A EXCLUSIVE OR IMMEDIATE (high) */ \
+ V(xilf, XILF, 0xC07) /* type = RIL_A EXCLUSIVE OR IMMEDIATE (low) */ \
+ V(iihf, IIHF, 0xC08) /* type = RIL_A INSERT IMMEDIATE (high) */ \
+ V(iilf, IILF, 0xC09) /* type = RIL_A INSERT IMMEDIATE (low) */ \
+ V(nihf, NIHF, 0xC0A) /* type = RIL_A AND IMMEDIATE (high) */ \
+ V(nilf, NILF, 0xC0B) /* type = RIL_A AND IMMEDIATE (low) */ \
+ V(oihf, OIHF, 0xC0C) /* type = RIL_A OR IMMEDIATE (high) */ \
+ V(oilf, OILF, 0xC0D) /* type = RIL_A OR IMMEDIATE (low) */ \
+ V(llihf, LLIHF, 0xC0E) /* type = RIL_A LOAD LOGICAL IMMEDIATE (high) */ \
+ V(llilf, LLILF, 0xC0F) /* type = RIL_A LOAD LOGICAL IMMEDIATE (low) */ \
+ V(msgfi, MSGFI, 0xC20) /* type = RIL_A MULTIPLY SINGLE IMMEDIATE (64<-32) */ \
+ V(msfi, MSFI, 0xC21) /* type = RIL_A MULTIPLY SINGLE IMMEDIATE (32) */ \
+ V(slgfi, SLGFI, \
+ 0xC24) /* type = RIL_A SUBTRACT LOGICAL IMMEDIATE (64<-32) */ \
+ V(slfi, SLFI, 0xC25) /* type = RIL_A SUBTRACT LOGICAL IMMEDIATE (32) */ \
+ V(agfi, AGFI, 0xC28) /* type = RIL_A ADD IMMEDIATE (64<-32) */ \
+ V(afi, AFI, 0xC29) /* type = RIL_A ADD IMMEDIATE (32) */ \
+ V(algfi, ALGFI, 0xC2A) /* type = RIL_A ADD LOGICAL IMMEDIATE (64<-32) */ \
+ V(alfi, ALFI, 0xC2B) /* type = RIL_A ADD LOGICAL IMMEDIATE (32) */ \
+ V(cgfi, CGFI, 0xC2C) /* type = RIL_A COMPARE IMMEDIATE (64<-32) */ \
+ V(cfi, CFI, 0xC2D) /* type = RIL_A COMPARE IMMEDIATE (32) */ \
+ V(clgfi, CLGFI, 0xC2E) /* type = RIL_A COMPARE LOGICAL IMMEDIATE (64<-32) */ \
+ V(clfi, CLFI, 0xC2F) /* type = RIL_A COMPARE LOGICAL IMMEDIATE (32) */ \
+ V(aih, AIH, 0xCC8) /* type = RIL_A ADD IMMEDIATE HIGH (32) */ \
+ V(alsih, ALSIH, \
+ 0xCCA) /* type = RIL_A ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32) */ \
+ V(alsihn, ALSIHN, \
+ 0xCCB) /* type = RIL_A ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32) */ \
+ V(cih, CIH, 0xCCD) /* type = RIL_A COMPARE IMMEDIATE HIGH (32) */ \
+ V(clih, CLIH, 0xCCF) /* type = RIL_A COMPARE LOGICAL IMMEDIATE HIGH (32) */
+
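+// Encoding note: RIL opcodes are 12 bits wide, the first byte plus a
+// 4-bit extension in instruction bits 12-15 (the R1/M1 field sits in
+// bits 8-11), hence the three-digit values above (e.g. 0xC0F is byte
+// 0xC0 with extension 0xF).
+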
+#define S390_RIL_B_OPCODE_LIST(V) \
+ V(larl, LARL, 0xC00) /* type = RIL_B LOAD ADDRESS RELATIVE LONG */ \
+ V(brasl, BRASL, 0xC05) /* type = RIL_B BRANCH RELATIVE AND SAVE LONG */ \
+ V(llhrl, LLHRL, \
+ 0xC42) /* type = RIL_B LOAD LOGICAL HALFWORD RELATIVE LONG (32<-16) */ \
+ V(lghrl, LGHRL, \
+ 0xC44) /* type = RIL_B LOAD HALFWORD RELATIVE LONG (64<-16) */ \
+ V(lhrl, LHRL, 0xC45) /* type = RIL_B LOAD HALFWORD RELATIVE LONG (32<-16) */ \
+ V(llghrl, LLGHRL, \
+ 0xC46) /* type = RIL_B LOAD LOGICAL HALFWORD RELATIVE LONG (64<-16) */ \
+ V(sthrl, STHRL, 0xC47) /* type = RIL_B STORE HALFWORD RELATIVE LONG (16) */ \
+ V(lgrl, LGRL, 0xC48) /* type = RIL_B LOAD RELATIVE LONG (64) */ \
+ V(stgrl, STGRL, 0xC4B) /* type = RIL_B STORE RELATIVE LONG (64) */ \
+ V(lgfrl, LGFRL, 0xC4C) /* type = RIL_B LOAD RELATIVE LONG (64<-32) */ \
+ V(lrl, LRL, 0xC4D) /* type = RIL_B LOAD RELATIVE LONG (32) */ \
+ V(llgfrl, LLGFRL, \
+ 0xC4E) /* type = RIL_B LOAD LOGICAL RELATIVE LONG (64<-32) */ \
+ V(strl, STRL, 0xC4F) /* type = RIL_B STORE RELATIVE LONG (32) */ \
+ V(exrl, EXRL, 0xC60) /* type = RIL_B EXECUTE RELATIVE LONG */ \
+ V(cghrl, CGHRL, \
+ 0xC64) /* type = RIL_B COMPARE HALFWORD RELATIVE LONG (64<-16) */ \
+ V(chrl, CHRL, \
+ 0xC65) /* type = RIL_B COMPARE HALFWORD RELATIVE LONG (32<-16) */ \
+ V(clghrl, CLGHRL, \
+ 0xC66) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64<-16) */ \
+ V(clhrl, CLHRL, \
+ 0xC67) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (32<-16) */ \
+ V(cgrl, CGRL, 0xC68) /* type = RIL_B COMPARE RELATIVE LONG (64) */ \
+ V(clgrl, CLGRL, 0xC6A) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64) */ \
+ V(cgfrl, CGFRL, 0xC6C) /* type = RIL_B COMPARE RELATIVE LONG (64<-32) */ \
+ V(crl, CRL, 0xC6D) /* type = RIL_B COMPARE RELATIVE LONG (32) */ \
+ V(clgfrl, CLGFRL, \
+ 0xC6E) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (64<-32) */ \
+ V(clrl, CLRL, 0xC6F) /* type = RIL_B COMPARE LOGICAL RELATIVE LONG (32) */ \
+ V(brcth, BRCTH, 0xCC6) /* type = RIL_B BRANCH RELATIVE ON COUNT HIGH (32) */
+
+#define S390_VRS_B_OPCODE_LIST(V) \
+ V(vlvg, VLVG, 0xE722) /* type = VRS_B VECTOR LOAD VR ELEMENT FROM GR */ \
+ V(vll, VLL, 0xE737) /* type = VRS_B VECTOR LOAD WITH LENGTH */ \
+ V(vstl, VSTL, 0xE73F) /* type = VRS_B VECTOR STORE WITH LENGTH */
+
+#define S390_RIL_C_OPCODE_LIST(V) \
+ V(brcl, BRCL, 0xC04) /* type = RIL_C BRANCH RELATIVE ON CONDITION LONG */ \
+ V(pfdrl, PFDRL, 0xC62) /* type = RIL_C PREFETCH DATA RELATIVE LONG */
+
+#define S390_VRS_C_OPCODE_LIST(V) \
+ V(vlgv, VLGV, 0xE721) /* type = VRS_C VECTOR LOAD GR FROM VR ELEMENT */
+
+#define S390_RI_A_OPCODE_LIST(V) \
+ V(iihh, IIHH, 0xA50) /* type = RI_A INSERT IMMEDIATE (high high) */ \
+ V(iihl, IIHL, 0xA51) /* type = RI_A INSERT IMMEDIATE (high low) */ \
+ V(iilh, IILH, 0xA52) /* type = RI_A INSERT IMMEDIATE (low high) */ \
+ V(iill, IILL, 0xA53) /* type = RI_A INSERT IMMEDIATE (low low) */ \
+ V(nihh, NIHH, 0xA54) /* type = RI_A AND IMMEDIATE (high high) */ \
+ V(nihl, NIHL, 0xA55) /* type = RI_A AND IMMEDIATE (high low) */ \
+ V(nilh, NILH, 0xA56) /* type = RI_A AND IMMEDIATE (low high) */ \
+ V(nill, NILL, 0xA57) /* type = RI_A AND IMMEDIATE (low low) */ \
+ V(oihh, OIHH, 0xA58) /* type = RI_A OR IMMEDIATE (high high) */ \
+ V(oihl, OIHL, 0xA59) /* type = RI_A OR IMMEDIATE (high low) */ \
+ V(oilh, OILH, 0xA5A) /* type = RI_A OR IMMEDIATE (low high) */ \
+ V(oill, OILL, 0xA5B) /* type = RI_A OR IMMEDIATE (low low) */ \
+ V(llihh, LLIHH, 0xA5C) /* type = RI_A LOAD LOGICAL IMMEDIATE (high high) */ \
+ V(llihl, LLIHL, 0xA5D) /* type = RI_A LOAD LOGICAL IMMEDIATE (high low) */ \
+ V(llilh, LLILH, 0xA5E) /* type = RI_A LOAD LOGICAL IMMEDIATE (low high) */ \
+ V(llill, LLILL, 0xA5F) /* type = RI_A LOAD LOGICAL IMMEDIATE (low low) */ \
+ V(tmlh, TMLH, 0xA70) /* type = RI_A TEST UNDER MASK (low high) */ \
+ V(tmh, TMH, 0xA70) /* type = RI_A TEST UNDER MASK HIGH */ \
+ V(tmll, TMLL, 0xA71) /* type = RI_A TEST UNDER MASK (low low) */ \
+ V(tml, TML, 0xA71) /* type = RI_A TEST UNDER MASK LOW */ \
+ V(tmhh, TMHH, 0xA72) /* type = RI_A TEST UNDER MASK (high high) */ \
+ V(tmhl, TMHL, 0xA73) /* type = RI_A TEST UNDER MASK (high low) */ \
+ V(lhi, LHI, 0xA78) /* type = RI_A LOAD HALFWORD IMMEDIATE (32<-16) */ \
+ V(lghi, LGHI, 0xA79) /* type = RI_A LOAD HALFWORD IMMEDIATE (64<-16) */ \
+ V(ahi, AHI, 0xA7A) /* type = RI_A ADD HALFWORD IMMEDIATE (32<-16) */ \
+ V(aghi, AGHI, 0xA7B) /* type = RI_A ADD HALFWORD IMMEDIATE (64<-16) */ \
+ V(mhi, MHI, 0xA7C) /* type = RI_A MULTIPLY HALFWORD IMMEDIATE (32<-16) */ \
+ V(mghi, MGHI, 0xA7D) /* type = RI_A MULTIPLY HALFWORD IMMEDIATE (64<-16) */ \
+ V(chi, CHI, 0xA7E) /* type = RI_A COMPARE HALFWORD IMMEDIATE (32<-16) */ \
+ V(cghi, CGHI, 0xA7F) /* type = RI_A COMPARE HALFWORD IMMEDIATE (64<-16) */
+
+#define S390_RSI_OPCODE_LIST(V) \
+ V(brxh, BRXH, 0x84) /* type = RSI BRANCH RELATIVE ON INDEX HIGH (32) */ \
+ V(brxle, BRXLE, \
+ 0x85) /* type = RSI BRANCH RELATIVE ON INDEX LOW OR EQ. (32) */
+
+#define S390_RI_B_OPCODE_LIST(V) \
+ V(bras, BRAS, 0xA75) /* type = RI_B BRANCH RELATIVE AND SAVE */ \
+ V(brct, BRCT, 0xA76) /* type = RI_B BRANCH RELATIVE ON COUNT (32) */ \
+ V(brctg, BRCTG, 0xA77) /* type = RI_B BRANCH RELATIVE ON COUNT (64) */
+
+#define S390_RI_C_OPCODE_LIST(V) \
+ V(brc, BRC, 0xA74) /* type = RI_C BRANCH RELATIVE ON CONDITION */
+
+#define S390_RSL_OPCODE_LIST(V) \
+ V(czdt, CZDT, 0xEDA8) /* type = RSL CONVERT TO ZONED (from long DFP) */ \
+ V(czxt, CZXT, 0xEDA9) /* type = RSL CONVERT TO ZONED (from extended DFP) */ \
+ V(cdzt, CDZT, 0xEDAA) /* type = RSL CONVERT FROM ZONED (to long DFP) */ \
+ V(cxzt, CXZT, 0xEDAB) /* type = RSL CONVERT FROM ZONED (to extended DFP) */
+
+#define S390_SMI_OPCODE_LIST(V) \
+ V(bpp, BPP, 0xC7) /* type = SMI BRANCH PREDICTION PRELOAD */
+
+#define S390_RXY_A_OPCODE_LIST(V) \
+ V(ltg, LTG, 0xE302) /* type = RXY_A LOAD AND TEST (64) */ \
+ V(lrag, LRAG, 0xE303) /* type = RXY_A LOAD REAL ADDRESS (64) */ \
+ V(lg, LG, 0xE304) /* type = RXY_A LOAD (64) */ \
+ V(cvby, CVBY, 0xE306) /* type = RXY_A CONVERT TO BINARY (32) */ \
+ V(ag, AG, 0xE308) /* type = RXY_A ADD (64) */ \
+ V(sg, SG, 0xE309) /* type = RXY_A SUBTRACT (64) */ \
+ V(alg, ALG, 0xE30A) /* type = RXY_A ADD LOGICAL (64) */ \
+ V(slg, SLG, 0xE30B) /* type = RXY_A SUBTRACT LOGICAL (64) */ \
+ V(msg, MSG, 0xE30C) /* type = RXY_A MULTIPLY SINGLE (64) */ \
+ V(dsg, DSG, 0xE30D) /* type = RXY_A DIVIDE SINGLE (64) */ \
+ V(cvbg, CVBG, 0xE30E) /* type = RXY_A CONVERT TO BINARY (64) */ \
+ V(lrvg, LRVG, 0xE30F) /* type = RXY_A LOAD REVERSED (64) */ \
+ V(lt, LT, 0xE312) /* type = RXY_A LOAD AND TEST (32) */ \
+ V(lray, LRAY, 0xE313) /* type = RXY_A LOAD REAL ADDRESS (32) */ \
+ V(lgf, LGF, 0xE314) /* type = RXY_A LOAD (64<-32) */ \
+ V(lgh, LGH, 0xE315) /* type = RXY_A LOAD HALFWORD (64<-16) */ \
+ V(llgf, LLGF, 0xE316) /* type = RXY_A LOAD LOGICAL (64<-32) */ \
+ V(llgt, LLGT, \
+ 0xE317) /* type = RXY_A LOAD LOGICAL THIRTY ONE BITS (64<-31) */ \
+ V(agf, AGF, 0xE318) /* type = RXY_A ADD (64<-32) */ \
+ V(sgf, SGF, 0xE319) /* type = RXY_A SUBTRACT (64<-32) */ \
+ V(algf, ALGF, 0xE31A) /* type = RXY_A ADD LOGICAL (64<-32) */ \
+ V(slgf, SLGF, 0xE31B) /* type = RXY_A SUBTRACT LOGICAL (64<-32) */ \
+ V(msgf, MSGF, 0xE31C) /* type = RXY_A MULTIPLY SINGLE (64<-32) */ \
+ V(dsgf, DSGF, 0xE31D) /* type = RXY_A DIVIDE SINGLE (64<-32) */ \
+ V(lrv, LRV, 0xE31E) /* type = RXY_A LOAD REVERSED (32) */ \
+ V(lrvh, LRVH, 0xE31F) /* type = RXY_A LOAD REVERSED (16) */ \
+ V(cg, CG, 0xE320) /* type = RXY_A COMPARE (64) */ \
+ V(clg, CLG, 0xE321) /* type = RXY_A COMPARE LOGICAL (64) */ \
+ V(stg, STG, 0xE324) /* type = RXY_A STORE (64) */ \
+ V(ntstg, NTSTG, 0xE325) /* type = RXY_A NONTRANSACTIONAL STORE (64) */ \
+ V(cvdy, CVDY, 0xE326) /* type = RXY_A CONVERT TO DECIMAL (32) */ \
+ V(lzrg, LZRG, 0xE32A) /* type = RXY_A LOAD AND ZERO RIGHTMOST BYTE (64) */ \
+ V(cvdg, CVDG, 0xE32E) /* type = RXY_A CONVERT TO DECIMAL (64) */ \
+ V(strvg, STRVG, 0xE32F) /* type = RXY_A STORE REVERSED (64) */ \
+ V(cgf, CGF, 0xE330) /* type = RXY_A COMPARE (64<-32) */ \
+ V(clgf, CLGF, 0xE331) /* type = RXY_A COMPARE LOGICAL (64<-32) */ \
+ V(ltgf, LTGF, 0xE332) /* type = RXY_A LOAD AND TEST (64<-32) */ \
+ V(cgh, CGH, 0xE334) /* type = RXY_A COMPARE HALFWORD (64<-16) */ \
+ V(llzrgf, LLZRGF, \
+ 0xE33A) /* type = RXY_A LOAD LOGICAL AND ZERO RIGHTMOST BYTE (64<-32) */ \
+ V(lzrf, LZRF, 0xE33B) /* type = RXY_A LOAD AND ZERO RIGHTMOST BYTE (32) */ \
+ V(strv, STRV, 0xE33E) /* type = RXY_A STORE REVERSED (32) */ \
+ V(strvh, STRVH, 0xE33F) /* type = RXY_A STORE REVERSED (16) */ \
+ V(bctg, BCTG, 0xE346) /* type = RXY_A BRANCH ON COUNT (64) */ \
+ V(sty, STY, 0xE350) /* type = RXY_A STORE (32) */ \
+ V(msy, MSY, 0xE351) /* type = RXY_A MULTIPLY SINGLE (32) */ \
+ V(ny, NY, 0xE354) /* type = RXY_A AND (32) */ \
+ V(cly, CLY, 0xE355) /* type = RXY_A COMPARE LOGICAL (32) */ \
+ V(oy, OY, 0xE356) /* type = RXY_A OR (32) */ \
+ V(xy, XY, 0xE357) /* type = RXY_A EXCLUSIVE OR (32) */ \
+ V(ly, LY, 0xE358) /* type = RXY_A LOAD (32) */ \
+ V(cy, CY, 0xE359) /* type = RXY_A COMPARE (32) */ \
+ V(ay, AY, 0xE35A) /* type = RXY_A ADD (32) */ \
+ V(sy, SY, 0xE35B) /* type = RXY_A SUBTRACT (32) */ \
+ V(mfy, MFY, 0xE35C) /* type = RXY_A MULTIPLY (64<-32) */ \
+ V(aly, ALY, 0xE35E) /* type = RXY_A ADD LOGICAL (32) */ \
+ V(sly, SLY, 0xE35F) /* type = RXY_A SUBTRACT LOGICAL (32) */ \
+ V(sthy, STHY, 0xE370) /* type = RXY_A STORE HALFWORD (16) */ \
+ V(lay, LAY, 0xE371) /* type = RXY_A LOAD ADDRESS */ \
+ V(stcy, STCY, 0xE372) /* type = RXY_A STORE CHARACTER */ \
+ V(icy, ICY, 0xE373) /* type = RXY_A INSERT CHARACTER */ \
+ V(laey, LAEY, 0xE375) /* type = RXY_A LOAD ADDRESS EXTENDED */ \
+ V(lb, LB, 0xE376) /* type = RXY_A LOAD BYTE (32<-8) */ \
+ V(lgb, LGB, 0xE377) /* type = RXY_A LOAD BYTE (64<-8) */ \
+ V(lhy, LHY, 0xE378) /* type = RXY_A LOAD HALFWORD (32<-16) */ \
+ V(chy, CHY, 0xE379) /* type = RXY_A COMPARE HALFWORD (32<-16) */ \
+ V(ahy, AHY, 0xE37A) /* type = RXY_A ADD HALFWORD (32<-16) */ \
+ V(shy, SHY, 0xE37B) /* type = RXY_A SUBTRACT HALFWORD (32<-16) */ \
+ V(mhy, MHY, 0xE37C) /* type = RXY_A MULTIPLY HALFWORD (32<-16) */ \
+ V(ng, NG, 0xE380) /* type = RXY_A AND (64) */ \
+ V(og, OG, 0xE381) /* type = RXY_A OR (64) */ \
+ V(xg, XG, 0xE382) /* type = RXY_A EXCLUSIVE OR (64) */ \
+ V(lgat, LGAT, 0xE385) /* type = RXY_A LOAD AND TRAP (64) */ \
+ V(mlg, MLG, 0xE386) /* type = RXY_A MULTIPLY LOGICAL (128<-64) */ \
+ V(dlg, DLG, 0xE387) /* type = RXY_A DIVIDE LOGICAL (64<-128) */ \
+ V(alcg, ALCG, 0xE388) /* type = RXY_A ADD LOGICAL WITH CARRY (64) */ \
+ V(slbg, SLBG, 0xE389) /* type = RXY_A SUBTRACT LOGICAL WITH BORROW (64) */ \
+ V(stpq, STPQ, 0xE38E) /* type = RXY_A STORE PAIR TO QUADWORD */ \
+ V(lpq, LPQ, 0xE38F) /* type = RXY_A LOAD PAIR FROM QUADWORD (64&64<-128) */ \
+ V(llgc, LLGC, 0xE390) /* type = RXY_A LOAD LOGICAL CHARACTER (64<-8) */ \
+ V(llgh, LLGH, 0xE391) /* type = RXY_A LOAD LOGICAL HALFWORD (64<-16) */ \
+ V(llc, LLC, 0xE394) /* type = RXY_A LOAD LOGICAL CHARACTER (32<-8) */ \
+ V(llh, LLH, 0xE395) /* type = RXY_A LOAD LOGICAL HALFWORD (32<-16) */ \
+ V(ml, ML, 0xE396) /* type = RXY_A MULTIPLY LOGICAL (64<-32) */ \
+ V(dl, DL, 0xE397) /* type = RXY_A DIVIDE LOGICAL (32<-64) */ \
+ V(alc, ALC, 0xE398) /* type = RXY_A ADD LOGICAL WITH CARRY (32) */ \
+ V(slb, SLB, 0xE399) /* type = RXY_A SUBTRACT LOGICAL WITH BORROW (32) */ \
+ V(llgtat, LLGTAT, \
+ 0xE39C) /* type = RXY_A LOAD LOGICAL THIRTY ONE BITS AND TRAP (64<-31) */ \
+ V(llgfat, LLGFAT, 0xE39D) /* type = RXY_A LOAD LOGICAL AND TRAP (64<-32) */ \
+ V(lat, LAT, 0xE39F) /* type = RXY_A LOAD AND TRAP (32L<-32) */ \
+ V(lbh, LBH, 0xE3C0) /* type = RXY_A LOAD BYTE HIGH (32<-8) */ \
+ V(llch, LLCH, 0xE3C2) /* type = RXY_A LOAD LOGICAL CHARACTER HIGH (32<-8) */ \
+ V(stch, STCH, 0xE3C3) /* type = RXY_A STORE CHARACTER HIGH (8) */ \
+ V(lhh, LHH, 0xE3C4) /* type = RXY_A LOAD HALFWORD HIGH (32<-16) */ \
+ V(llhh, LLHH, 0xE3C6) /* type = RXY_A LOAD LOGICAL HALFWORD HIGH (32<-16) */ \
+ V(sthh, STHH, 0xE3C7) /* type = RXY_A STORE HALFWORD HIGH (16) */ \
+ V(lfhat, LFHAT, 0xE3C8) /* type = RXY_A LOAD HIGH AND TRAP (32H<-32) */ \
+ V(lfh, LFH, 0xE3CA) /* type = RXY_A LOAD HIGH (32) */ \
+ V(stfh, STFH, 0xE3CB) /* type = RXY_A STORE HIGH (32) */ \
+ V(chf, CHF, 0xE3CD) /* type = RXY_A COMPARE HIGH (32) */ \
+ V(clhf, CLHF, 0xE3CF) /* type = RXY_A COMPARE LOGICAL HIGH (32) */ \
+ V(ley, LEY, 0xED64) /* type = RXY_A LOAD (short) */ \
+ V(ldy, LDY, 0xED65) /* type = RXY_A LOAD (long) */ \
+ V(stey, STEY, 0xED66) /* type = RXY_A STORE (short) */ \
+ V(stdy, STDY, 0xED67) /* type = RXY_A STORE (long) */
+
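+// Encoding note: RXY instructions (like SIY and RSY) are 6 bytes with a
+// split opcode, the first byte (0xE3 or 0xED above) in instruction bits
+// 0-7 and the second opcode byte in bits 40-47; the 16-bit values here
+// simply pack the two bytes together.
+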
+#define S390_RXY_B_OPCODE_LIST(V) \
+ V(pfd, PFD, 0xE336) /* type = RXY_B PREFETCH DATA */
+
+#define S390_SIY_OPCODE_LIST(V) \
+ V(tmy, TMY, 0xEB51) /* type = SIY TEST UNDER MASK */ \
+ V(mviy, MVIY, 0xEB52) /* type = SIY MOVE (immediate) */ \
+ V(niy, NIY, 0xEB54) /* type = SIY AND (immediate) */ \
+ V(cliy, CLIY, 0xEB55) /* type = SIY COMPARE LOGICAL (immediate) */ \
+ V(oiy, OIY, 0xEB56) /* type = SIY OR (immediate) */ \
+ V(xiy, XIY, 0xEB57) /* type = SIY EXCLUSIVE OR (immediate) */ \
+ V(asi, ASI, 0xEB6A) /* type = SIY ADD IMMEDIATE (32<-8) */ \
+ V(alsi, ALSI, \
+ 0xEB6E) /* type = SIY ADD LOGICAL WITH SIGNED IMMEDIATE (32<-8) */ \
+ V(agsi, AGSI, 0xEB7A) /* type = SIY ADD IMMEDIATE (64<-8) */ \
+ V(algsi, ALGSI, \
+ 0xEB7E) /* type = SIY ADD LOGICAL WITH SIGNED IMMEDIATE (64<-8) */
+
+#define S390_SS_A_OPCODE_LIST(V) \
+ V(trtr, TRTR, 0xD0) /* type = SS_A TRANSLATE AND TEST REVERSE */ \
+ V(mvn, MVN, 0xD1) /* type = SS_A MOVE NUMERICS */ \
+ V(mvc, MVC, 0xD2) /* type = SS_A MOVE (character) */ \
+ V(mvz, MVZ, 0xD3) /* type = SS_A MOVE ZONES */ \
+ V(nc, NC, 0xD4) /* type = SS_A AND (character) */ \
+ V(clc, CLC, 0xD5) /* type = SS_A COMPARE LOGICAL (character) */ \
+ V(oc, OC, 0xD6) /* type = SS_A OR (character) */ \
+ V(xc, XC, 0xD7) /* type = SS_A EXCLUSIVE OR (character) */ \
+ V(tr, TR, 0xDC) /* type = SS_A TRANSLATE */ \
+ V(trt, TRT, 0xDD) /* type = SS_A TRANSLATE AND TEST */ \
+ V(ed, ED, 0xDE) /* type = SS_A EDIT */ \
+ V(edmk, EDMK, 0xDF) /* type = SS_A EDIT AND MARK */ \
+ V(unpku, UNPKU, 0xE2) /* type = SS_A UNPACK UNICODE */ \
+ V(mvcin, MVCIN, 0xE8) /* type = SS_A MOVE INVERSE */ \
+ V(unpka, UNPKA, 0xEA) /* type = SS_A UNPACK ASCII */
+
+#define S390_E_OPCODE_LIST(V) \
+ V(pr, PR, 0x0101) /* type = E PROGRAM RETURN */ \
+ V(upt, UPT, 0x0102) /* type = E UPDATE TREE */ \
+ V(ptff, PTFF, 0x0104) /* type = E PERFORM TIMING FACILITY FUNCTION */ \
+ V(sckpf, SCKPF, 0x0107) /* type = E SET CLOCK PROGRAMMABLE FIELD */ \
+ V(pfpo, PFPO, 0x010A) /* type = E PERFORM FLOATING-POINT OPERATION */ \
+ V(tam, TAM, 0x010B) /* type = E TEST ADDRESSING MODE */ \
+ V(sam24, SAM24, 0x010C) /* type = E SET ADDRESSING MODE (24) */ \
+ V(sam31, SAM31, 0x010D) /* type = E SET ADDRESSING MODE (31) */ \
+ V(sam64, SAM64, 0x010E) /* type = E SET ADDRESSING MODE (64) */ \
+ V(trap2, TRAP2, 0x01FF) /* type = E TRAP */
+
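+// Encoding note: E-format instructions are 2 bytes consisting solely of
+// the 16-bit opcode, with no operand fields (e.g. 0x0101 is PR).
+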
+#define S390_SS_B_OPCODE_LIST(V) \
+ V(mvo, MVO, 0xF1) /* type = SS_B MOVE WITH OFFSET */ \
+ V(pack, PACK, 0xF2) /* type = SS_B PACK */ \
+ V(unpk, UNPK, 0xF3) /* type = SS_B UNPACK */ \
+ V(zap, ZAP, 0xF8) /* type = SS_B ZERO AND ADD */ \
+ V(cp, CP, 0xF9) /* type = SS_B COMPARE DECIMAL */ \
+ V(ap, AP, 0xFA) /* type = SS_B ADD DECIMAL */ \
+ V(sp, SP, 0xFB) /* type = SS_B SUBTRACT DECIMAL */ \
+ V(mp, MP, 0xFC) /* type = SS_B MULTIPLY DECIMAL */ \
+ V(dp, DP, 0xFD) /* type = SS_B DIVIDE DECIMAL */
+
+#define S390_SS_C_OPCODE_LIST(V) \
+ V(srp, SRP, 0xF0) /* type = SS_C SHIFT AND ROUND DECIMAL */
+
+#define S390_SS_D_OPCODE_LIST(V) \
+ V(mvck, MVCK, 0xD9) /* type = SS_D MOVE WITH KEY */ \
+ V(mvcp, MVCP, 0xDA) /* type = SS_D MOVE TO PRIMARY */ \
+ V(mvcs, MVCS, 0xDB) /* type = SS_D MOVE TO SECONDARY */
+
+#define S390_SS_E_OPCODE_LIST(V) \
+ V(plo, PLO, 0xEE) /* type = SS_E PERFORM LOCKED OPERATION */ \
+ V(lmd, LMD, 0xEF) /* type = SS_E LOAD MULTIPLE DISJOINT (64<-32&32) */
+
+#define S390_I_OPCODE_LIST(V) \
+ V(svc, SVC, 0x0A) /* type = I SUPERVISOR CALL */
+
+#define S390_SS_F_OPCODE_LIST(V) \
+ V(pku, PKU, 0xE1) /* type = SS_F PACK UNICODE */ \
+ V(pka, PKA, 0xE9) /* type = SS_F PACK ASCII */
+
+#define S390_SSE_OPCODE_LIST(V) \
+ V(lasp, LASP, 0xE500) /* type = SSE LOAD ADDRESS SPACE PARAMETERS */ \
+ V(tprot, TPROT, 0xE501) /* type = SSE TEST PROTECTION */ \
+ V(strag, STRAG, 0xE502) /* type = SSE STORE REAL ADDRESS */ \
+ V(mvcsk, MVCSK, 0xE50E) /* type = SSE MOVE WITH SOURCE KEY */ \
+ V(mvcdk, MVCDK, 0xE50F) /* type = SSE MOVE WITH DESTINATION KEY */
+
+#define S390_SSF_OPCODE_LIST(V) \
+ V(mvcos, MVCOS, 0xC80) /* type = SSF MOVE WITH OPTIONAL SPECIFICATIONS */ \
+ V(ectg, ECTG, 0xC81) /* type = SSF EXTRACT CPU TIME */ \
+ V(csst, CSST, 0xC82) /* type = SSF COMPARE AND SWAP AND STORE */ \
+ V(lpd, LPD, 0xC84) /* type = SSF LOAD PAIR DISJOINT (32) */ \
+ V(lpdg, LPDG, 0xC85) /* type = SSF LOAD PAIR DISJOINT (64) */
+
+#define S390_RS_A_OPCODE_LIST(V) \
+ V(bxh, BXH, 0x86) /* type = RS_A BRANCH ON INDEX HIGH (32) */ \
+ V(bxle, BXLE, 0x87) /* type = RS_A BRANCH ON INDEX LOW OR EQUAL (32) */ \
+ V(srl, SRL, 0x88) /* type = RS_A SHIFT RIGHT SINGLE LOGICAL (32) */ \
+ V(sll, SLL, 0x89) /* type = RS_A SHIFT LEFT SINGLE LOGICAL (32) */ \
+ V(sra, SRA, 0x8A) /* type = RS_A SHIFT RIGHT SINGLE (32) */ \
+ V(sla, SLA, 0x8B) /* type = RS_A SHIFT LEFT SINGLE (32) */ \
+ V(srdl, SRDL, 0x8C) /* type = RS_A SHIFT RIGHT DOUBLE LOGICAL (64) */ \
+ V(sldl, SLDL, 0x8D) /* type = RS_A SHIFT LEFT DOUBLE LOGICAL (64) */ \
+ V(srda, SRDA, 0x8E) /* type = RS_A SHIFT RIGHT DOUBLE (64) */ \
+ V(slda, SLDA, 0x8F) /* type = RS_A SHIFT LEFT DOUBLE (64) */ \
+ V(stm, STM, 0x90) /* type = RS_A STORE MULTIPLE (32) */ \
+ V(lm, LM, 0x98) /* type = RS_A LOAD MULTIPLE (32) */ \
+ V(trace, TRACE, 0x99) /* type = RS_A TRACE (32) */ \
+ V(lam, LAM, 0x9A) /* type = RS_A LOAD ACCESS MULTIPLE */ \
+ V(stam, STAM, 0x9B) /* type = RS_A STORE ACCESS MULTIPLE */ \
+ V(mvcle, MVCLE, 0xA8) /* type = RS_A MOVE LONG EXTENDED */ \
+ V(clcle, CLCLE, 0xA9) /* type = RS_A COMPARE LOGICAL LONG EXTENDED */ \
+ V(sigp, SIGP, 0xAE) /* type = RS_A SIGNAL PROCESSOR */ \
+ V(stctl, STCTL, 0xB6) /* type = RS_A STORE CONTROL (32) */ \
+ V(lctl, LCTL, 0xB7) /* type = RS_A LOAD CONTROL (32) */ \
+ V(cs, CS, 0xBA) /* type = RS_A COMPARE AND SWAP (32) */ \
+ V(cds, CDS, 0xBB) /* type = RS_A COMPARE DOUBLE AND SWAP (32) */
+
+#define S390_RS_B_OPCODE_LIST(V) \
+ V(clm, CLM, 0xBD) /* type = RS_B COMPARE LOGICAL CHAR. UNDER MASK (low) */ \
+ V(stcm, STCM, 0xBE) /* type = RS_B STORE CHARACTERS UNDER MASK (low) */ \
+ V(icm, ICM, 0xBF) /* type = RS_B INSERT CHARACTERS UNDER MASK (low) */
+
+#define S390_S_OPCODE_LIST(V) \
+ V(awr, AWR, 0x2E) /* type = RR ADD UNNORMALIZED (long HFP) */ \
+ V(lpsw, LPSW, 0x82) /* type = S LOAD PSW */ \
+ V(diagnose, DIAGNOSE, 0x83) /* type = S DIAGNOSE */ \
+ V(ts, TS, 0x93) /* type = S TEST AND SET */ \
+ V(stidp, STIDP, 0xB202) /* type = S STORE CPU ID */ \
+ V(sck, SCK, 0xB204) /* type = S SET CLOCK */ \
+ V(stck, STCK, 0xB205) /* type = S STORE CLOCK */ \
+ V(sckc, SCKC, 0xB206) /* type = S SET CLOCK COMPARATOR */ \
+ V(stckc, STCKC, 0xB207) /* type = S STORE CLOCK COMPARATOR */ \
+ V(spt, SPT, 0xB208) /* type = S SET CPU TIMER */ \
+ V(stpt, STPT, 0xB209) /* type = S STORE CPU TIMER */ \
+ V(spka, SPKA, 0xB20A) /* type = S SET PSW KEY FROM ADDRESS */ \
+ V(ipk, IPK, 0xB20B) /* type = S INSERT PSW KEY */ \
+ V(ptlb, PTLB, 0xB20D) /* type = S PURGE TLB */ \
+ V(spx, SPX, 0xB210) /* type = S SET PREFIX */ \
+ V(stpx, STPX, 0xB211) /* type = S STORE PREFIX */ \
+ V(stap, STAP, 0xB212) /* type = S STORE CPU ADDRESS */ \
+ V(pc, PC, 0xB218) /* type = S PROGRAM CALL */ \
+ V(sac, SAC, 0xB219) /* type = S SET ADDRESS SPACE CONTROL */ \
+ V(cfc, CFC, 0xB21A) /* type = S COMPARE AND FORM CODEWORD */ \
+ V(csch, CSCH, 0xB230) /* type = S CLEAR SUBCHANNEL */ \
+ V(hsch, HSCH, 0xB231) /* type = S HALT SUBCHANNEL */ \
+ V(msch, MSCH, 0xB232) /* type = S MODIFY SUBCHANNEL */ \
+ V(ssch, SSCH, 0xB233) /* type = S START SUBCHANNEL */ \
+ V(stsch, STSCH, 0xB234) /* type = S STORE SUBCHANNEL */ \
+ V(tsch, TSCH, 0xB235) /* type = S TEST SUBCHANNEL */ \
+ V(tpi, TPI, 0xB236) /* type = S TEST PENDING INTERRUPTION */ \
+ V(sal, SAL, 0xB237) /* type = S SET ADDRESS LIMIT */ \
+ V(rsch, RSCH, 0xB238) /* type = S RESUME SUBCHANNEL */ \
+ V(stcrw, STCRW, 0xB239) /* type = S STORE CHANNEL REPORT WORD */ \
+ V(stcps, STCPS, 0xB23A) /* type = S STORE CHANNEL PATH STATUS */ \
+ V(rchp, RCHP, 0xB23B) /* type = S RESET CHANNEL PATH */ \
+ V(schm, SCHM, 0xB23C) /* type = S SET CHANNEL MONITOR */ \
+ V(xsch, XSCH, 0xB276) /* type = S CANCEL SUBCHANNEL */ \
+ V(rp, RP_Z, 0xB277) /* type = S RESUME PROGRAM */ \
+ V(stcke, STCKE, 0xB278) /* type = S STORE CLOCK EXTENDED */ \
+ V(sacf, SACF, 0xB279) /* type = S SET ADDRESS SPACE CONTROL FAST */ \
+ V(stckf, STCKF, 0xB27C) /* type = S STORE CLOCK FAST */ \
+ V(stsi, STSI, 0xB27D) /* type = S STORE SYSTEM INFORMATION */ \
+ V(srnm, SRNM, 0xB299) /* type = S SET BFP ROUNDING MODE (2 bit) */ \
+ V(stfpc, STFPC, 0xB29C) /* type = S STORE FPC */ \
+ V(lfpc, LFPC, 0xB29D) /* type = S LOAD FPC */ \
+ V(stfle, STFLE, 0xB2B0) /* type = S STORE FACILITY LIST EXTENDED */ \
+ V(stfl, STFL, 0xB2B1) /* type = S STORE FACILITY LIST */ \
+ V(lpswe, LPSWE, 0xB2B2) /* type = S LOAD PSW EXTENDED */ \
+ V(srnmb, SRNMB, 0xB2B8) /* type = S SET BFP ROUNDING MODE (3 bit) */ \
+ V(srnmt, SRNMT, 0xB2B9) /* type = S SET DFP ROUNDING MODE */ \
+ V(lfas, LFAS, 0xB2BD) /* type = S LOAD FPC AND SIGNAL */ \
+ V(tend, TEND, 0xB2F8) /* type = S TRANSACTION END */ \
+ V(tabort, TABORT, 0xB2FC) /* type = S TRANSACTION ABORT */ \
+ V(trap4, TRAP4, 0xB2FF) /* type = S TRAP */
+
+#define S390_RX_A_OPCODE_LIST(V) \
+ V(la, LA, 0x41) /* type = RX_A LOAD ADDRESS */ \
+ V(stc, STC, 0x42) /* type = RX_A STORE CHARACTER */ \
+ V(ic_z, IC_z, 0x43) /* type = RX_A INSERT CHARACTER */ \
+ V(ex, EX, 0x44) /* type = RX_A EXECUTE */ \
+ V(bal, BAL, 0x45) /* type = RX_A BRANCH AND LINK */ \
+ V(bct, BCT, 0x46) /* type = RX_A BRANCH ON COUNT (32) */ \
+ V(bc, BC, 0x47) /* type = RX_A BRANCH ON CONDITION */ \
+ V(ch, CH, 0x49) /* type = RX_A COMPARE HALFWORD (32<-16) */ \
+ V(ah, AH, 0x4A) /* type = RX_A ADD HALFWORD (32<-16) */ \
+ V(sh, SH, 0x4B) /* type = RX_A SUBTRACT HALFWORD (32<-16) */ \
+ V(mh, MH, 0x4C) /* type = RX_A MULTIPLY HALFWORD (32<-16) */ \
+ V(bas, BAS, 0x4D) /* type = RX_A BRANCH AND SAVE */ \
+ V(cvd, CVD, 0x4E) /* type = RX_A CONVERT TO DECIMAL (32) */ \
+ V(cvb, CVB, 0x4F) /* type = RX_A CONVERT TO BINARY (32) */ \
+ V(st, ST, 0x50) /* type = RX_A STORE (32) */ \
+ V(lae, LAE, 0x51) /* type = RX_A LOAD ADDRESS EXTENDED */ \
+ V(n, N, 0x54) /* type = RX_A AND (32) */ \
+ V(cl, CL, 0x55) /* type = RX_A COMPARE LOGICAL (32) */ \
+ V(o, O, 0x56) /* type = RX_A OR (32) */ \
+ V(x, X, 0x57) /* type = RX_A EXCLUSIVE OR (32) */ \
+ V(l, L, 0x58) /* type = RX_A LOAD (32) */ \
+ V(c, C, 0x59) /* type = RX_A COMPARE (32) */ \
+ V(a, A, 0x5A) /* type = RX_A ADD (32) */ \
+ V(s, S, 0x5B) /* type = RX_A SUBTRACT (32) */ \
+ V(m, M, 0x5C) /* type = RX_A MULTIPLY (64<-32) */ \
+ V(d, D, 0x5D) /* type = RX_A DIVIDE (32<-64) */ \
+ V(al, AL, 0x5E) /* type = RX_A ADD LOGICAL (32) */ \
+ V(sl, SL, 0x5F) /* type = RX_A SUBTRACT LOGICAL (32) */ \
+ V(std, STD, 0x60) /* type = RX_A STORE (long) */ \
+ V(mxd, MXD, 0x67) /* type = RX_A MULTIPLY (long to extended HFP) */ \
+ V(ld, LD, 0x68) /* type = RX_A LOAD (long) */ \
+ V(cd, CD, 0x69) /* type = RX_A COMPARE (long HFP) */ \
+ V(ad, AD, 0x6A) /* type = RX_A ADD NORMALIZED (long HFP) */ \
+ V(sd, SD, 0x6B) /* type = RX_A SUBTRACT NORMALIZED (long HFP) */ \
+ V(md, MD, 0x6C) /* type = RX_A MULTIPLY (long HFP) */ \
+ V(dd, DD, 0x6D) /* type = RX_A DIVIDE (long HFP) */ \
+ V(aw, AW, 0x6E) /* type = RX_A ADD UNNORMALIZED (long HFP) */ \
+ V(sw, SW, 0x6F) /* type = RX_A SUBTRACT UNNORMALIZED (long HFP) */ \
+ V(ste, STE, 0x70) /* type = RX_A STORE (short) */ \
+ V(ms, MS, 0x71) /* type = RX_A MULTIPLY SINGLE (32) */ \
+ V(le, LE, 0x78) /* type = RX_A LOAD (short) */ \
+ V(ce, CE, 0x79) /* type = RX_A COMPARE (short HFP) */ \
+ V(ae, AE, 0x7A) /* type = RX_A ADD NORMALIZED (short HFP) */ \
+ V(se, SE, 0x7B) /* type = RX_A SUBTRACT NORMALIZED (short HFP) */ \
+ V(mde, MDE, 0x7C) /* type = RX_A MULTIPLY (short to long HFP) */ \
+ V(me, ME, 0x7C) /* type = RX_A MULTIPLY (short to long HFP) */ \
+ V(de, DE, 0x7D) /* type = RX_A DIVIDE (short HFP) */ \
+ V(au, AU, 0x7E) /* type = RX_A ADD UNNORMALIZED (short HFP) */ \
+ V(su, SU, 0x7F) /* type = RX_A SUBTRACT UNNORMALIZED (short HFP) */ \
+ V(ssm, SSM, 0x80) /* type = RX_A SET SYSTEM MASK */ \
+ V(lra, LRA, 0xB1) /* type = RX_A LOAD REAL ADDRESS (32) */
+
+#define S390_RX_B_OPCODE_LIST(V) \
+ V(lh, LH, 0x48) /* type = RX_B LOAD HALFWORD (32<-16) */
+
+#define S390_RIE_A_OPCODE_LIST(V) \
+ V(cgit, CGIT, 0xEC70) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (64<-16) */ \
+ V(clgit, CLGIT, \
+ 0xEC71) /* type = RIE_A COMPARE LOGICAL IMMEDIATE AND TRAP (64<-16) */ \
+ V(cit, CIT, 0xEC72) /* type = RIE_A COMPARE IMMEDIATE AND TRAP (32<-16) */ \
+ V(clfit, CLFIT, \
+ 0xEC73) /* type = RIE_A COMPARE LOGICAL IMMEDIATE AND TRAP (32<-16) */
+
+#define S390_RRD_OPCODE_LIST(V) \
+ V(maebr, MAEBR, 0xB30E) /* type = RRD MULTIPLY AND ADD (short BFP) */ \
+ V(msebr, MSEBR, 0xB30F) /* type = RRD MULTIPLY AND SUBTRACT (short BFP) */ \
+ V(madbr, MADBR, 0xB31E) /* type = RRD MULTIPLY AND ADD (long BFP) */ \
+ V(msdbr, MSDBR, 0xB31F) /* type = RRD MULTIPLY AND SUBTRACT (long BFP) */ \
+ V(maer, MAER, 0xB32E) /* type = RRD MULTIPLY AND ADD (short HFP) */ \
+ V(mser, MSER, 0xB32F) /* type = RRD MULTIPLY AND SUBTRACT (short HFP) */ \
+ V(maylr, MAYLR, \
+ 0xB338) /* type = RRD MULTIPLY AND ADD UNNRM. (long to ext. low HFP) */ \
+ V(mylr, MYLR, \
+ 0xB339) /* type = RRD MULTIPLY UNNORM. (long to ext. low HFP) */ \
+ V(mayr, MAYR, \
+ 0xB33A) /* type = RRD MULTIPLY & ADD UNNORMALIZED (long to ext. HFP) */ \
+ V(myr, MYR, \
+ 0xB33B) /* type = RRD MULTIPLY UNNORMALIZED (long to ext. HFP) */ \
+ V(mayhr, MAYHR, \
+ 0xB33C) /* type = RRD MULTIPLY AND ADD UNNRM. (long to ext. high HFP) */ \
+ V(myhr, MYHR, \
+ 0xB33D) /* type = RRD MULTIPLY UNNORM. (long to ext. high HFP) */ \
+ V(madr, MADR, 0xB33E) /* type = RRD MULTIPLY AND ADD (long HFP) */ \
+ V(msdr, MSDR, 0xB33F) /* type = RRD MULTIPLY AND SUBTRACT (long HFP) */
+
+#define S390_RIE_B_OPCODE_LIST(V) \
+ V(cgrj, CGRJ, 0xEC64) /* type = RIE_B COMPARE AND BRANCH RELATIVE (64) */ \
+ V(clgrj, CLGRJ, \
+ 0xEC65) /* type = RIE_B COMPARE LOGICAL AND BRANCH RELATIVE (64) */ \
+ V(crj, CRJ, 0xEC76) /* type = RIE_B COMPARE AND BRANCH RELATIVE (32) */ \
+ V(clrj, CLRJ, \
+ 0xEC77) /* type = RIE_B COMPARE LOGICAL AND BRANCH RELATIVE (32) */
+
+#define S390_RRE_OPCODE_LIST(V) \
+ V(ipm, IPM, 0xB222) /* type = RRE INSERT PROGRAM MASK */ \
+ V(ivsk, IVSK, 0xB223) /* type = RRE INSERT VIRTUAL STORAGE KEY */ \
+ V(iac, IAC, 0xB224) /* type = RRE INSERT ADDRESS SPACE CONTROL */ \
+ V(ssar, SSAR, 0xB225) /* type = RRE SET SECONDARY ASN */ \
+ V(epar, EPAR, 0xB226) /* type = RRE EXTRACT PRIMARY ASN */ \
+ V(esar, ESAR, 0xB227) /* type = RRE EXTRACT SECONDARY ASN */ \
+ V(pt, PT, 0xB228) /* type = RRE PROGRAM TRANSFER */ \
+ V(iske, ISKE, 0xB229) /* type = RRE INSERT STORAGE KEY EXTENDED */ \
+ V(rrbe, RRBE, 0xB22A) /* type = RRE RESET REFERENCE BIT EXTENDED */ \
+ V(tb, TB, 0xB22C) /* type = RRE TEST BLOCK */ \
+ V(dxr, DXR, 0xB22D) /* type = RRE DIVIDE (extended HFP) */ \
+ V(pgin, PGIN, 0xB22E) /* type = RRE PAGE IN */ \
+ V(pgout, PGOUT, 0xB22F) /* type = RRE PAGE OUT */ \
+ V(bakr, BAKR, 0xB240) /* type = RRE BRANCH AND STACK */ \
+ V(cksm, CKSM, 0xB241) /* type = RRE CHECKSUM */ \
+ V(sqdr, SQDR, 0xB244) /* type = RRE SQUARE ROOT (long HFP) */ \
+ V(sqer, SQER, 0xB245) /* type = RRE SQUARE ROOT (short HFP) */ \
+ V(stura, STURA, 0xB246) /* type = RRE STORE USING REAL ADDRESS (32) */ \
+ V(msta, MSTA, 0xB247) /* type = RRE MODIFY STACKED STATE */ \
+ V(palb, PALB, 0xB248) /* type = RRE PURGE ALB */ \
+ V(ereg, EREG, 0xB249) /* type = RRE EXTRACT STACKED REGISTERS (32) */ \
+ V(esta, ESTA, 0xB24A) /* type = RRE EXTRACT STACKED STATE */ \
+ V(lura, LURA, 0xB24B) /* type = RRE LOAD USING REAL ADDRESS (32) */ \
+ V(tar, TAR, 0xB24C) /* type = RRE TEST ACCESS */ \
+ V(cpya, CPYA, 0xB24D) /* type = RRE COPY ACCESS */ \
+ V(sar, SAR, 0xB24E) /* type = RRE SET ACCESS */ \
+ V(ear, EAR, 0xB24F) /* type = RRE EXTRACT ACCESS */ \
+ V(csp, CSP, 0xB250) /* type = RRE COMPARE AND SWAP AND PURGE (32) */ \
+ V(msr, MSR, 0xB252) /* type = RRE MULTIPLY SINGLE (32) */ \
+ V(mvpg, MVPG, 0xB254) /* type = RRE MOVE PAGE */ \
+ V(mvst, MVST, 0xB255) /* type = RRE MOVE STRING */ \
+ V(cuse, CUSE, 0xB257) /* type = RRE COMPARE UNTIL SUBSTRING EQUAL */ \
+ V(bsg, BSG, 0xB258) /* type = RRE BRANCH IN SUBSPACE GROUP */ \
+ V(bsa, BSA, 0xB25A) /* type = RRE BRANCH AND SET AUTHORITY */ \
+ V(clst, CLST, 0xB25D) /* type = RRE COMPARE LOGICAL STRING */ \
+ V(srst, SRST, 0xB25E) /* type = RRE SEARCH STRING */ \
+ V(cmpsc, CMPSC, 0xB263) /* type = RRE COMPRESSION CALL */ \
+ V(tre, TRE, 0xB2A5) /* type = RRE TRANSLATE EXTENDED */ \
+ V(etnd, ETND, 0xB2EC) /* type = RRE EXTRACT TRANSACTION NESTING DEPTH */ \
+ V(lpebr, LPEBR, 0xB300) /* type = RRE LOAD POSITIVE (short BFP) */ \
+ V(lnebr, LNEBR, 0xB301) /* type = RRE LOAD NEGATIVE (short BFP) */ \
+ V(ltebr, LTEBR, 0xB302) /* type = RRE LOAD AND TEST (short BFP) */ \
+ V(lcebr, LCEBR, 0xB303) /* type = RRE LOAD COMPLEMENT (short BFP) */ \
+ V(ldebr, LDEBR, \
+ 0xB304) /* type = RRE LOAD LENGTHENED (short to long BFP) */ \
+ V(lxdbr, LXDBR, \
+ 0xB305) /* type = RRE LOAD LENGTHENED (long to extended BFP) */ \
+ V(lxebr, LXEBR, \
+ 0xB306) /* type = RRE LOAD LENGTHENED (short to extended BFP) */ \
+ V(mxdbr, MXDBR, 0xB307) /* type = RRE MULTIPLY (long to extended BFP) */ \
+ V(kebr, KEBR, 0xB308) /* type = RRE COMPARE AND SIGNAL (short BFP) */ \
+ V(cebr, CEBR, 0xB309) /* type = RRE COMPARE (short BFP) */ \
+ V(aebr, AEBR, 0xB30A) /* type = RRE ADD (short BFP) */ \
+ V(sebr, SEBR, 0xB30B) /* type = RRE SUBTRACT (short BFP) */ \
+ V(mdebr, MDEBR, 0xB30C) /* type = RRE MULTIPLY (short to long BFP) */ \
+ V(debr, DEBR, 0xB30D) /* type = RRE DIVIDE (short BFP) */ \
+ V(lpdbr, LPDBR, 0xB310) /* type = RRE LOAD POSITIVE (long BFP) */ \
+ V(lndbr, LNDBR, 0xB311) /* type = RRE LOAD NEGATIVE (long BFP) */ \
+ V(ltdbr, LTDBR, 0xB312) /* type = RRE LOAD AND TEST (long BFP) */ \
+ V(lcdbr, LCDBR, 0xB313) /* type = RRE LOAD COMPLEMENT (long BFP) */ \
+ V(sqebr, SQEBR, 0xB314) /* type = RRE SQUARE ROOT (short BFP) */ \
+ V(sqdbr, SQDBR, 0xB315) /* type = RRE SQUARE ROOT (long BFP) */ \
+ V(sqxbr, SQXBR, 0xB316) /* type = RRE SQUARE ROOT (extended BFP) */ \
+ V(meebr, MEEBR, 0xB317) /* type = RRE MULTIPLY (short BFP) */ \
+ V(kdbr, KDBR, 0xB318) /* type = RRE COMPARE AND SIGNAL (long BFP) */ \
+ V(cdbr, CDBR, 0xB319) /* type = RRE COMPARE (long BFP) */ \
+ V(adbr, ADBR, 0xB31A) /* type = RRE ADD (long BFP) */ \
+ V(sdbr, SDBR, 0xB31B) /* type = RRE SUBTRACT (long BFP) */ \
+ V(mdbr, MDBR, 0xB31C) /* type = RRE MULTIPLY (long BFP) */ \
+ V(ddbr, DDBR, 0xB31D) /* type = RRE DIVIDE (long BFP) */ \
+ V(lder, LDER, 0xB324) /* type = RRE LOAD LENGTHENED (short to long HFP) */ \
+ V(lxdr, LXDR, \
+ 0xB325) /* type = RRE LOAD LENGTHENED (long to extended HFP) */ \
+ V(lxer, LXER, \
+ 0xB326) /* type = RRE LOAD LENGTHENED (short to extended HFP) */ \
+ V(sqxr, SQXR, 0xB336) /* type = RRE SQUARE ROOT (extended HFP) */ \
+ V(meer, MEER, 0xB337) /* type = RRE MULTIPLY (short HFP) */ \
+ V(lpxbr, LPXBR, 0xB340) /* type = RRE LOAD POSITIVE (extended BFP) */ \
+ V(lnxbr, LNXBR, 0xB341) /* type = RRE LOAD NEGATIVE (extended BFP) */ \
+ V(ltxbr, LTXBR, 0xB342) /* type = RRE LOAD AND TEST (extended BFP) */ \
+ V(lcxbr, LCXBR, 0xB343) /* type = RRE LOAD COMPLEMENT (extended BFP) */ \
+ V(ledbr, LEDBR, 0xB344) /* type = RRE LOAD ROUNDED (long to short BFP) */ \
+ V(ldxbr, LDXBR, \
+ 0xB345) /* type = RRE LOAD ROUNDED (extended to long BFP) */ \
+ V(lexbr, LEXBR, \
+ 0xB346) /* type = RRE LOAD ROUNDED (extended to short BFP) */ \
+ V(kxbr, KXBR, 0xB348) /* type = RRE COMPARE AND SIGNAL (extended BFP) */ \
+ V(cxbr, CXBR, 0xB349) /* type = RRE COMPARE (extended BFP) */ \
+ V(axbr, AXBR, 0xB34A) /* type = RRE ADD (extended BFP) */ \
+ V(sxbr, SXBR, 0xB34B) /* type = RRE SUBTRACT (extended BFP) */ \
+ V(mxbr, MXBR, 0xB34C) /* type = RRE MULTIPLY (extended BFP) */ \
+ V(dxbr, DXBR, 0xB34D) /* type = RRE DIVIDE (extended BFP) */ \
+ V(thder, THDER, \
+ 0xB358) /* type = RRE CONVERT BFP TO HFP (short to long) */ \
+ V(thdr, THDR, 0xB359) /* type = RRE CONVERT BFP TO HFP (long) */ \
+ V(lpxr, LPXR, 0xB360) /* type = RRE LOAD POSITIVE (extended HFP) */ \
+ V(lnxr, LNXR, 0xB361) /* type = RRE LOAD NEGATIVE (extended HFP) */ \
+ V(ltxr, LTXR, 0xB362) /* type = RRE LOAD AND TEST (extended HFP) */ \
+ V(lcxr, LCXR, 0xB363) /* type = RRE LOAD COMPLEMENT (extended HFP) */ \
+ V(lxr, LXR, 0xB365) /* type = RRE LOAD (extended) */ \
+ V(lexr, LEXR, \
+ 0xB366) /* type = RRE LOAD ROUNDED (extended to short HFP) */ \
+ V(fixr, FIXR, 0xB367) /* type = RRE LOAD FP INTEGER (extended HFP) */ \
+ V(cxr, CXR, 0xB369) /* type = RRE COMPARE (extended HFP) */ \
+ V(lpdfr, LPDFR, 0xB370) /* type = RRE LOAD POSITIVE (long) */ \
+ V(lndfr, LNDFR, 0xB371) /* type = RRE LOAD NEGATIVE (long) */ \
+ V(lcdfr, LCDFR, 0xB373) /* type = RRE LOAD COMPLEMENT (long) */ \
+ V(lzer, LZER, 0xB374) /* type = RRE LOAD ZERO (short) */ \
+ V(lzdr, LZDR, 0xB375) /* type = RRE LOAD ZERO (long) */ \
+ V(lzxr, LZXR, 0xB376) /* type = RRE LOAD ZERO (extended) */ \
+ V(fier, FIER, 0xB377) /* type = RRE LOAD FP INTEGER (short HFP) */ \
+ V(fidr, FIDR, 0xB37F) /* type = RRE LOAD FP INTEGER (long HFP) */ \
+ V(sfpc, SFPC, 0xB384) /* type = RRE SET FPC */ \
+ V(sfasr, SFASR, 0xB385) /* type = RRE SET FPC AND SIGNAL */ \
+ V(efpc, EFPC, 0xB38C) /* type = RRE EXTRACT FPC */ \
+ V(cefbr, CEFBR, \
+ 0xB394) /* type = RRE CONVERT FROM FIXED (32 to short BFP) */ \
+ V(cdfbr, CDFBR, \
+ 0xB395) /* type = RRE CONVERT FROM FIXED (32 to long BFP) */ \
+ V(cxfbr, CXFBR, \
+ 0xB396) /* type = RRE CONVERT FROM FIXED (32 to extended BFP) */ \
+ V(cegbr, CEGBR, \
+ 0xB3A4) /* type = RRE CONVERT FROM FIXED (64 to short BFP) */ \
+ V(cdgbr, CDGBR, \
+ 0xB3A5) /* type = RRE CONVERT FROM FIXED (64 to long BFP) */ \
+ V(cxgbr, CXGBR, \
+ 0xB3A6) /* type = RRE CONVERT FROM FIXED (64 to extended BFP) */ \
+ V(cefr, CEFR, \
+ 0xB3B4) /* type = RRE CONVERT FROM FIXED (32 to short HFP) */ \
+ V(cdfr, CDFR, 0xB3B5) /* type = RRE CONVERT FROM FIXED (32 to long HFP) */ \
+ V(cxfr, CXFR, \
+ 0xB3B6) /* type = RRE CONVERT FROM FIXED (32 to extended HFP) */ \
+ V(ldgr, LDGR, 0xB3C1) /* type = RRE LOAD FPR FROM GR (64 to long) */ \
+ V(cegr, CEGR, \
+ 0xB3C4) /* type = RRE CONVERT FROM FIXED (64 to short HFP) */ \
+ V(cdgr, CDGR, 0xB3C5) /* type = RRE CONVERT FROM FIXED (64 to long HFP) */ \
+ V(cxgr, CXGR, \
+ 0xB3C6) /* type = RRE CONVERT FROM FIXED (64 to extended HFP) */ \
+ V(lgdr, LGDR, 0xB3CD) /* type = RRE LOAD GR FROM FPR (long to 64) */ \
+ V(ltdtr, LTDTR, 0xB3D6) /* type = RRE LOAD AND TEST (long DFP) */ \
+ V(ltxtr, LTXTR, 0xB3DE) /* type = RRE LOAD AND TEST (extended DFP) */ \
+ V(kdtr, KDTR, 0xB3E0) /* type = RRE COMPARE AND SIGNAL (long DFP) */ \
+ V(cudtr, CUDTR, 0xB3E2) /* type = RRE CONVERT TO UNSIGNED PACKED (long */ \
+ /* DFP to 64) */ \
+ V(cdtr, CDTR, 0xB3E4) /* type = RRE COMPARE (long DFP) */ \
+ V(eedtr, EEDTR, \
+ 0xB3E5) /* type = RRE EXTRACT BIASED EXPONENT (long DFP to 64) */ \
+ V(esdtr, ESDTR, \
+ 0xB3E7) /* type = RRE EXTRACT SIGNIFICANCE (long DFP to 64) */ \
+ V(kxtr, KXTR, 0xB3E8) /* type = RRE COMPARE AND SIGNAL (extended DFP) */ \
+ V(cuxtr, CUXTR, \
+ 0xB3EA) /* type = RRE CONVERT TO UNSIGNED PACKED (extended */ \
+ /* DFP to 128) */ \
+ V(cxtr, CXTR, 0xB3EC) /* type = RRE COMPARE (extended DFP) */ \
+ V(eextr, EEXTR, \
+ 0xB3ED) /* type = RRE EXTRACT BIASED EXPONENT (extended DFP to 64) */ \
+ V(esxtr, ESXTR, \
+ 0xB3EF) /* type = RRE EXTRACT SIGNIFICANCE (extended DFP to 64) */ \
+ V(cdgtr, CDGTR, \
+ 0xB3F1) /* type = RRE CONVERT FROM FIXED (64 to long DFP) */ \
+ V(cdutr, CDUTR, \
+ 0xB3F2) /* type = RRE CONVERT FROM UNSIGNED PACKED (64 to long DFP) */ \
+ V(cdstr, CDSTR, \
+ 0xB3F3) /* type = RRE CONVERT FROM SIGNED PACKED (64 to long DFP) */ \
+ V(cedtr, CEDTR, \
+ 0xB3F4) /* type = RRE COMPARE BIASED EXPONENT (long DFP) */ \
+ V(cxgtr, CXGTR, \
+ 0xB3F9) /* type = RRE CONVERT FROM FIXED (64 to extended DFP) */ \
+ V(cxutr, CXUTR, \
+ 0xB3FA) /* type = RRE CONVERT FROM UNSIGNED PACKED (128 to ext. DFP) */ \
+ V(cxstr, CXSTR, 0xB3FB) /* type = RRE CONVERT FROM SIGNED PACKED (128 to */ \
+ /* extended DFP) */ \
+ V(cextr, CEXTR, \
+ 0xB3FC) /* type = RRE COMPARE BIASED EXPONENT (extended DFP) */ \
+ V(lpgr, LPGR, 0xB900) /* type = RRE LOAD POSITIVE (64) */ \
+ V(lngr, LNGR, 0xB901) /* type = RRE LOAD NEGATIVE (64) */ \
+ V(ltgr, LTGR, 0xB902) /* type = RRE LOAD AND TEST (64) */ \
+ V(lcgr, LCGR, 0xB903) /* type = RRE LOAD COMPLEMENT (64) */ \
+ V(lgr, LGR, 0xB904) /* type = RRE LOAD (64) */ \
+ V(lurag, LURAG, 0xB905) /* type = RRE LOAD USING REAL ADDRESS (64) */ \
+ V(lgbr, LGBR, 0xB906) /* type = RRE LOAD BYTE (64<-8) */ \
+ V(lghr, LGHR, 0xB907) /* type = RRE LOAD HALFWORD (64<-16) */ \
+ V(agr, AGR, 0xB908) /* type = RRE ADD (64) */ \
+ V(sgr, SGR, 0xB909) /* type = RRE SUBTRACT (64) */ \
+ V(algr, ALGR, 0xB90A) /* type = RRE ADD LOGICAL (64) */ \
+ V(slgr, SLGR, 0xB90B) /* type = RRE SUBTRACT LOGICAL (64) */ \
+ V(msgr, MSGR, 0xB90C) /* type = RRE MULTIPLY SINGLE (64) */ \
+ V(dsgr, DSGR, 0xB90D) /* type = RRE DIVIDE SINGLE (64) */ \
+ V(eregg, EREGG, 0xB90E) /* type = RRE EXTRACT STACKED REGISTERS (64) */ \
+ V(lrvgr, LRVGR, 0xB90F) /* type = RRE LOAD REVERSED (64) */ \
+ V(lpgfr, LPGFR, 0xB910) /* type = RRE LOAD POSITIVE (64<-32) */ \
+ V(lngfr, LNGFR, 0xB911) /* type = RRE LOAD NEGATIVE (64<-32) */ \
+ V(ltgfr, LTGFR, 0xB912) /* type = RRE LOAD AND TEST (64<-32) */ \
+ V(lcgfr, LCGFR, 0xB913) /* type = RRE LOAD COMPLEMENT (64<-32) */ \
+ V(lgfr, LGFR, 0xB914) /* type = RRE LOAD (64<-32) */ \
+ V(llgfr, LLGFR, 0xB916) /* type = RRE LOAD LOGICAL (64<-32) */ \
+ V(llgtr, LLGTR, \
+ 0xB917) /* type = RRE LOAD LOGICAL THIRTY ONE BITS (64<-31) */ \
+ V(agfr, AGFR, 0xB918) /* type = RRE ADD (64<-32) */ \
+ V(sgfr, SGFR, 0xB919) /* type = RRE SUBTRACT (64<-32) */ \
+ V(algfr, ALGFR, 0xB91A) /* type = RRE ADD LOGICAL (64<-32) */ \
+ V(slgfr, SLGFR, 0xB91B) /* type = RRE SUBTRACT LOGICAL (64<-32) */ \
+ V(msgfr, MSGFR, 0xB91C) /* type = RRE MULTIPLY SINGLE (64<-32) */ \
+ V(dsgfr, DSGFR, 0xB91D) /* type = RRE DIVIDE SINGLE (64<-32) */ \
+ V(kmac, KMAC, 0xB91E) /* type = RRE COMPUTE MESSAGE AUTHENTICATION CODE */ \
+ V(lrvr, LRVR, 0xB91F) /* type = RRE LOAD REVERSED (32) */ \
+ V(cgr, CGR, 0xB920) /* type = RRE COMPARE (64) */ \
+ V(clgr, CLGR, 0xB921) /* type = RRE COMPARE LOGICAL (64) */ \
+ V(sturg, STURG, 0xB925) /* type = RRE STORE USING REAL ADDRESS (64) */ \
+ V(lbr, LBR, 0xB926) /* type = RRE LOAD BYTE (32<-8) */ \
+ V(lhr, LHR, 0xB927) /* type = RRE LOAD HALFWORD (32<-16) */ \
+ V(pckmo, PCKMO, \
+ 0xB928) /* type = RRE PERFORM CRYPTOGRAPHIC KEY MGMT. OPERATIONS */ \
+ V(kmf, KMF, 0xB92A) /* type = RRE CIPHER MESSAGE WITH CIPHER FEEDBACK */ \
+ V(kmo, KMO, 0xB92B) /* type = RRE CIPHER MESSAGE WITH OUTPUT FEEDBACK */ \
+ V(pcc, PCC, 0xB92C) /* type = RRE PERFORM CRYPTOGRAPHIC COMPUTATION */ \
+ V(km, KM, 0xB92E) /* type = RRE CIPHER MESSAGE */ \
+ V(kmc, KMC, 0xB92F) /* type = RRE CIPHER MESSAGE WITH CHAINING */ \
+ V(cgfr, CGFR, 0xB930) /* type = RRE COMPARE (64<-32) */ \
+ V(clgfr, CLGFR, 0xB931) /* type = RRE COMPARE LOGICAL (64<-32) */ \
+ V(ppno, PPNO, \
+ 0xB93C) /* type = RRE PERFORM PSEUDORANDOM NUMBER OPERATION */ \
+ V(kimd, KIMD, 0xB93E) /* type = RRE COMPUTE INTERMEDIATE MESSAGE DIGEST */ \
+ V(klmd, KLMD, 0xB93F) /* type = RRE COMPUTE LAST MESSAGE DIGEST */ \
+ V(bctgr, BCTGR, 0xB946) /* type = RRE BRANCH ON COUNT (64) */ \
+ V(cdftr, CDFTR, \
+ 0xB951) /* type = RRE CONVERT FROM FIXED (32 to long DFP) */ \
+ V(cxftr, CXFTR, \
+ 0xB959) /* type = RRE CONVERT FROM FIXED (32 to extended DFP) */ \
+ V(ngr, NGR, 0xB980) /* type = RRE AND (64) */ \
+ V(ogr, OGR, 0xB981) /* type = RRE OR (64) */ \
+ V(xgr, XGR, 0xB982) /* type = RRE EXCLUSIVE OR (64) */ \
+ V(flogr, FLOGR, 0xB983) /* type = RRE FIND LEFTMOST ONE */ \
+ V(llgcr, LLGCR, 0xB984) /* type = RRE LOAD LOGICAL CHARACTER (64<-8) */ \
+ V(llghr, LLGHR, 0xB985) /* type = RRE LOAD LOGICAL HALFWORD (64<-16) */ \
+ V(mlgr, MLGR, 0xB986) /* type = RRE MULTIPLY LOGICAL (128<-64) */ \
+ V(dlgr, DLGR, 0xB987) /* type = RRE DIVIDE LOGICAL (64<-128) */ \
+ V(alcgr, ALCGR, 0xB988) /* type = RRE ADD LOGICAL WITH CARRY (64) */ \
+ V(slbgr, SLBGR, 0xB989) /* type = RRE SUBTRACT LOGICAL WITH BORROW (64) */ \
+ V(cspg, CSPG, 0xB98A) /* type = RRE COMPARE AND SWAP AND PURGE (64) */ \
+ V(epsw, EPSW, 0xB98D) /* type = RRE EXTRACT PSW */ \
+ V(llcr, LLCR, 0xB994) /* type = RRE LOAD LOGICAL CHARACTER (32<-8) */ \
+ V(llhr, LLHR, 0xB995) /* type = RRE LOAD LOGICAL HALFWORD (32<-16) */ \
+ V(mlr, MLR, 0xB996) /* type = RRE MULTIPLY LOGICAL (64<-32) */ \
+ V(dlr, DLR, 0xB997) /* type = RRE DIVIDE LOGICAL (32<-64) */ \
+ V(alcr, ALCR, 0xB998) /* type = RRE ADD LOGICAL WITH CARRY (32) */ \
+ V(slbr, SLBR, 0xB999) /* type = RRE SUBTRACT LOGICAL WITH BORROW (32) */ \
+ V(epair, EPAIR, 0xB99A) /* type = RRE EXTRACT PRIMARY ASN AND INSTANCE */ \
+ V(esair, ESAIR, \
+ 0xB99B) /* type = RRE EXTRACT SECONDARY ASN AND INSTANCE */ \
+ V(esea, ESEA, 0xB99D) /* type = RRE EXTRACT AND SET EXTENDED AUTHORITY */ \
+ V(pti, PTI, 0xB99E) /* type = RRE PROGRAM TRANSFER WITH INSTANCE */ \
+ V(ssair, SSAIR, 0xB99F) /* type = RRE SET SECONDARY ASN WITH INSTANCE */ \
+ V(ptf, PTF, 0xB9A2) /* type = RRE PERFORM TOPOLOGY FUNCTION */ \
+ V(rrbm, RRBM, 0xB9AE) /* type = RRE RESET REFERENCE BITS MULTIPLE */ \
+ V(pfmf, PFMF, 0xB9AF) /* type = RRE PERFORM FRAME MANAGEMENT FUNCTION */ \
+ V(cu41, CU41, 0xB9B2) /* type = RRE CONVERT UTF-32 TO UTF-8 */ \
+ V(cu42, CU42, 0xB9B3) /* type = RRE CONVERT UTF-32 TO UTF-16 */ \
+ V(srstu, SRSTU, 0xB9BE) /* type = RRE SEARCH STRING UNICODE */ \
+ V(chhr, CHHR, 0xB9CD) /* type = RRE COMPARE HIGH (32) */ \
+ V(clhhr, CLHHR, 0xB9CF) /* type = RRE COMPARE LOGICAL HIGH (32) */ \
+ V(chlr, CHLR, 0xB9DD) /* type = RRE COMPARE HIGH (32) */ \
+ V(clhlr, CLHLR, 0xB9DF) /* type = RRE COMPARE LOGICAL HIGH (32) */ \
+ V(popcnt, POPCNT_Z, 0xB9E1) /* type = RRE POPULATION COUNT */
+
+#define S390_RIE_C_OPCODE_LIST(V) \
+ V(cgij, CGIJ, \
+ 0xEC7C) /* type = RIE_C COMPARE IMMEDIATE AND BRANCH RELATIVE (64<-8) */ \
+ V(clgij, CLGIJ, \
+ 0xEC7D) /* type = RIE_C COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE */ \
+ /* (64<-8) */ \
+ V(cij, CIJ, \
+ 0xEC7E) /* type = RIE_C COMPARE IMMEDIATE AND BRANCH RELATIVE (32<-8) */ \
+ V(clij, CLIJ, 0xEC7F) /* type = RIE_C COMPARE LOGICAL IMMEDIATE AND */ \
+ /* BRANCH RELATIVE (32<-8) */
+
+#define S390_RIE_D_OPCODE_LIST(V) \
+ V(ahik, AHIK, 0xECD8) /* type = RIE_D ADD IMMEDIATE (32<-16) */ \
+ V(aghik, AGHIK, 0xECD9) /* type = RIE_D ADD IMMEDIATE (64<-16) */ \
+ V(alhsik, ALHSIK, \
+ 0xECDA) /* type = RIE_D ADD LOGICAL WITH SIGNED IMMEDIATE (32<-16) */ \
+ V(alghsik, ALGHSIK, \
+ 0xECDB) /* type = RIE_D ADD LOGICAL WITH SIGNED IMMEDIATE (64<-16) */
+
+#define S390_VRV_OPCODE_LIST(V) \
+ V(vgeg, VGEG, 0xE712) /* type = VRV VECTOR GATHER ELEMENT (64) */ \
+ V(vgef, VGEF, 0xE713) /* type = VRV VECTOR GATHER ELEMENT (32) */ \
+ V(vsceg, VSCEG, 0xE71A) /* type = VRV VECTOR SCATTER ELEMENT (64) */ \
+ V(vscef, VSCEF, 0xE71B) /* type = VRV VECTOR SCATTER ELEMENT (32) */
+
+#define S390_RIE_E_OPCODE_LIST(V) \
+ V(brxhg, BRXHG, \
+ 0xEC44) /* type = RIE_E BRANCH RELATIVE ON INDEX HIGH (64) */ \
+ V(brxlg, BRXLG, \
+ 0xEC45) /* type = RIE_E BRANCH RELATIVE ON INDEX LOW OR EQ. (64) */
+
+#define S390_RR_OPCODE_LIST(V) \
+ V(spm, SPM, 0x04) /* type = RR SET PROGRAM MASK */ \
+ V(balr, BALR, 0x05) /* type = RR BRANCH AND LINK */ \
+ V(bctr, BCTR, 0x06) /* type = RR BRANCH ON COUNT (32) */ \
+ V(bcr, BCR, 0x07) /* type = RR BRANCH ON CONDITION */ \
+ V(bsm, BSM, 0x0B) /* type = RR BRANCH AND SET MODE */ \
+ V(bassm, BASSM, 0x0C) /* type = RR BRANCH AND SAVE AND SET MODE */ \
+ V(basr, BASR, 0x0D) /* type = RR BRANCH AND SAVE */ \
+ V(mvcl, MVCL, 0x0E) /* type = RR MOVE LONG */ \
+ V(clcl, CLCL, 0x0F) /* type = RR COMPARE LOGICAL LONG */ \
+ V(lpr, LPR, 0x10) /* type = RR LOAD POSITIVE (32) */ \
+ V(lnr, LNR, 0x11) /* type = RR LOAD NEGATIVE (32) */ \
+ V(ltr, LTR, 0x12) /* type = RR LOAD AND TEST (32) */ \
+ V(lcr, LCR, 0x13) /* type = RR LOAD COMPLEMENT (32) */ \
+ V(nr, NR, 0x14) /* type = RR AND (32) */ \
+ V(clr, CLR, 0x15) /* type = RR COMPARE LOGICAL (32) */ \
+ V(or_z, OR, 0x16) /* type = RR OR (32) */ \
+ V(xr, XR, 0x17) /* type = RR EXCLUSIVE OR (32) */ \
+ V(lr, LR, 0x18) /* type = RR LOAD (32) */ \
+ V(cr_z, CR, 0x19) /* type = RR COMPARE (32) */ \
+ V(ar, AR, 0x1A) /* type = RR ADD (32) */ \
+ V(sr, SR, 0x1B) /* type = RR SUBTRACT (32) */ \
+ V(mr_z, MR, 0x1C) /* type = RR MULTIPLY (64<-32) */ \
+ V(dr, DR, 0x1D) /* type = RR DIVIDE (32<-64) */ \
+ V(alr, ALR, 0x1E) /* type = RR ADD LOGICAL (32) */ \
+ V(slr, SLR, 0x1F) /* type = RR SUBTRACT LOGICAL (32) */ \
+ V(lpdr, LPDR, 0x20) /* type = RR LOAD POSITIVE (long HFP) */ \
+ V(lndr, LNDR, 0x21) /* type = RR LOAD NEGATIVE (long HFP) */ \
+ V(ltdr, LTDR, 0x22) /* type = RR LOAD AND TEST (long HFP) */ \
+ V(lcdr, LCDR, 0x23) /* type = RR LOAD COMPLEMENT (long HFP) */ \
+ V(hdr, HDR, 0x24) /* type = RR HALVE (long HFP) */ \
+ V(ldxr, LDXR, 0x25) /* type = RR LOAD ROUNDED (extended to long HFP) */ \
+ V(lrdr, LRDR, 0x25) /* type = RR LOAD ROUNDED (extended to long HFP) */ \
+ V(mxr, MXR, 0x26) /* type = RR MULTIPLY (extended HFP) */ \
+ V(mxdr, MXDR, 0x27) /* type = RR MULTIPLY (long to extended HFP) */ \
+ V(ldr, LDR, 0x28) /* type = RR LOAD (long) */ \
+ V(cdr, CDR, 0x29) /* type = RR COMPARE (long HFP) */ \
+ V(adr, ADR, 0x2A) /* type = RR ADD NORMALIZED (long HFP) */ \
+ V(sdr, SDR, 0x2B) /* type = RR SUBTRACT NORMALIZED (long HFP) */ \
+ V(mdr, MDR, 0x2C) /* type = RR MULTIPLY (long HFP) */ \
+ V(ddr, DDR, 0x2D) /* type = RR DIVIDE (long HFP) */ \
+ V(swr, SWR, 0x2F) /* type = RR SUBTRACT UNNORMALIZED (long HFP) */ \
+ V(lper, LPER, 0x30) /* type = RR LOAD POSITIVE (short HFP) */ \
+ V(lner, LNER, 0x31) /* type = RR LOAD NEGATIVE (short HFP) */ \
+ V(lter, LTER, 0x32) /* type = RR LOAD AND TEST (short HFP) */ \
+ V(lcer, LCER, 0x33) /* type = RR LOAD COMPLEMENT (short HFP) */ \
+ V(her_z, HER_Z, 0x34) /* type = RR HALVE (short HFP) */ \
+ V(ledr, LEDR, 0x35) /* type = RR LOAD ROUNDED (long to short HFP) */ \
+ V(lrer, LRER, 0x35) /* type = RR LOAD ROUNDED (long to short HFP) */ \
+ V(axr, AXR, 0x36) /* type = RR ADD NORMALIZED (extended HFP) */ \
+ V(sxr, SXR, 0x37) /* type = RR SUBTRACT NORMALIZED (extended HFP) */ \
+ V(ler, LER, 0x38) /* type = RR LOAD (short) */ \
+ V(cer, CER, 0x39) /* type = RR COMPARE (short HFP) */ \
+ V(aer, AER, 0x3A) /* type = RR ADD NORMALIZED (short HFP) */ \
+ V(ser, SER, 0x3B) /* type = RR SUBTRACT NORMALIZED (short HFP) */ \
+ V(mder, MDER, 0x3C) /* type = RR MULTIPLY (short to long HFP) */ \
+ V(mer, MER, 0x3C) /* type = RR MULTIPLY (short to long HFP) */ \
+ V(der, DER, 0x3D) /* type = RR DIVIDE (short HFP) */ \
+ V(aur, AUR, 0x3E) /* type = RR ADD UNNORMALIZED (short HFP) */ \
+ V(sur, SUR, 0x3F) /* type = RR SUBTRACT UNNORMALIZED (short HFP) */ \
+ V(sth, STH, 0x40) /* type = RR STORE HALFWORD (16) */
+
+#define S390_RIE_F_OPCODE_LIST(V) \
+ V(risblg, RISBLG, \
+ 0xEC51) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS LOW (64) */ \
+ V(rnsbg, RNSBG, \
+ 0xEC54) /* type = RIE_F ROTATE THEN AND SELECTED BITS (64) */ \
+ V(risbg, RISBG, \
+ 0xEC55) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS (64) */ \
+ V(rosbg, ROSBG, 0xEC56) /* type = RIE_F ROTATE THEN OR SELECTED BITS (64) */ \
+ V(rxsbg, RXSBG, \
+ 0xEC57) /* type = RIE_F ROTATE THEN EXCLUSIVE OR SELECT. BITS (64) */ \
+ V(risbgn, RISBGN, \
+ 0xEC59) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS (64) */ \
+ V(risbhg, RISBHG, \
+ 0xEC5D) /* type = RIE_F ROTATE THEN INSERT SELECTED BITS HIGH (64) */
+
+#define S390_VRX_OPCODE_LIST(V) \
+ V(vleb, VLEB, 0xE700) /* type = VRX VECTOR LOAD ELEMENT (8) */ \
+ V(vleh, VLEH, 0xE701) /* type = VRX VECTOR LOAD ELEMENT (16) */ \
+ V(vleg, VLEG, 0xE702) /* type = VRX VECTOR LOAD ELEMENT (64) */ \
+ V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(vllez, VLLEZ, \
+ 0xE704) /* type = VRX VECTOR LOAD LOGICAL ELEMENT AND ZERO */ \
+ V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
+ V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
+ V(vlbb, VLBB, 0xE707) /* type = VRX VECTOR LOAD TO BLOCK BOUNDARY */ \
+ V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
+ V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
+ V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
+ V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
+ V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */
+
+#define S390_RIE_G_OPCODE_LIST(V) \
+ V(lochi, LOCHI, \
+ 0xEC42) /* type = RIE_G LOAD HALFWORD IMMEDIATE ON CONDITION (32<-16) */ \
+ V(locghi, LOCGHI, \
+ 0xEC46) /* type = RIE_G LOAD HALFWORD IMMEDIATE ON CONDITION (64<-16) */ \
+ V(lochhi, LOCHHI, 0xEC4E) /* type = RIE_G LOAD HALFWORD HIGH IMMEDIATE */ \
+ /* ON CONDITION (32<-16) */
+
+#define S390_RRS_OPCODE_LIST(V) \
+ V(cgrb, CGRB, 0xECE4) /* type = RRS COMPARE AND BRANCH (64) */ \
+ V(clgrb, CLGRB, 0xECE5) /* type = RRS COMPARE LOGICAL AND BRANCH (64) */ \
+ V(crb, CRB, 0xECF6) /* type = RRS COMPARE AND BRANCH (32) */ \
+ V(clrb, CLRB, 0xECF7) /* type = RRS COMPARE LOGICAL AND BRANCH (32) */
+
+#define S390_OPCODE_LIST(V) \
+ S390_RSY_A_OPCODE_LIST(V) \
+ S390_RSY_B_OPCODE_LIST(V) \
+ S390_RXE_OPCODE_LIST(V) \
+ S390_RRF_A_OPCODE_LIST(V) \
+ S390_RXF_OPCODE_LIST(V) \
+ S390_IE_OPCODE_LIST(V) \
+ S390_RRF_B_OPCODE_LIST(V) \
+ S390_RRF_C_OPCODE_LIST(V) \
+ S390_MII_OPCODE_LIST(V) \
+ S390_RRF_D_OPCODE_LIST(V) \
+ S390_RRF_E_OPCODE_LIST(V) \
+ S390_VRR_A_OPCODE_LIST(V) \
+ S390_VRR_B_OPCODE_LIST(V) \
+ S390_VRR_C_OPCODE_LIST(V) \
+ S390_VRI_A_OPCODE_LIST(V) \
+ S390_VRR_D_OPCODE_LIST(V) \
+ S390_VRI_B_OPCODE_LIST(V) \
+ S390_VRR_E_OPCODE_LIST(V) \
+ S390_VRI_C_OPCODE_LIST(V) \
+ S390_VRI_D_OPCODE_LIST(V) \
+ S390_VRR_F_OPCODE_LIST(V) \
+ S390_RIS_OPCODE_LIST(V) \
+ S390_VRI_E_OPCODE_LIST(V) \
+ S390_RSL_A_OPCODE_LIST(V) \
+ S390_RSL_B_OPCODE_LIST(V) \
+ S390_SI_OPCODE_LIST(V) \
+ S390_SIL_OPCODE_LIST(V) \
+ S390_VRS_A_OPCODE_LIST(V) \
+ S390_RIL_A_OPCODE_LIST(V) \
+ S390_RIL_B_OPCODE_LIST(V) \
+ S390_VRS_B_OPCODE_LIST(V) \
+ S390_RIL_C_OPCODE_LIST(V) \
+ S390_VRS_C_OPCODE_LIST(V) \
+ S390_RI_A_OPCODE_LIST(V) \
+ S390_RSI_OPCODE_LIST(V) \
+ S390_RI_B_OPCODE_LIST(V) \
+ S390_RI_C_OPCODE_LIST(V) \
+ S390_RSL_OPCODE_LIST(V) \
+ S390_SMI_OPCODE_LIST(V) \
+ S390_RXY_A_OPCODE_LIST(V) \
+ S390_RXY_B_OPCODE_LIST(V) \
+ S390_SIY_OPCODE_LIST(V) \
+ S390_SS_A_OPCODE_LIST(V) \
+ S390_E_OPCODE_LIST(V) \
+ S390_SS_B_OPCODE_LIST(V) \
+ S390_SS_C_OPCODE_LIST(V) \
+ S390_SS_D_OPCODE_LIST(V) \
+ S390_SS_E_OPCODE_LIST(V) \
+ S390_I_OPCODE_LIST(V) \
+ S390_SS_F_OPCODE_LIST(V) \
+ S390_SSE_OPCODE_LIST(V) \
+ S390_SSF_OPCODE_LIST(V) \
+ S390_RS_A_OPCODE_LIST(V) \
+ S390_RS_B_OPCODE_LIST(V) \
+ S390_S_OPCODE_LIST(V) \
+ S390_RX_A_OPCODE_LIST(V) \
+ S390_RX_B_OPCODE_LIST(V) \
+ S390_RIE_A_OPCODE_LIST(V) \
+ S390_RRD_OPCODE_LIST(V) \
+ S390_RIE_B_OPCODE_LIST(V) \
+ S390_RRE_OPCODE_LIST(V) \
+ S390_RIE_C_OPCODE_LIST(V) \
+ S390_RIE_D_OPCODE_LIST(V) \
+ S390_VRV_OPCODE_LIST(V) \
+ S390_RIE_E_OPCODE_LIST(V) \
+ S390_RR_OPCODE_LIST(V) \
+ S390_RIE_F_OPCODE_LIST(V) \
+ S390_VRX_OPCODE_LIST(V) \
+ S390_RIE_G_OPCODE_LIST(V) \
+ S390_RRS_OPCODE_LIST(V)
+
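S390_OPCODE_LIST is an X-macro aggregator: a single application of a
user-supplied V(name, opcode_name, opcode_value) macro visits every entry of
every per-format list chained above. A minimal sketch of the idiom under an
assumed helper name (kNumS390Opcodes is illustrative, not part of the patch):

    // Expand each table entry to "+1" to count the opcodes at compile time.
    #define COUNT_OPCODE(name, opcode_name, opcode_value) +1
    static const int kNumS390Opcodes = 0 S390_OPCODE_LIST(COUNT_OPCODE);
    #undef COUNT_OPCODE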
// Opcodes as defined in Appendix B-2 table
enum Opcode {
- A = 0x5A, // Add (32)
- ADB = 0xED1A, // Add (long BFP)
- ADBR = 0xB31A, // Add (long BFP)
- ADTR = 0xB3D2, // Add (long DFP)
- ADTRA = 0xB3D2, // Add (long DFP)
- AEB = 0xED0A, // Add (short BFP)
- AEBR = 0xB30A, // Add (short BFP)
- AFI = 0xC29, // Add Immediate (32)
- AG = 0xE308, // Add (64)
- AGF = 0xE318, // Add (64<-32)
- AGFI = 0xC28, // Add Immediate (64<-32)
- AGFR = 0xB918, // Add (64<-32)
- AGHI = 0xA7B, // Add Halfword Immediate (64)
- AGHIK = 0xECD9, // Add Immediate (64<-16)
- AGR = 0xB908, // Add (64)
- AGRK = 0xB9E8, // Add (64)
- AGSI = 0xEB7A, // Add Immediate (64<-8)
- AH = 0x4A, // Add Halfword
- AHHHR = 0xB9C8, // Add High (32)
- AHHLR = 0xB9D8, // Add High (32)
- AHI = 0xA7A, // Add Halfword Immediate (32)
- AHIK = 0xECD8, // Add Immediate (32<-16)
- AHY = 0xE37A, // Add Halfword
- AIH = 0xCC8, // Add Immediate High (32)
- AL = 0x5E, // Add Logical (32)
- ALC = 0xE398, // Add Logical With Carry (32)
- ALCG = 0xE388, // Add Logical With Carry (64)
- ALCGR = 0xB988, // Add Logical With Carry (64)
- ALCR = 0xB998, // Add Logical With Carry (32)
- ALFI = 0xC2B, // Add Logical Immediate (32)
- ALG = 0xE30A, // Add Logical (64)
- ALGF = 0xE31A, // Add Logical (64<-32)
- ALGFI = 0xC2A, // Add Logical Immediate (64<-32)
- ALGFR = 0xB91A, // Add Logical (64<-32)
- ALGHSIK = 0xECDB, // Add Logical With Signed Immediate (64<-16)
- ALGR = 0xB90A, // Add Logical (64)
- ALGRK = 0xB9EA, // Add Logical (64)
- ALGSI = 0xEB7E, // Add Logical With Signed Immediate (64<-8)
- ALHHHR = 0xB9CA, // Add Logical High (32)
- ALHHLR = 0xB9DA, // Add Logical High (32)
- ALHSIK = 0xECDA, // Add Logical With Signed Immediate (32<-16)
- ALR = 0x1E, // Add Logical (32)
- ALRK = 0xB9FA, // Add Logical (32)
- ALSI = 0xEB6E, // Add Logical With Signed Immediate (32<-8)
- ALSIH = 0xCCA, // Add Logical With Signed Immediate High (32)
- ALSIHN = 0xCCB, // Add Logical With Signed Immediate High (32)
- ALY = 0xE35E, // Add Logical (32)
- AP = 0xFA, // Add Decimal
- AR = 0x1A, // Add (32)
- ARK = 0xB9F8, // Add (32)
- ASI = 0xEB6A, // Add Immediate (32<-8)
- AXBR = 0xB34A, // Add (extended BFP)
- AXTR = 0xB3DA, // Add (extended DFP)
- AXTRA = 0xB3DA, // Add (extended DFP)
- AY = 0xE35A, // Add (32)
- BAL = 0x45, // Branch And Link
- BALR = 0x05, // Branch And Link
- BAS = 0x4D, // Branch And Save
- BASR = 0x0D, // Branch And Save
- BASSM = 0x0C, // Branch And Save And Set Mode
- BC = 0x47, // Branch On Condition
- BCR = 0x07, // Branch On Condition
- BCT = 0x46, // Branch On Count (32)
- BCTG = 0xE346, // Branch On Count (64)
- BCTGR = 0xB946, // Branch On Count (64)
- BCTR = 0x06, // Branch On Count (32)
- BPP = 0xC7, // Branch Prediction Preload
- BPRP = 0xC5, // Branch Prediction Relative Preload
- BRAS = 0xA75, // Branch Relative And Save
- BRASL = 0xC05, // Branch Relative And Save Long
- BRC = 0xA74, // Branch Relative On Condition
- BRCL = 0xC04, // Branch Relative On Condition Long
- BRCT = 0xA76, // Branch Relative On Count (32)
- BRCTG = 0xA77, // Branch Relative On Count (64)
- BRCTH = 0xCC6, // Branch Relative On Count High (32)
- BRXH = 0x84, // Branch Relative On Index High (32)
- BRXHG = 0xEC44, // Branch Relative On Index High (64)
- BRXLE = 0x85, // Branch Relative On Index Low Or Eq. (32)
- BRXLG = 0xEC45, // Branch Relative On Index Low Or Eq. (64)
- BSM = 0x0B, // Branch And Set Mode
- BXH = 0x86, // Branch On Index High (32)
- BXHG = 0xEB44, // Branch On Index High (64)
- BXLE = 0x87, // Branch On Index Low Or Equal (32)
- BXLEG = 0xEB45, // Branch On Index Low Or Equal (64)
- C = 0x59, // Compare (32)
- CDB = 0xED19, // Compare (long BFP)
- CDBR = 0xB319, // Compare (long BFP)
- CDFBR = 0xB395, // Convert From Fixed (32 to long BFP)
- CDFBRA = 0xB395, // Convert From Fixed (32 to long BFP)
- CDFTR = 0xB951, // Convert From Fixed (32 to long DFP)
- CDGBR = 0xB3A5, // Convert From Fixed (64 to long BFP)
- CDGBRA = 0xB3A5, // Convert From Fixed (64 to long BFP)
- CDGTR = 0xB3F1, // Convert From Fixed (64 to long DFP)
- CDGTRA = 0xB3F1, // Convert From Fixed (64 to long DFP)
- CDLFBR = 0xB391, // Convert From Logical (32 to long BFP)
- CDLFTR = 0xB953, // Convert From Logical (32 to long DFP)
- CDLGBR = 0xB3A1, // Convert From Logical (64 to long BFP)
- CDLGTR = 0xB952, // Convert From Logical (64 to long DFP)
- CDS = 0xBB, // Compare Double And Swap (32)
- CDSG = 0xEB3E, // Compare Double And Swap (64)
- CDSTR = 0xB3F3, // Convert From Signed Packed (64 to long DFP)
- CDSY = 0xEB31, // Compare Double And Swap (32)
- CDTR = 0xB3E4, // Compare (long DFP)
- CDUTR = 0xB3F2, // Convert From Unsigned Packed (64 to long DFP)
- CDZT = 0xEDAA, // Convert From Zoned (to long DFP)
- CEB = 0xED09, // Compare (short BFP)
- CEBR = 0xB309, // Compare (short BFP)
- CEDTR = 0xB3F4, // Compare Biased Exponent (long DFP)
- CEFBR = 0xB394, // Convert From Fixed (32 to short BFP)
- CEFBRA = 0xB394, // Convert From Fixed (32 to short BFP)
- CEGBR = 0xB3A4, // Convert From Fixed (64 to short BFP)
- CEGBRA = 0xB3A4, // Convert From Fixed (64 to short BFP)
- CELFBR = 0xB390, // Convert From Logical (32 to short BFP)
- CELGBR = 0xB3A0, // Convert From Logical (64 to short BFP)
- CEXTR = 0xB3FC, // Compare Biased Exponent (extended DFP)
- CFC = 0xB21A, // Compare And Form Codeword
- CFDBR = 0xB399, // Convert To Fixed (long BFP to 32)
- CFDBRA = 0xB399, // Convert To Fixed (long BFP to 32)
- CFDR = 0xB3B9, // Convert To Fixed (long HFP to 32)
- CFDTR = 0xB941, // Convert To Fixed (long DFP to 32)
- CFEBR = 0xB398, // Convert To Fixed (short BFP to 32)
- CFEBRA = 0xB398, // Convert To Fixed (short BFP to 32)
- CFER = 0xB3B8, // Convert To Fixed (short HFP to 32)
- CFI = 0xC2D, // Compare Immediate (32)
- CFXBR = 0xB39A, // Convert To Fixed (extended BFP to 32)
- CFXBRA = 0xB39A, // Convert To Fixed (extended BFP to 32)
- CFXR = 0xB3BA, // Convert To Fixed (extended HFP to 32)
- CFXTR = 0xB949, // Convert To Fixed (extended DFP to 32)
- CG = 0xE320, // Compare (64)
- CGDBR = 0xB3A9, // Convert To Fixed (long BFP to 64)
- CGDBRA = 0xB3A9, // Convert To Fixed (long BFP to 64)
- CGDR = 0xB3C9, // Convert To Fixed (long HFP to 64)
- CGDTR = 0xB3E1, // Convert To Fixed (long DFP to 64)
- CGDTRA = 0xB3E1, // Convert To Fixed (long DFP to 64)
- CGEBR = 0xB3A8, // Convert To Fixed (short BFP to 64)
- CGEBRA = 0xB3A8, // Convert To Fixed (short BFP to 64)
- CGER = 0xB3C8, // Convert To Fixed (short HFP to 64)
- CGF = 0xE330, // Compare (64<-32)
- CGFI = 0xC2C, // Compare Immediate (64<-32)
- CGFR = 0xB930, // Compare (64<-32)
- CGFRL = 0xC6C, // Compare Relative Long (64<-32)
- CGH = 0xE334, // Compare Halfword (64<-16)
- CGHI = 0xA7F, // Compare Halfword Immediate (64<-16)
- CGHRL = 0xC64, // Compare Halfword Relative Long (64<-16)
- CGHSI = 0xE558, // Compare Halfword Immediate (64<-16)
- CGIB = 0xECFC, // Compare Immediate And Branch (64<-8)
- CGIJ = 0xEC7C, // Compare Immediate And Branch Relative (64<-8)
- CGIT = 0xEC70, // Compare Immediate And Trap (64<-16)
- CGR = 0xB920, // Compare (64)
- CGRB = 0xECE4, // Compare And Branch (64)
- CGRJ = 0xEC64, // Compare And Branch Relative (64)
- CGRL = 0xC68, // Compare Relative Long (64)
- CGRT = 0xB960, // Compare And Trap (64)
- CGXBR = 0xB3AA, // Convert To Fixed (extended BFP to 64)
- CGXBRA = 0xB3AA, // Convert To Fixed (extended BFP to 64)
- CGXR = 0xB3CA, // Convert To Fixed (extended HFP to 64)
- CGXTR = 0xB3E9, // Convert To Fixed (extended DFP to 64)
- CGXTRA = 0xB3E9, // Convert To Fixed (extended DFP to 64)
- CH = 0x49, // Compare Halfword (32<-16)
- CHF = 0xE3CD, // Compare High (32)
- CHHR = 0xB9CD, // Compare High (32)
- CHHSI = 0xE554, // Compare Halfword Immediate (16)
- CHI = 0xA7E, // Compare Halfword Immediate (32<-16)
- CHLR = 0xB9DD, // Compare High (32)
- CHRL = 0xC65, // Compare Halfword Relative Long (32<-16)
- CHSI = 0xE55C, // Compare Halfword Immediate (32<-16)
- CHY = 0xE379, // Compare Halfword (32<-16)
- CIB = 0xECFE, // Compare Immediate And Branch (32<-8)
- CIH = 0xCCD, // Compare Immediate High (32)
- CIJ = 0xEC7E, // Compare Immediate And Branch Relative (32<-8)
- CIT = 0xEC72, // Compare Immediate And Trap (32<-16)
- CKSM = 0xB241, // Checksum
- CL = 0x55, // Compare Logical (32)
- CLC = 0xD5, // Compare Logical (character)
- CLCL = 0x0F, // Compare Logical Long
- CLCLE = 0xA9, // Compare Logical Long Extended
- CLCLU = 0xEB8F, // Compare Logical Long Unicode
- CLFDBR = 0xB39D, // Convert To Logical (long BFP to 32)
- CLFDTR = 0xB943, // Convert To Logical (long DFP to 32)
- CLFEBR = 0xB39C, // Convert To Logical (short BFP to 32)
- CLFHSI = 0xE55D, // Compare Logical Immediate (32<-16)
- CLFI = 0xC2F, // Compare Logical Immediate (32)
- CLFIT = 0xEC73, // Compare Logical Immediate And Trap (32<-16)
- CLFXBR = 0xB39E, // Convert To Logical (extended BFP to 32)
- CLFXTR = 0xB94B, // Convert To Logical (extended DFP to 32)
- CLG = 0xE321, // Compare Logical (64)
- CLGDBR = 0xB3AD, // Convert To Logical (long BFP to 64)
- CLGDTR = 0xB942, // Convert To Logical (long DFP to 64)
- CLGEBR = 0xB3AC, // Convert To Logical (short BFP to 64)
- CLGF = 0xE331, // Compare Logical (64<-32)
- CLGFI = 0xC2E, // Compare Logical Immediate (64<-32)
- CLGR = 0xB921, // Compare Logical (64)
- CLI = 0x95, // Compare Logical Immediate (8)
- CLIY = 0xEB55, // Compare Logical Immediate (8)
- CLR = 0x15, // Compare Logical (32)
- CLY = 0xE355, // Compare Logical (32)
- CD = 0x69, // Compare (long HFP)
- CDR = 0x29, // Compare (long HFP)
- CR = 0x19, // Compare (32)
- CSST = 0xC82, // Compare And Swap And Store
- CSXTR = 0xB3EB, // Convert To Signed Packed (extended DFP to 128)
- CSY = 0xEB14, // Compare And Swap (32)
- CU12 = 0xB2A7, // Convert Utf-8 To Utf-16
- CU14 = 0xB9B0, // Convert Utf-8 To Utf-32
- CU21 = 0xB2A6, // Convert Utf-16 To Utf-8
- CU24 = 0xB9B1, // Convert Utf-16 To Utf-32
- CU41 = 0xB9B2, // Convert Utf-32 To Utf-8
- CU42 = 0xB9B3, // Convert Utf-32 To Utf-16
- CUDTR = 0xB3E2, // Convert To Unsigned Packed (long DFP to 64)
- CUSE = 0xB257, // Compare Until Substring Equal
- CUTFU = 0xB2A7, // Convert Utf-8 To Unicode
- CUUTF = 0xB2A6, // Convert Unicode To Utf-8
- CUXTR = 0xB3EA, // Convert To Unsigned Packed (extended DFP to 128)
- CVB = 0x4F, // Convert To Binary (32)
- CVBG = 0xE30E, // Convert To Binary (64)
- CVBY = 0xE306, // Convert To Binary (32)
- CVD = 0x4E, // Convert To Decimal (32)
- CVDG = 0xE32E, // Convert To Decimal (64)
- CVDY = 0xE326, // Convert To Decimal (32)
- CXBR = 0xB349, // Compare (extended BFP)
- CXFBR = 0xB396, // Convert From Fixed (32 to extended BFP)
- CXFBRA = 0xB396, // Convert From Fixed (32 to extended BFP)
- CXFTR = 0xB959, // Convert From Fixed (32 to extended DFP)
- CXGBR = 0xB3A6, // Convert From Fixed (64 to extended BFP)
- CXGBRA = 0xB3A6, // Convert From Fixed (64 to extended BFP)
- CXGTR = 0xB3F9, // Convert From Fixed (64 to extended DFP)
- CXGTRA = 0xB3F9, // Convert From Fixed (64 to extended DFP)
- CXLFBR = 0xB392, // Convert From Logical (32 to extended BFP)
- CXLFTR = 0xB95B, // Convert From Logical (32 to extended DFP)
- CXLGBR = 0xB3A2, // Convert From Logical (64 to extended BFP)
- CXLGTR = 0xB95A, // Convert From Logical (64 to extended DFP)
- CXSTR = 0xB3FB, // Convert From Signed Packed (128 to extended DFP)
- CXTR = 0xB3EC, // Compare (extended DFP)
- CXUTR = 0xB3FA, // Convert From Unsigned Packed (128 to ext. DFP)
- CXZT = 0xEDAB, // Convert From Zoned (to extended DFP)
- CY = 0xE359, // Compare (32)
- CZDT = 0xEDA8, // Convert To Zoned (from long DFP)
- CZXT = 0xEDA9, // Convert To Zoned (from extended DFP)
- D = 0x5D, // Divide (32<-64)
- DDB = 0xED1D, // Divide (long BFP)
- DDBR = 0xB31D, // Divide (long BFP)
- DDTR = 0xB3D1, // Divide (long DFP)
- DDTRA = 0xB3D1, // Divide (long DFP)
- DEB = 0xED0D, // Divide (short BFP)
- DEBR = 0xB30D, // Divide (short BFP)
- DIDBR = 0xB35B, // Divide To Integer (long BFP)
- DIEBR = 0xB353, // Divide To Integer (short BFP)
- DL = 0xE397, // Divide Logical (32<-64)
- DLG = 0xE387, // Divide Logical (64<-128)
- DLGR = 0xB987, // Divide Logical (64<-128)
- DLR = 0xB997, // Divide Logical (32<-64)
- DP = 0xFD, // Divide Decimal
- DR = 0x1D, // Divide (32<-64)
- DSG = 0xE30D, // Divide Single (64)
- DSGF = 0xE31D, // Divide Single (64<-32)
- DSGFR = 0xB91D, // Divide Single (64<-32)
- DSGR = 0xB90D, // Divide Single (64)
- DXBR = 0xB34D, // Divide (extended BFP)
- DXTR = 0xB3D9, // Divide (extended DFP)
- DXTRA = 0xB3D9, // Divide (extended DFP)
- EAR = 0xB24F, // Extract Access
- ECAG = 0xEB4C, // Extract Cache Attribute
- ECTG = 0xC81, // Extract Cpu Time
- ED = 0xDE, // Edit
- EDMK = 0xDF, // Edit And Mark
- EEDTR = 0xB3E5, // Extract Biased Exponent (long DFP to 64)
- EEXTR = 0xB3ED, // Extract Biased Exponent (extended DFP to 64)
- EFPC = 0xB38C, // Extract Fpc
- EPSW = 0xB98D, // Extract Psw
- ESDTR = 0xB3E7, // Extract Significance (long DFP)
- ESXTR = 0xB3EF, // Extract Significance (extended DFP)
- ETND = 0xB2EC, // Extract Transaction Nesting Depth
- EX = 0x44, // Execute
- EXRL = 0xC60, // Execute Relative Long
- FIDBR = 0xB35F, // Load Fp Integer (long BFP)
- FIDBRA = 0xB35F, // Load Fp Integer (long BFP)
- FIDTR = 0xB3D7, // Load Fp Integer (long DFP)
- FIEBR = 0xB357, // Load Fp Integer (short BFP)
- FIEBRA = 0xB357, // Load Fp Integer (short BFP)
- FIXBR = 0xB347, // Load Fp Integer (extended BFP)
- FIXBRA = 0xB347, // Load Fp Integer (extended BFP)
- FIXTR = 0xB3DF, // Load Fp Integer (extended DFP)
- FLOGR = 0xB983, // Find Leftmost One
- HSCH = 0xB231, // Halt Subchannel
- IC_z = 0x43, // Insert Character
- ICM = 0xBF, // Insert Characters Under Mask (low)
- ICMH = 0xEB80, // Insert Characters Under Mask (high)
- ICMY = 0xEB81, // Insert Characters Under Mask (low)
- ICY = 0xE373, // Insert Character
- IEDTR = 0xB3F6, // Insert Biased Exponent (64 to long DFP)
- IEXTR = 0xB3FE, // Insert Biased Exponent (64 to extended DFP)
- IIHF = 0xC08, // Insert Immediate (high)
- IIHH = 0xA50, // Insert Immediate (high high)
- IIHL = 0xA51, // Insert Immediate (high low)
- IILF = 0xC09, // Insert Immediate (low)
- IILH = 0xA52, // Insert Immediate (low high)
- IILL = 0xA53, // Insert Immediate (low low)
- IPM = 0xB222, // Insert Program Mask
- KDB = 0xED18, // Compare And Signal (long BFP)
- KDBR = 0xB318, // Compare And Signal (long BFP)
- KDTR = 0xB3E0, // Compare And Signal (long DFP)
- KEB = 0xED08, // Compare And Signal (short BFP)
- KEBR = 0xB308, // Compare And Signal (short BFP)
- KIMD = 0xB93E, // Compute Intermediate Message Digest
- KLMD = 0xB93F, // Compute Last Message Digest
- KM = 0xB92E, // Cipher Message
- KMAC = 0xB91E, // Compute Message Authentication Code
- KMC = 0xB92F, // Cipher Message With Chaining
- KMCTR = 0xB92D, // Cipher Message With Counter
- KMF = 0xB92A, // Cipher Message With Cfb
- KMO = 0xB92B, // Cipher Message With Ofb
- KXBR = 0xB348, // Compare And Signal (extended BFP)
- KXTR = 0xB3E8, // Compare And Signal (extended DFP)
- L = 0x58, // Load (32)
- LA = 0x41, // Load Address
- LAA = 0xEBF8, // Load And Add (32)
- LAAG = 0xEBE8, // Load And Add (64)
- LAAL = 0xEBFA, // Load And Add Logical (32)
- LAALG = 0xEBEA, // Load And Add Logical (64)
- LAE = 0x51, // Load Address Extended
- LAEY = 0xE375, // Load Address Extended
- LAN = 0xEBF4, // Load And And (32)
- LANG = 0xEBE4, // Load And And (64)
- LAO = 0xEBF6, // Load And Or (32)
- LAOG = 0xEBE6, // Load And Or (64)
- LARL = 0xC00, // Load Address Relative Long
- LAT = 0xE39F, // Load And Trap (32L<-32)
- LAX = 0xEBF7, // Load And Exclusive Or (32)
- LAXG = 0xEBE7, // Load And Exclusive Or (64)
- LAY = 0xE371, // Load Address
- LB = 0xE376, // Load Byte (32)
- LBH = 0xE3C0, // Load Byte High (32<-8)
- LBR = 0xB926, // Load Byte (32)
- LCDBR = 0xB313, // Load Complement (long BFP)
- LCDFR = 0xB373, // Load Complement (long)
- LCEBR = 0xB303, // Load Complement (short BFP)
- LCGFR = 0xB913, // Load Complement (64<-32)
- LCGR = 0xB903, // Load Complement (64)
- LCR = 0x13, // Load Complement (32)
- LCXBR = 0xB343, // Load Complement (extended BFP)
- LD = 0x68, // Load (long)
- LDEB = 0xED04, // Load Lengthened (short to long BFP)
- LDEBR = 0xB304, // Load Lengthened (short to long BFP)
- LDETR = 0xB3D4, // Load Lengthened (short to long DFP)
- LDGR = 0xB3C1, // Load Fpr From Gr (64 to long)
- LDR = 0x28, // Load (long)
- LDXBR = 0xB345, // Load Rounded (extended to long BFP)
- LDXBRA = 0xB345, // Load Rounded (extended to long BFP)
- LDXTR = 0xB3DD, // Load Rounded (extended to long DFP)
- LDY = 0xED65, // Load (long)
- LE = 0x78, // Load (short)
- LEDBR = 0xB344, // Load Rounded (long to short BFP)
- LEDBRA = 0xB344, // Load Rounded (long to short BFP)
- LEDTR = 0xB3D5, // Load Rounded (long to short DFP)
- LER = 0x38, // Load (short)
- LEXBR = 0xB346, // Load Rounded (extended to short BFP)
- LEXBRA = 0xB346, // Load Rounded (extended to short BFP)
- LEY = 0xED64, // Load (short)
- LFAS = 0xB2BD, // Load Fpc And Signal
- LFH = 0xE3CA, // Load High (32)
- LFHAT = 0xE3C8, // Load High And Trap (32H<-32)
- LFPC = 0xB29D, // Load Fpc
- LG = 0xE304, // Load (64)
- LGAT = 0xE385, // Load And Trap (64)
- LGB = 0xE377, // Load Byte (64)
- LGBR = 0xB906, // Load Byte (64)
- LGDR = 0xB3CD, // Load Gr From Fpr (long to 64)
- LGF = 0xE314, // Load (64<-32)
- LGFI = 0xC01, // Load Immediate (64<-32)
- LGFR = 0xB914, // Load (64<-32)
- LGFRL = 0xC4C, // Load Relative Long (64<-32)
- LGH = 0xE315, // Load Halfword (64)
- LGHI = 0xA79, // Load Halfword Immediate (64)
- LGHR = 0xB907, // Load Halfword (64)
- LGHRL = 0xC44, // Load Halfword Relative Long (64<-16)
- LGR = 0xB904, // Load (64)
- LGRL = 0xC48, // Load Relative Long (64)
- LH = 0x48, // Load Halfword (32)
- LHH = 0xE3C4, // Load Halfword High (32<-16)
- LHI = 0xA78, // Load Halfword Immediate (32)
- LHR = 0xB927, // Load Halfword (32)
- LHRL = 0xC45, // Load Halfword Relative Long (32<-16)
- LHY = 0xE378, // Load Halfword (32)
- LLC = 0xE394, // Load Logical Character (32)
- LLCH = 0xE3C2, // Load Logical Character High (32<-8)
- LLCR = 0xB994, // Load Logical Character (32)
- LLGC = 0xE390, // Load Logical Character (64)
- LLGCR = 0xB984, // Load Logical Character (64)
- LLGF = 0xE316, // Load Logical (64<-32)
- LLGFAT = 0xE39D, // Load Logical And Trap (64<-32)
- LLGFR = 0xB916, // Load Logical (64<-32)
- LLGFRL = 0xC4E, // Load Logical Relative Long (64<-32)
- LLGH = 0xE391, // Load Logical Halfword (64)
- LLGHR = 0xB985, // Load Logical Halfword (64)
- LLGHRL = 0xC46, // Load Logical Halfword Relative Long (64<-16)
- LLGT = 0xE317, // Load Logical Thirty One Bits
- LLGTAT = 0xE39C, // Load Logical Thirty One Bits And Trap (64<-31)
- LLGTR = 0xB917, // Load Logical Thirty One Bits
- LLH = 0xE395, // Load Logical Halfword (32)
- LLHH = 0xE3C6, // Load Logical Halfword High (32<-16)
- LLHR = 0xB995, // Load Logical Halfword (32)
- LLHRL = 0xC42, // Load Logical Halfword Relative Long (32<-16)
- LLIHF = 0xC0E, // Load Logical Immediate (high)
- LLIHH = 0xA5C, // Load Logical Immediate (high high)
- LLIHL = 0xA5D, // Load Logical Immediate (high low)
- LLILF = 0xC0F, // Load Logical Immediate (low)
- LLILH = 0xA5E, // Load Logical Immediate (low high)
- LLILL = 0xA5F, // Load Logical Immediate (low low)
- LM = 0x98, // Load Multiple (32)
- LMD = 0xEF, // Load Multiple Disjoint
- LMG = 0xEB04, // Load Multiple (64)
- LMH = 0xEB96, // Load Multiple High
- LMY = 0xEB98, // Load Multiple (32)
- LNDBR = 0xB311, // Load Negative (long BFP)
- LNDFR = 0xB371, // Load Negative (long)
- LNEBR = 0xB301, // Load Negative (short BFP)
- LNGFR = 0xB911, // Load Negative (64<-32)
- LNGR = 0xB901, // Load Negative (64)
- LNR = 0x11, // Load Negative (32)
- LNXBR = 0xB341, // Load Negative (extended BFP)
- LOC = 0xEBF2, // Load On Condition (32)
- LOCG = 0xEBE2, // Load On Condition (64)
- LOCGR = 0xB9E2, // Load On Condition (64)
- LOCR = 0xB9F2, // Load On Condition (32)
- LPD = 0xC84, // Load Pair Disjoint (32)
- LPDBR = 0xB310, // Load Positive (long BFP)
- LPDFR = 0xB370, // Load Positive (long)
- LPDG = 0xC85, // Load Pair Disjoint (64)
- LPEBR = 0xB300, // Load Positive (short BFP)
- LPGFR = 0xB910, // Load Positive (64<-32)
- LPGR = 0xB900, // Load Positive (64)
- LPQ = 0xE38F, // Load Pair From Quadword
- LPR = 0x10, // Load Positive (32)
- LPXBR = 0xB340, // Load Positive (extended BFP)
- LR = 0x18, // Load (32)
- LRL = 0xC4D, // Load Relative Long (32)
- LRV = 0xE31E, // Load Reversed (32)
- LRVG = 0xE30F, // Load Reversed (64)
- LRVGR = 0xB90F, // Load Reversed (64)
- LRVH = 0xE31F, // Load Reversed (16)
- LRVR = 0xB91F, // Load Reversed (32)
- LT = 0xE312, // Load And Test (32)
- LTDBR = 0xB312, // Load And Test (long BFP)
- LTDTR = 0xB3D6, // Load And Test (long DFP)
- LTEBR = 0xB302, // Load And Test (short BFP)
- LTG = 0xE302, // Load And Test (64)
- LTGF = 0xE332, // Load And Test (64<-32)
- LTGFR = 0xB912, // Load And Test (64<-32)
- LTGR = 0xB902, // Load And Test (64)
- LTR = 0x12, // Load And Test (32)
- LTXBR = 0xB342, // Load And Test (extended BFP)
- LTXTR = 0xB3DE, // Load And Test (extended DFP)
- LXDB = 0xED05, // Load Lengthened (long to extended BFP)
- LXDBR = 0xB305, // Load Lengthened (long to extended BFP)
- LXDTR = 0xB3DC, // Load Lengthened (long to extended DFP)
- LXEB = 0xED06, // Load Lengthened (short to extended BFP)
- LXEBR = 0xB306, // Load Lengthened (short to extended BFP)
- LXR = 0xB365, // Load (extended)
- LY = 0xE358, // Load (32)
- LZDR = 0xB375, // Load Zero (long)
- LZER = 0xB374, // Load Zero (short)
- LZXR = 0xB376, // Load Zero (extended)
- M = 0x5C, // Multiply (64<-32)
- MADB = 0xED1E, // Multiply And Add (long BFP)
- MADBR = 0xB31E, // Multiply And Add (long BFP)
- MAEB = 0xED0E, // Multiply And Add (short BFP)
- MAEBR = 0xB30E, // Multiply And Add (short BFP)
- MC = 0xAF, // Monitor Call
- MDB = 0xED1C, // Multiply (long BFP)
- MDBR = 0xB31C, // Multiply (long BFP)
- MDEB = 0xED0C, // Multiply (short to long BFP)
- MDEBR = 0xB30C, // Multiply (short to long BFP)
- MDTR = 0xB3D0, // Multiply (long DFP)
- MDTRA = 0xB3D0, // Multiply (long DFP)
- MEEB = 0xED17, // Multiply (short BFP)
- MEEBR = 0xB317, // Multiply (short BFP)
- MFY = 0xE35C, // Multiply (64<-32)
- MGHI = 0xA7D, // Multiply Halfword Immediate (64)
- MH = 0x4C, // Multiply Halfword (32)
- MHI = 0xA7C, // Multiply Halfword Immediate (32)
- MHY = 0xE37C, // Multiply Halfword (32)
- ML = 0xE396, // Multiply Logical (64<-32)
- MLG = 0xE386, // Multiply Logical (128<-64)
- MLGR = 0xB986, // Multiply Logical (128<-64)
- MLR = 0xB996, // Multiply Logical (64<-32)
- MP = 0xFC, // Multiply Decimal
- MR = 0x1C, // Multiply (64<-32)
- MS = 0x71, // Multiply Single (32)
- MSCH = 0xB232, // Modify Subchannel
- MSDB = 0xED1F, // Multiply And Subtract (long BFP)
- MSDBR = 0xB31F, // Multiply And Subtract (long BFP)
- MSEB = 0xED0F, // Multiply And Subtract (short BFP)
- MSEBR = 0xB30F, // Multiply And Subtract (short BFP)
- MSFI = 0xC21, // Multiply Single Immediate (32)
- MSG = 0xE30C, // Multiply Single (64)
- MSGF = 0xE31C, // Multiply Single (64<-32)
- MSGFI = 0xC20, // Multiply Single Immediate (64<-32)
- MSGFR = 0xB91C, // Multiply Single (64<-32)
- MSGR = 0xB90C, // Multiply Single (64)
- MSR = 0xB252, // Multiply Single (32)
- MSY = 0xE351, // Multiply Single (32)
- MVC = 0xD2, // Move (character)
- MVCP = 0xDA, // Move To Primary
- MVCDK = 0xE50F, // Move With Destination Key
- MVCIN = 0xE8, // Move Inverse
- MVCL = 0x0E, // Move Long
- MVCLE = 0xA8, // Move Long Extended
- MVCLU = 0xEB8E, // Move Long Unicode
- MVGHI = 0xE548, // Move (64<-16)
- MVHHI = 0xE544, // Move (16<-16)
- MVHI = 0xE54C, // Move (32<-16)
- MVI = 0x92, // Move (immediate)
- MVIY = 0xEB52, // Move (immediate)
- MVN = 0xD1, // Move Numerics
- MVO = 0xF1, // Move With Offset
- MVST = 0xB255, // Move String
- MVZ = 0xD3, // Move Zones
- MXBR = 0xB34C, // Multiply (extended BFP)
- MXDB = 0xED07, // Multiply (long to extended BFP)
- MXDBR = 0xB307, // Multiply (long to extended BFP)
- MXTR = 0xB3D8, // Multiply (extended DFP)
- MXTRA = 0xB3D8, // Multiply (extended DFP)
- N = 0x54, // And (32)
- NC = 0xD4, // And (character)
- NG = 0xE380, // And (64)
- NGR = 0xB980, // And (64)
- NGRK = 0xB9E4, // And (64)
- NI = 0x94, // And (immediate)
- NIAI = 0xB2FA, // Next Instruction Access Intent
- NIHF = 0xC0A, // And Immediate (high)
- NIHH = 0xA54, // And Immediate (high high)
- NIHL = 0xA55, // And Immediate (high low)
- NILF = 0xC0B, // And Immediate (low)
- NILH = 0xA56, // And Immediate (low high)
- NILL = 0xA57, // And Immediate (low low)
- NIY = 0xEB54, // And (immediate)
- NR = 0x14, // And (32)
- NRK = 0xB9F4, // And (32)
- NTSTG = 0xE325, // Nontransactional Store (64)
- NY = 0xE354, // And (32)
- O = 0x56, // Or (32)
- OC = 0xD6, // Or (character)
- OG = 0xE381, // Or (64)
- OGR = 0xB981, // Or (64)
- OGRK = 0xB9E6, // Or (64)
- OI = 0x96, // Or (immediate)
- OIHF = 0xC0C, // Or Immediate (high)
- OIHH = 0xA58, // Or Immediate (high high)
- OIHL = 0xA59, // Or Immediate (high low)
- OILF = 0xC0D, // Or Immediate (low)
- OILH = 0xA5A, // Or Immediate (low high)
- OILL = 0xA5B, // Or Immediate (low low)
- OIY = 0xEB56, // Or (immediate)
- OR = 0x16, // Or (32)
- ORK = 0xB9F6, // Or (32)
- OY = 0xE356, // Or (32)
- PACK = 0xF2, // Pack
- PCC = 0xB92C, // Perform Cryptographic Computation
- PFD = 0xE336, // Prefetch Data
- PFDRL = 0xC62, // Prefetch Data Relative Long
- PFPO = 0x010A, // Perform Floating-Point Operation
- PKA = 0xE9, // Pack Ascii
- PKU = 0xE1, // Pack Unicode
- PLO = 0xEE, // Perform Locked Operation
- POPCNT_Z = 0xB9E1, // Population Count
- PPA = 0xB2E8, // Perform Processor Assist
- QADTR = 0xB3F5, // Quantize (long DFP)
- QAXTR = 0xB3FD, // Quantize (extended DFP)
- RCHP = 0xB23B, // Reset Channel Path
- RISBG = 0xEC55, // Rotate Then Insert Selected Bits
- RISBGN = 0xEC59, // Rotate Then Insert Selected Bits
- RISBHG = 0xEC5D, // Rotate Then Insert Selected Bits High
- RISBLG = 0xEC51, // Rotate Then Insert Selected Bits Low
- RLL = 0xEB1D, // Rotate Left Single Logical (32)
- RLLG = 0xEB1C, // Rotate Left Single Logical (64)
- RNSBG = 0xEC54, // Rotate Then And Selected Bits
- ROSBG = 0xEC56, // Rotate Then Or Selected Bits
- RRDTR = 0xB3F7, // Reround (long DFP)
- RRXTR = 0xB3FF, // Reround (extended DFP)
- RSCH = 0xB238, // Resume Subchannel
- RXSBG = 0xEC57, // Rotate Then Exclusive Or Selected Bits
- S = 0x5B, // Subtract (32)
- SAL = 0xB237, // Set Address Limit
- SAR = 0xB24E, // Set Access
- SCHM = 0xB23C, // Set Channel Monitor
- SDB = 0xED1B, // Subtract (long BFP)
- SDBR = 0xB31B, // Subtract (long BFP)
- SDTR = 0xB3D3, // Subtract (long DFP)
- SDTRA = 0xB3D3, // Subtract (long DFP)
- SEB = 0xED0B, // Subtract (short BFP)
- SEBR = 0xB30B, // Subtract (short BFP)
- SFASR = 0xB385, // Set Fpc And Signal
- SFPC = 0xB384, // Set Fpc
- SG = 0xE309, // Subtract (64)
- SGF = 0xE319, // Subtract (64<-32)
- SGFR = 0xB919, // Subtract (64<-32)
- SGR = 0xB909, // Subtract (64)
- SGRK = 0xB9E9, // Subtract (64)
- SH = 0x4B, // Subtract Halfword
- SHHHR = 0xB9C9, // Subtract High (32)
- SHHLR = 0xB9D9, // Subtract High (32)
- SHY = 0xE37B, // Subtract Halfword
- SL = 0x5F, // Subtract Logical (32)
- SLA = 0x8B, // Shift Left Single (32)
- SLAG = 0xEB0B, // Shift Left Single (64)
- SLAK = 0xEBDD, // Shift Left Single (32)
- SLB = 0xE399, // Subtract Logical With Borrow (32)
- SLBG = 0xE389, // Subtract Logical With Borrow (64)
- SLBGR = 0xB989, // Subtract Logical With Borrow (64)
- SLBR = 0xB999, // Subtract Logical With Borrow (32)
- SLDA = 0x8F, // Shift Left Double
- SLDL = 0x8D, // Shift Left Double Logical
- SLDT = 0xED40, // Shift Significand Left (long DFP)
- SLFI = 0xC25, // Subtract Logical Immediate (32)
- SLG = 0xE30B, // Subtract Logical (64)
- SLGF = 0xE31B, // Subtract Logical (64<-32)
- SLGFI = 0xC24, // Subtract Logical Immediate (64<-32)
- SLGFR = 0xB91B, // Subtract Logical (64<-32)
- SLGR = 0xB90B, // Subtract Logical (64)
- SLGRK = 0xB9EB, // Subtract Logical (64)
- SLHHHR = 0xB9CB, // Subtract Logical High (32)
- SLHHLR = 0xB9DB, // Subtract Logical High (32)
- SLL = 0x89, // Shift Left Single Logical (32)
- SLLG = 0xEB0D, // Shift Left Single Logical (64)
- SLLK = 0xEBDF, // Shift Left Single Logical (32)
- SLR = 0x1F, // Subtract Logical (32)
- SLRK = 0xB9FB, // Subtract Logical (32)
- SLXT = 0xED48, // Shift Significand Left (extended DFP)
- SLY = 0xE35F, // Subtract Logical (32)
- SP = 0xFB, // Subtract Decimal
- SPM = 0x04, // Set Program Mask
- SQDB = 0xED15, // Square Root (long BFP)
- SQDBR = 0xB315, // Square Root (long BFP)
- SQEB = 0xED14, // Square Root (short BFP)
- SQEBR = 0xB314, // Square Root (short BFP)
- SQXBR = 0xB316, // Square Root (extended BFP)
- SR = 0x1B, // Subtract (32)
- SRA = 0x8A, // Shift Right Single (32)
- SRAG = 0xEB0A, // Shift Right Single (64)
- SRAK = 0xEBDC, // Shift Right Single (32)
- SRDA = 0x8E, // Shift Right Double
- SRDL = 0x8C, // Shift Right Double Logical
- SRDT = 0xED41, // Shift Significand Right (long DFP)
- SRK = 0xB9F9, // Subtract (32)
- SRL = 0x88, // Shift Right Single Logical (32)
- SRLG = 0xEB0C, // Shift Right Single Logical (64)
- SRLK = 0xEBDE, // Shift Right Single Logical (32)
- SRNM = 0xB299, // Set BFP Rounding Mode (2 bit)
- SRNMB = 0xB2B8, // Set BFP Rounding Mode (3 bit)
- SRNMT = 0xB2B9, // Set DFP Rounding Mode
- SRP = 0xF0, // Shift And Round Decimal
- SRST = 0xB25E, // Search String
- SRSTU = 0xB9BE, // Search String Unicode
- SRXT = 0xED49, // Shift Significand Right (extended DFP)
- SSCH = 0xB233, // Start Subchannel
- ST = 0x50, // Store (32)
- STC = 0x42, // Store Character
- STCH = 0xE3C3, // Store Character High (8)
- STCK = 0xB205, // Store Clock
- STCKE = 0xB278, // Store Clock Extended
- STCKF = 0xB27C, // Store Clock Fast
- STCM = 0xBE, // Store Characters Under Mask (low)
- STCMH = 0xEB2C, // Store Characters Under Mask (high)
- STCMY = 0xEB2D, // Store Characters Under Mask (low)
- STCPS = 0xB23A, // Store Channel Path Status
- STCRW = 0xB239, // Store Channel Report Word
- STCY = 0xE372, // Store Character
- STD = 0x60, // Store (long)
- STDY = 0xED67, // Store (long)
- STE = 0x70, // Store (short)
- STEY = 0xED66, // Store (short)
- STFH = 0xE3CB, // Store High (32)
- STFLE = 0xB2B0, // Store Facility List Extended
- STFPC = 0xB29C, // Store Fpc
- STG = 0xE324, // Store (64)
- STGRL = 0xC4B, // Store Relative Long (64)
- STH = 0x40, // Store Halfword
- STHH = 0xE3C7, // Store Halfword High (16)
- STHRL = 0xC47, // Store Halfword Relative Long
- STHY = 0xE370, // Store Halfword
- STM = 0x90, // Store Multiple (32)
- STMG = 0xEB24, // Store Multiple (64)
- STMH = 0xEB26, // Store Multiple High
- STMY = 0xEB90, // Store Multiple (32)
- STOC = 0xEBF3, // Store On Condition (32)
- STOCG = 0xEBE3, // Store On Condition (64)
- STPQ = 0xE38E, // Store Pair To Quadword
- STRL = 0xC4F, // Store Relative Long (32)
- STRV = 0xE33E, // Store Reversed (32)
- STRVG = 0xE32F, // Store Reversed (64)
- STRVH = 0xE33F, // Store Reversed (16)
- STSCH = 0xB234, // Store Subchannel
- STY = 0xE350, // Store (32)
- SVC = 0x0A, // Supervisor Call
- SXBR = 0xB34B, // Subtract (extended BFP)
- SXTR = 0xB3DB, // Subtract (extended DFP)
- SXTRA = 0xB3DB, // Subtract (extended DFP)
- SY = 0xE35B, // Subtract (32)
- TABORT = 0xB2FC, // Transaction Abort
- TBDR = 0xB351, // Convert HFP To BFP (long)
- TBEDR = 0xB350, // Convert HFP To BFP (long to short)
- TBEGIN = 0xE560, // Transaction Begin
- TBEGINC = 0xE561, // Transaction Begin (constrained)
- TCDB = 0xED11, // Test Data Class (long BFP)
- TCEB = 0xED10, // Test Data Class (short BFP)
- TCXB = 0xED12, // Test Data Class (extended BFP)
- TDCDT = 0xED54, // Test Data Class (long DFP)
- TDCET = 0xED50, // Test Data Class (short DFP)
- TDCXT = 0xED58, // Test Data Class (extended DFP)
- TDGDT = 0xED55, // Test Data Group (long DFP)
- TDGET = 0xED51, // Test Data Group (short DFP)
- TDGXT = 0xED59, // Test Data Group (extended DFP)
- TEND = 0xB2F8, // Transaction End
- THDER = 0xB358, // Convert BFP To HFP (short to long)
- THDR = 0xB359, // Convert BFP To HFP (long)
- TM = 0x91, // Test Under Mask
- TMH = 0xA70, // Test Under Mask High
- TMHH = 0xA72, // Test Under Mask (high high)
- TMHL = 0xA73, // Test Under Mask (high low)
- TML = 0xA71, // Test Under Mask Low
- TMLH = 0xA70, // Test Under Mask (low high)
- TMLL = 0xA71, // Test Under Mask (low low)
- TMY = 0xEB51, // Test Under Mask
- TP = 0xEBC0, // Test Decimal
- TPI = 0xB236, // Test Pending Interruption
- TR = 0xDC, // Translate
- TRAP4 = 0xB2FF, // Trap (4)
- TRE = 0xB2A5, // Translate Extended
- TROO = 0xB993, // Translate One To One
- TROT = 0xB992, // Translate One To Two
- TRT = 0xDD, // Translate And Test
- TRTE = 0xB9BF, // Translate And Test Extended
- TRTO = 0xB991, // Translate Two To One
- TRTR = 0xD0, // Translate And Test Reverse
- TRTRE = 0xB9BD, // Translate And Test Reverse Extended
- TRTT = 0xB990, // Translate Two To Two
- TS = 0x93, // Test And Set
- TSCH = 0xB235, // Test Subchannel
- UNPK = 0xF3, // Unpack
- UNPKA = 0xEA, // Unpack Ascii
- UNPKU = 0xE2, // Unpack Unicode
- UPT = 0x0102, // Update Tree
- X = 0x57, // Exclusive Or (32)
- XC = 0xD7, // Exclusive Or (character)
- XG = 0xE382, // Exclusive Or (64)
- XGR = 0xB982, // Exclusive Or (64)
- XGRK = 0xB9E7, // Exclusive Or (64)
- XI = 0x97, // Exclusive Or (immediate)
- XIHF = 0xC06, // Exclusive Or Immediate (high)
- XILF = 0xC07, // Exclusive Or Immediate (low)
- XIY = 0xEB57, // Exclusive Or (immediate)
- XR = 0x17, // Exclusive Or (32)
- XRK = 0xB9F7, // Exclusive Or (32)
- XSCH = 0xB276, // Cancel Subchannel
- XY = 0xE357, // Exclusive Or (32)
- ZAP = 0xF8, // Zero And Add
- BKPT = 0x0001 // GDB Software Breakpoint
+#define DECLARE_OPCODES(name, opcode_name, opcode_value) \
+ opcode_name = opcode_value,
+ S390_OPCODE_LIST(DECLARE_OPCODES)
+#undef DECLARE_OPCODES
+
+ BKPT = 0x0001, // GDB Software Breakpoint
+ DUMY = 0xE353 // Special dummy opcode
};
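Each V(name, opcode_name, opcode_value) entry thus becomes one enumerator, so
the enum no longer needs a hand-maintained body. Roughly, the preprocessor
turns the RRE entries shown above into:

    // Sketch of a fragment of the expanded enum body:
    LPGR = 0xB900,
    LNGR = 0xB901,
    LTGR = 0xB902,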
// Instruction encoding bits and masks.
@@ -1303,15 +2103,69 @@ class Instruction {
DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
};
-// I Instruction -- suspect this will not be used,
-// but implement for completeness
-class IInstruction : Instruction {
- public:
- inline int IValue() const { return Bits<TwoByteInstr, int>(7, 0); }
+#define DECLARE_FIELD_FOR_TWO_BYTE_INSTR(name, T, lo, hi) \
+ inline int name() const { \
+ return Bits<TwoByteInstr, T>(15 - (lo), 15 - (hi) + 1); \
+ }
+
+#define DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(name, T, lo, hi) \
+ inline int name() const { \
+ return Bits<FourByteInstr, T>(31 - (lo), 31 - (hi) + 1); \
+ }
+#define DECLARE_FIELD_FOR_SIX_BYTE_INSTR(name, T, lo, hi) \
+ inline int name() const { \
+ return Bits<SixByteInstr, T>(47 - (lo), 47 - (hi) + 1); \
+ }
+
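These macros take IBM-style bit positions (bit 0 is the leftmost bit of the
instruction) as a half-open [lo, hi) range and translate them into the LSB-0
arguments that Bits<>() expects. As a worked check, a four-byte field declared
as (R1Value, int, 8, 12) expands to roughly:

    inline int R1Value() const {
      // 31 - 8 == 23 and 31 - 12 + 1 == 20: the same Bits(23, 20) call as
      // the hand-written RIInstruction::R1Value() removed further down.
      return Bits<FourByteInstr, int>(23, 20);
    }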
+class TwoByteInstruction : public Instruction {
+ public:
inline int size() const { return 2; }
};
+class FourByteInstruction : public Instruction {
+ public:
+ inline int size() const { return 4; }
+};
+
+class SixByteInstruction : public Instruction {
+ public:
+ inline int size() const { return 6; }
+};
+
+// I Instruction
+class IInstruction : public TwoByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_TWO_BYTE_INSTR(IValue, int, 8, 16);
+};
+
+// E Instruction
+class EInstruction : public TwoByteInstruction {};
+
+// IE Instruction
+class IEInstruction : public FourByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I1Value, int, 24, 28);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 28, 32);
+};
+
+// MII Instruction
+class MIIInstruction : public SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M1Value, uint32_t, 8, 12);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI2Value, int, 12, 24);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(RI3Value, int, 24, 48);
+};
+
+// RI Instruction
+class RIInstruction : public FourByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(R1Value, int, 8, 12);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2Value, int, 16, 32);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(I2UnsignedValue, uint32_t, 16, 32);
+ DECLARE_FIELD_FOR_FOUR_BYTE_INSTR(M1Value, uint32_t, 8, 12);
+};
+
// RR Instruction
class RRInstruction : Instruction {
public:
@@ -1358,20 +2212,6 @@ class RRDInstruction : Instruction {
inline int size() const { return 4; }
};
-// RI Instruction
-class RIInstruction : Instruction {
- public:
- inline int R1Value() const { return Bits<FourByteInstr, int>(23, 20); }
- inline int16_t I2Value() const { return Bits<FourByteInstr, int16_t>(15, 0); }
- inline uint16_t I2UnsignedValue() const {
- return Bits<FourByteInstr, uint16_t>(15, 0);
- }
- inline Condition M1Value() const {
- return static_cast<Condition>(Bits<FourByteInstr, int>(23, 20));
- }
- inline int size() const { return 4; }
-};
-
// RS Instruction
class RSInstruction : Instruction {
public:
@@ -1505,6 +2345,17 @@ class RIEInstruction : Instruction {
inline int size() const { return 6; }
};
+// VRR Instruction
+class VRR_C_Instruction : SixByteInstruction {
+ public:
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R1Value, int, 8, 12);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R2Value, int, 12, 16);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(R3Value, int, 16, 20);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M6Value, uint32_t, 24, 28);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M5Value, uint32_t, 28, 32);
+ DECLARE_FIELD_FOR_SIX_BYTE_INSTR(M4Value, uint32_t, 32, 36);
+};
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
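The format classes are thin, size-tagged views over the raw instruction bytes;
a decoder reads fields by casting an Instruction pointer to the matching
format class. A sketch under that assumption (instr and the local variables
are illustrative):

    // Illustrative only: read the R1 and I2 fields of an RI-format
    // instruction through the generated accessors.
    RIInstruction* ri = reinterpret_cast<RIInstruction*>(instr);
    int r1 = ri->R1Value();  // IBM bits 8..11
    int i2 = ri->I2Value();  // IBM bits 16..31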
diff --git a/deps/v8/src/s390/disasm-s390.cc b/deps/v8/src/s390/disasm-s390.cc
index 26079b9992..f33812096c 100644
--- a/deps/v8/src/s390/disasm-s390.cc
+++ b/deps/v8/src/s390/disasm-s390.cc
@@ -562,6 +562,9 @@ bool Decoder::DecodeTwoByte(Instruction* instr) {
case BKPT:
Format(instr, "bkpt");
break;
+ case LPR:
+ Format(instr, "lpr\t'r1, 'r2");
+ break;
default:
return false;
}
@@ -760,6 +763,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case MSR:
Format(instr, "msr\t'r5,'r6");
break;
+ case MSRKC:
+ Format(instr, "msrkc\t'r5,'r6,'r3");
+ break;
case LGBR:
Format(instr, "lgbr\t'r5,'r6");
break;
@@ -769,6 +775,9 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
case MSGR:
Format(instr, "msgr\t'r5,'r6");
break;
+ case MSGRKC:
+ Format(instr, "msgrkc\t'r5,'r6,'r3");
+ break;
case DSGR:
Format(instr, "dsgr\t'r5,'r6");
break;
@@ -1036,6 +1045,12 @@ bool Decoder::DecodeFourByte(Instruction* instr) {
Format(instr, "trap4");
break;
}
+ case LPGR:
+ Format(instr, "lpgr\t'r1, 'r2");
+ break;
+ case LPGFR:
+ Format(instr, "lpgfr\t'r1,'r2");
+ break;
default:
return false;
}
@@ -1052,6 +1067,15 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
Opcode opcode = instr->S390OpcodeValue();
switch (opcode) {
+ case DUMY:
+ Format(instr, "dumy\t'r1, 'd2 ( 'r2d, 'r3 )");
+ break;
+#define DECODE_VRR_C_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: \
+ Format(instr, #name "\t'f1,'f2,'f3"); \
+ break;
+ S390_VRR_C_OPCODE_LIST(DECODE_VRR_C_INSTRUCTIONS)
+#undef DECODE_VRR_C_INSTRUCTIONS
case LLILF:
Format(instr, "llilf\t'r1,'i7");
break;
@@ -1061,6 +1085,9 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case AFI:
Format(instr, "afi\t'r1,'i7");
break;
+ case AIH:
+ Format(instr, "aih\t'r1,'i7");
+ break;
case ASI:
Format(instr, "asi\t'd2('r3),'ic");
break;
@@ -1082,6 +1109,12 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case CLFI:
Format(instr, "clfi\t'r1,'i7");
break;
+ case CLIH:
+ Format(instr, "clih\t'r1,'i7");
+ break;
+ case CIH:
+ Format(instr, "cih\t'r1,'i2");
+ break;
case CFI:
Format(instr, "cfi\t'r1,'i2");
break;
@@ -1388,6 +1421,9 @@ bool Decoder::DecodeSixByte(Instruction* instr) {
case SQDB:
Format(instr, "sqdb\t'r1,'d1('r2d, 'r3)");
break;
+ case PFD:
+ Format(instr, "pfd\t'm1,'d2('r2d,'r3)");
+ break;
default:
return false;
}
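The DECODE_VRR_C_INSTRUCTIONS expansion above emits one switch case per entry
of S390_VRR_C_OPCODE_LIST, stringizing the lower-case mnemonic into the format
string. For a hypothetical list entry V(va, VA, 0xE7F3), the preprocessor
would produce roughly:

    case VA:
      Format(instr, "va" "\t'f1,'f2,'f3");  // adjacent literals concatenate
      break;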
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 7fdf99349e..3ffcd5fc1c 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -61,13 +61,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {r4};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {r3, r5};
+ Register registers[] = {r3, r4, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index fbf82ccbc5..f087cc4c8a 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -1306,17 +1306,16 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- mov(r6, Operand(last_step_action));
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ mov(r6, Operand(debug_hook_active));
LoadB(r6, MemOperand(r6));
- CmpP(r6, Operand(StepIn));
- blt(&skip_flooding);
+ CmpP(r6, Operand::Zero());
+ beq(&skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -1332,7 +1331,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
Push(new_target);
}
Push(fun, fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -1346,7 +1345,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
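
The new hook gate is deliberately cheap on the fast path: a one-byte load from a process-global address (LoadB) and a compare against zero, with the runtime call only behind the taken branch. A minimal C++ sketch of the control flow the generated code implements, with illustrative names rather than the real V8 externals:

#include <cstdint>

static uint8_t debug_hook_on_function_call = 0;  // stand-in for the external flag

template <typename Fn>
void InvokeWithDebugHook(Fn js_call, void (*on_function_call)()) {
  // Mirrors LoadB + CmpP(..., Operand::Zero()) + beq(&skip_hook) above.
  if (debug_hook_on_function_call != 0) {
    on_function_call();  // roughly Runtime::kDebugOnFunctionCall
  }
  js_call();
}
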
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@@ -1360,8 +1359,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r3));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -1579,25 +1578,18 @@ void MacroAssembler::Allocate(int object_size, Register result,
// Set up allocation top address register.
Register top_address = scratch1;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
Register result_end = scratch2;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into ip.
LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- CmpP(result, alloc_limit);
+ CmpP(result, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1611,7 +1603,7 @@ void MacroAssembler::Allocate(int object_size, Register result,
Label aligned;
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
- CmpLogicalP(result, alloc_limit);
+ CmpLogicalP(result, MemOperand(top_address, limit - top));
bge(gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1621,27 +1613,26 @@ void MacroAssembler::Allocate(int object_size, Register result,
#endif
}
- // Calculate new top and bail out if new space is exhausted. Use result
- // to calculate the new top.
- SubP(r0, alloc_limit, result);
- if (is_int16(object_size)) {
- CmpP(r0, Operand(object_size));
- blt(gc_required);
- AddP(result_end, result, Operand(object_size));
- } else {
- mov(result_end, Operand(object_size));
- CmpP(r0, result_end);
- blt(gc_required);
- AddP(result_end, result, result_end);
- }
+ AddP(result_end, result, Operand(object_size));
+
+ // Compare with allocation limit.
+ CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+ bge(gc_required);
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
StoreP(result_end, MemOperand(top_address));
}
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
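
The rewritten sequence (here and in the Register-sized variant below) no longer pins the allocation limit in ip; it adds the object size to the current top and compares the result against the limit word in memory, the limit living at a fixed offset from the top. A hedged C++ model of that bump-pointer flow, not the real heap interface:

#include <cstddef>
#include <cstdint>

struct AllocationSpace {
  uintptr_t top;    // MemOperand(top_address) in the diff
  uintptr_t limit;  // MemOperand(top_address, limit - top)
};

inline void* BumpAllocate(AllocationSpace* space, size_t object_size) {
  uintptr_t result = space->top;
  uintptr_t result_end = result + object_size;     // AddP(result_end, ...)
  // CmpLogicalP + bge(gc_required): unsigned compare, bail out to GC.
  if (result_end >= space->limit) return nullptr;
  space->top = result_end;                         // StoreP(result_end, ...)
  return reinterpret_cast<void*>(result);
}
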
void MacroAssembler::Allocate(Register object_size, Register result,
@@ -1676,24 +1667,17 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Set up allocation top address and allocation limit registers.
Register top_address = scratch;
- // This code stores a temporary value in ip. This is OK, as the code below
- // does not need ip for implicit literal generation.
- Register alloc_limit = ip;
mov(top_address, Operand(allocation_top));
if ((flags & RESULT_CONTAINS_TOP) == 0) {
- // Load allocation top into result and allocation limit into alloc_limit..
+ // Load allocation top into result.
LoadP(result, MemOperand(top_address));
- LoadP(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- LoadP(alloc_limit, MemOperand(top_address));
- CmpP(result, alloc_limit);
+ CmpP(result, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop);
}
- // Load allocation limit. Result already contains allocation top.
- LoadP(alloc_limit, MemOperand(top_address, limit - top));
}
if ((flags & DOUBLE_ALIGNMENT) != 0) {
@@ -1707,7 +1691,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
Label aligned;
beq(&aligned, Label::kNear);
if ((flags & PRETENURE) != 0) {
- CmpLogicalP(result, alloc_limit);
+ CmpLogicalP(result, MemOperand(top_address, limit - top));
bge(gc_required);
}
mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
@@ -1720,17 +1704,14 @@ void MacroAssembler::Allocate(Register object_size, Register result,
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
- SubP(r0, alloc_limit, result);
if ((flags & SIZE_IN_WORDS) != 0) {
ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2));
- CmpP(r0, result_end);
- blt(gc_required);
AddP(result_end, result, result_end);
} else {
- CmpP(r0, object_size);
- blt(gc_required);
AddP(result_end, result, object_size);
}
+ CmpLogicalP(result_end, MemOperand(top_address, limit - top));
+ bge(gc_required);
// Update allocation top. result temporarily holds the new top.
if (emit_debug_code()) {
@@ -1742,8 +1723,15 @@ void MacroAssembler::Allocate(Register object_size, Register result,
StoreP(result_end, MemOperand(top_address));
}
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
void MacroAssembler::FastAllocate(Register object_size, Register result,
@@ -1795,8 +1783,15 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
}
StoreP(result_end, MemOperand(top_address));
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
+
// Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
+ la(result, MemOperand(result, kHeapObjectTag));
}
void MacroAssembler::FastAllocate(int object_size, Register result,
@@ -1837,103 +1832,34 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
#endif
}
+#if V8_TARGET_ARCH_S390X
+ // Limit to 64-bit only, as double alignment check above may adjust
+ // allocation top by an extra kDoubleSize/2.
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(object_size)) {
+ // Update allocation top.
+ AddP(MemOperand(top_address), Operand(object_size));
+ } else {
+ // Calculate new top using result.
+ AddP(result_end, result, Operand(object_size));
+ // Update allocation top.
+ StoreP(result_end, MemOperand(top_address));
+ }
+#else
// Calculate new top using result.
AddP(result_end, result, Operand(object_size));
-
- // The top pointer is not updated for allocation folding dominators.
+ // Update allocation top.
StoreP(result_end, MemOperand(top_address));
+#endif
- // Tag object.
- AddP(result, result, Operand(kHeapObjectTag));
-}
-
-void MacroAssembler::AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-
- ShiftLeftP(scratch1, length, Operand(1)); // Length in bytes, not chars.
- AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
-
- AndP(scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate two-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kCharSize == 1);
- AddP(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
- AndP(scratch1, Operand(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(scratch1, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
- scratch1, scratch2);
-}
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
- scratch2);
-}
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
+ // Prefetch the allocation_top's next cache line in advance to
+ // help alleviate potential cache misses.
+ // Mode 2 - Prefetch the data into a cache line for store access.
+ pfd(r2, MemOperand(result, 256));
+ }
- InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
- scratch1, scratch2);
+ // Tag object.
+ la(result, MemOperand(result, kHeapObjectTag));
}
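
On s390x with GENERAL_INSTR_EXT, a constant-size fast allocation can bump the top word with a single add-immediate-to-storage instead of the load/add/store triple, provided the size fits in the instruction's signed 8-bit immediate. A sketch of that dispatch, with a plain pointer standing in for the allocation-top address:

#include <cstdint>

inline bool IsInt8(int v) { return v >= -128 && v <= 127; }

inline void BumpAllocationTop(uintptr_t* top_address, int object_size,
                              uintptr_t result, bool general_instr_ext) {
  if (general_instr_ext && IsInt8(object_size)) {
    // Single add-to-memory (the AddP(MemOperand, Operand) form): no
    // result_end register and no separate store needed.
    *top_address += static_cast<uintptr_t>(object_size);
  } else {
    // Classic path: compute the new top in a register, then store it.
    uintptr_t result_end = result + static_cast<uintptr_t>(object_size);
    *top_address = result_end;
  }
}
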
void MacroAssembler::CompareObjectType(Register object, Register map,
@@ -1956,62 +1882,10 @@ void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2));
}
-void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- ble(fail);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleyElementValue));
- bgt(fail);
-}
-
-void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
- Label* fail) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset),
- Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
- bgt(fail);
-}
-
void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
SmiUntag(ip, smi);
ConvertIntToDouble(ip, value);
}
-void MacroAssembler::StoreNumberToDoubleElements(
- Register value_reg, Register key_reg, Register elements_reg,
- Register scratch1, DoubleRegister double_scratch, Label* fail,
- int elements_offset) {
- DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
- Label smi_value, store;
-
- // Handle smi values specially.
- JumpIfSmi(value_reg, &smi_value);
-
- // Ensure that the object is a heap number
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
- DONT_DO_SMI_CHECK);
-
- LoadDouble(double_scratch,
- FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- // Force a canonical NaN.
- CanonicalizeNaN(double_scratch);
- b(&store);
-
- bind(&smi_value);
- SmiToDouble(double_scratch, value_reg);
-
- bind(&store);
- SmiToDoubleArrayOffset(scratch1, key_reg);
- StoreDouble(double_scratch,
- FieldMemOperand(elements_reg, scratch1,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
Label* early_success) {
@@ -2491,23 +2365,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind, ElementsKind transitioned_kind,
- Register map_in_out, Register scratch, Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- LoadP(scratch, NativeContextMemOperand());
- LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- CmpP(map_in_out, ip);
- bne(no_map_match);
-
- // Use the transitioned cached map.
- LoadP(map_in_out,
- ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
LoadP(dst, NativeContextMemOperand());
LoadP(dst, ContextMemOperand(dst, index));
@@ -2592,25 +2449,6 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
beq(smi_case);
}
-void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
- Label* non_smi_case) {
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-
- // We can more optimally use TestIfSmi if dst != src
- // otherwise, the UnTag operation will kill the CC and we cannot
- // test the Tag bit.
- if (src.code() != dst.code()) {
- SmiUntag(dst, src);
- TestIfSmi(src);
- } else {
- TestBit(src, 0, r0);
- SmiUntag(dst, src);
- LoadAndTestRR(r0, r0);
- }
- bne(non_smi_case);
-}
-
void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
Label* on_either_smi) {
STATIC_ASSERT(kSmiTag == 0);
@@ -2881,20 +2719,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
bne(failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatOneByteStringTag =
- kStringTag | kOneByteStringTag | kSeqStringTag;
-
- if (!scratch.is(type)) LoadRR(scratch, type);
- nilf(scratch, Operand(kFlatOneByteStringMask));
- CmpP(scratch, Operand(kFlatOneByteStringTag));
- bne(failure);
-}
-
static const int kRegisterPassedArguments = 5;
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
@@ -3307,12 +3131,10 @@ void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
LoadB(dst, mem);
- lgbr(dst, dst);
} else if (r.IsUInteger8()) {
LoadlB(dst, mem);
} else if (r.IsInteger16()) {
LoadHalfWordP(dst, mem, scratch);
- lghr(dst, dst);
} else if (r.IsUInteger16()) {
LoadHalfWordP(dst, mem, scratch);
#if V8_TARGET_ARCH_S390X
@@ -3413,42 +3235,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
return no_reg;
}
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Register current = scratch0;
- Label loop_again, end;
-
- // scratch contained elements pointer.
- LoadRR(current, object);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- beq(&end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
-
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- LoadlB(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
- CmpP(scratch1, Operand(JS_OBJECT_TYPE));
- blt(found);
-
- LoadlB(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- CmpP(scratch1, Operand(DICTIONARY_ELEMENTS));
- beq(found);
- LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- bne(&loop_again);
-
- bind(&end);
-}
-
void MacroAssembler::mov(Register dst, const Operand& src) {
if (src.rmode_ != kRelocInfo_NONEPTR) {
// some form of relocation needed
@@ -3499,13 +3285,17 @@ void MacroAssembler::Mul64(Register dst, const Operand& src1) {
}
void MacroAssembler::Mul(Register dst, Register src1, Register src2) {
- if (dst.is(src2)) {
- MulP(dst, src1);
- } else if (dst.is(src1)) {
- MulP(dst, src2);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ MulPWithCondition(dst, src1, src2);
} else {
- Move(dst, src1);
- MulP(dst, src2);
+ if (dst.is(src2)) {
+ MulP(dst, src1);
+ } else if (dst.is(src1)) {
+ MulP(dst, src2);
+ } else {
+ Move(dst, src1);
+ MulP(dst, src2);
+ }
}
}
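
With MISC_INSTR_EXT2, the three-operand multiply-with-condition-code (msrkc/msgrkc, wrapped by MulPWithCondition below) removes the destination-must-alias-a-source shuffling of the legacy two-operand MulP path and sets overflow directly. A feature-dispatched sketch in plain C++, modeling the condition code as a bool and using the GCC/Clang overflow builtin as an assumed stand-in:

#include <cstdint>

struct Cpu { bool misc_instr_ext2; };

inline int64_t Mul(const Cpu& cpu, int64_t src1, int64_t src2,
                   bool* overflow) {
  if (cpu.misc_instr_ext2) {
    // MSGRKC-style: one instruction yields both product and overflow CC.
    int64_t result;
    *overflow = __builtin_mul_overflow(src1, src2, &result);
    return result;
  }
  // Legacy path: two-operand multiply, destination aliases an input,
  // hence the dst.is(src1)/dst.is(src2) cases in the diff.
  *overflow = false;  // MSGR sets no overflow condition code
  return src1 * src2;
}
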
@@ -3535,6 +3325,16 @@ void MacroAssembler::MulP(Register dst, Register src) {
#endif
}
+void MacroAssembler::MulPWithCondition(Register dst, Register src1,
+ Register src2) {
+ CHECK(CpuFeatures::IsSupported(MISC_INSTR_EXT2));
+#if V8_TARGET_ARCH_S390X
+ msgrkc(dst, src1, src2);
+#else
+ msrkc(dst, src1, src2);
+#endif
+}
+
void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
if (is_uint16(opnd.offset())) {
@@ -3553,6 +3353,17 @@ void MacroAssembler::MulP(Register dst, const MemOperand& opnd) {
#endif
}
+void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) {
+ sqdbr(result, input);
+}
+void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) {
+ if (is_uint12(input.offset())) {
+ sqdb(result, input);
+ } else {
+ ldy(result, input);
+ sqdbr(result, result);
+ }
+}
//----------------------------------------------------------------------------
// Add Instructions
//----------------------------------------------------------------------------
@@ -3955,8 +3766,8 @@ void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
}
void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
- sllg(src, src, Operand(32));
- ldgr(dst, src);
+ sllg(r0, src, Operand(32));
+ ldgr(dst, r0);
}
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
@@ -4339,7 +4150,7 @@ void MacroAssembler::Load(Register dst, const Operand& opnd) {
#endif
} else {
#if V8_TARGET_ARCH_S390X
- llilf(dst, opnd);
+ lgfi(dst, opnd);
#else
iilf(dst, opnd);
#endif
@@ -4359,6 +4170,19 @@ void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
#endif
}
+void MacroAssembler::LoadPositiveP(Register result, Register input) {
+#if V8_TARGET_ARCH_S390X
+ lpgr(result, input);
+#else
+ lpr(result, input);
+#endif
+}
+
+void MacroAssembler::LoadPositive32(Register result, Register input) {
+ lpr(result, input);
+ lgfr(result, result);
+}
+
//-----------------------------------------------------------------------------
// Compare Helpers
//-----------------------------------------------------------------------------
@@ -4532,9 +4356,16 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
uint32_t lo_32 = static_cast<uint32_t>(value);
// Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
- iihf(scratch, Operand(hi_32));
- iilf(scratch, Operand(lo_32));
- ldgr(result, scratch);
+ if (value == 0) {
+ lzdr(result);
+ } else if (lo_32 == 0) {
+ llihf(scratch, Operand(hi_32));
+ ldgr(result, scratch);
+ } else {
+ iihf(scratch, Operand(hi_32));
+ iilf(scratch, Operand(lo_32));
+ ldgr(result, scratch);
+ }
}
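
The literal loader now special-cases values by their 32-bit halves: all-zero bits take a single load-FP-zero, a zero low half takes a high-half load plus transfer, and everything else keeps the full three-instruction sequence. A small C++ illustration of the case split, assuming only standard bit_cast-style behavior:

#include <cstdint>
#include <cstring>

inline uint64_t DoubleBits(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));  // bit_cast
  return bits;
}

inline int LoadDoubleLiteralCost(double v) {
  uint64_t value = DoubleBits(v);
  uint32_t lo_32 = static_cast<uint32_t>(value);
  if (value == 0) return 1;  // lzdr: load FP zero directly
  if (lo_32 == 0) return 2;  // llihf + ldgr: only the high half is set
  return 3;                  // iihf + iilf + ldgr: general case
}
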
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
@@ -4545,19 +4376,19 @@ void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
Register scratch) {
- uint32_t hi_32 = bit_cast<uint32_t>(value);
- uint32_t lo_32 = 0;
-
- // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
- iihf(scratch, Operand(hi_32));
- iilf(scratch, Operand(lo_32));
- ldgr(result, scratch);
+ uint64_t int_val = static_cast<uint64_t>(bit_cast<uint32_t, float>(value))
+ << 32;
+ LoadDoubleLiteral(result, int_val, scratch);
}
void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- cgr(src1, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ cih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ cgr(src1, scratch);
+ }
#else
// CFI takes 32-bit immediate.
cfi(src1, Operand(smi));
@@ -4567,8 +4398,12 @@ void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- clgr(src1, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ clih(src1, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ clgr(src1, scratch);
+ }
#else
// CLFI takes 32-bit immediate
clfi(src1, Operand(smi));
@@ -4578,8 +4413,13 @@ void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- AddP(dst, src, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ aih(dst, Operand(reinterpret_cast<intptr_t>(smi) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ AddP(dst, src, scratch);
+ }
#else
AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
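
These DISTINCT_OPS paths work because a 64-bit smi keeps its payload in the upper 32 bits of the word, with an all-zero tag in the lower word; aih/cih/clih therefore operate on `smi >> 32` directly and never need the full literal in a scratch register. An illustrative model (tagged values as int64_t, not real Smi pointers):

#include <cstdint>

inline int64_t SmiFromInt(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // 64-bit tagged representation
}

inline int64_t AddSmiLiteral(int64_t tagged, int64_t smi_literal) {
  // aih adds an immediate to the high 32 bits; adding (smi >> 32) to the
  // high word is exactly adding the untagged payloads.
  int32_t hi = static_cast<int32_t>(tagged >> 32);
  hi += static_cast<int32_t>(smi_literal >> 32);
  return (static_cast<int64_t>(hi) << 32) |
         (tagged & 0xFFFFFFFFu);  // low word (the tag bits) is unchanged
}
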
@@ -4588,8 +4428,13 @@ void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
Register scratch) {
#if V8_TARGET_ARCH_S390X
- LoadSmiLiteral(scratch, smi);
- SubP(dst, src, scratch);
+ if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+ if (!dst.is(src)) LoadRR(dst, src);
+ aih(dst, Operand((-reinterpret_cast<intptr_t>(smi)) >> 32));
+ } else {
+ LoadSmiLiteral(scratch, smi);
+ SubP(dst, src, scratch);
+ }
#else
AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
diff --git a/deps/v8/src/s390/macro-assembler-s390.h b/deps/v8/src/s390/macro-assembler-s390.h
index 06fcaf0519..572f2759f6 100644
--- a/deps/v8/src/s390/macro-assembler-s390.h
+++ b/deps/v8/src/s390/macro-assembler-s390.h
@@ -319,10 +319,15 @@ class MacroAssembler : public Assembler {
void Mul64(Register dst, const MemOperand& src1);
void Mul64(Register dst, Register src1);
void Mul64(Register dst, const Operand& src1);
+ void MulPWithCondition(Register dst, Register src1, Register src2);
// Divide
void DivP(Register dividend, Register divider);
+ // Square root
+ void Sqrt(DoubleRegister result, DoubleRegister input);
+ void Sqrt(DoubleRegister result, const MemOperand& input);
+
// Compare
void Cmp32(Register src1, Register src2);
void CmpP(Register src1, Register src2);
@@ -374,6 +379,9 @@ class MacroAssembler : public Assembler {
// Load On Condition
void LoadOnConditionP(Condition cond, Register dst, Register src);
+ void LoadPositiveP(Register result, Register input);
+ void LoadPositive32(Register result, Register input);
+
// Store Floating Point
void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
@@ -784,16 +792,6 @@ class MacroAssembler : public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@@ -838,8 +836,10 @@ class MacroAssembler : public Assembler {
void StoreRepresentation(Register src, const MemOperand& mem,
Representation r, Register scratch = no_reg);
- void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
- void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+ void AddSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch = r0);
+ void SubSmiLiteral(Register dst, Register src, Smi* smi,
+ Register scratch = r0);
void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
void AndSmiLiteral(Register dst, Register src, Smi* smi);
@@ -891,9 +891,10 @@ class MacroAssembler : public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -990,25 +991,6 @@ class MacroAssembler : public Assembler {
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
- void AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateTwoByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateTwoByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register length,
- Register scratch1, Register scratch2,
- Label* gc_required);
-
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@@ -1071,22 +1053,6 @@ class MacroAssembler : public Assembler {
// sets the flags and leaves the object type in the type_reg register.
void CompareInstanceType(Register map, Register type_reg, InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map, Register scratch, Label* fail);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map, Register scratch, Label* fail);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements. Otherwise jump to fail.
- void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
- Register elements_reg, Register scratch1,
- DoubleRegister double_scratch, Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@@ -1576,12 +1542,19 @@ class MacroAssembler : public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
- // Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
- void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
-
inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
+ inline void TestIfSmi(MemOperand value) {
+ if (is_uint12(value.offset())) {
+ tm(value, Operand(1));
+ } else if (is_int20(value.offset())) {
+ tmy(value, Operand(1));
+ } else {
+ LoadB(r0, value);
+ tmll(r0, Operand(1));
+ }
+ }
+
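
The new memory-operand overload picks its instruction from the displacement width: the short D-field form for unsigned 12-bit offsets, the long-displacement form for signed 20-bit offsets, and otherwise a load through r0. A plain C++ sketch of that selection logic (the ranges are the s390 encoding limits; the enum is illustrative):

#include <cstdint>

enum class SmiTestOp { kTM, kTMY, kLoadThenTMLL };

inline SmiTestOp SelectSmiTest(int32_t offset) {
  bool is_uint12 = offset >= 0 && offset < (1 << 12);
  bool is_int20 = offset >= -(1 << 19) && offset < (1 << 19);
  if (is_uint12) return SmiTestOp::kTM;   // short-displacement form
  if (is_int20) return SmiTestOp::kTMY;   // long-displacement form
  return SmiTestOp::kLoadThenTMLL;        // materialize via r0 first
}
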
inline void TestIfPositiveSmi(Register value, Register scratch) {
STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
(intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
@@ -1695,11 +1668,6 @@ class MacroAssembler : public Assembler {
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
- // Check if instance type is sequential one-byte string and jump to label if
- // it is not.
- void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
- Label* failure);
-
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string, Register index,
@@ -1772,21 +1740,6 @@ class MacroAssembler : public Assembler {
Register scratch2_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Register scratch2_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, scratch2_reg,
- &no_memento_found);
- beq(memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
private:
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index 74d37bc20a..311e59d5e6 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -743,6 +743,18 @@ void Simulator::EvalTableInit() {
EvalTable[i] = &Simulator::Evaluate_Unknown;
}
+#define S390_SUPPORTED_VECTOR_OPCODE_LIST(V) \
+ V(vfs, VFS, 0xE7E2) /* type = VRR_C VECTOR FP SUBTRACT */ \
+ V(vfa, VFA, 0xE7E3) /* type = VRR_C VECTOR FP ADD */ \
+ V(vfd, VFD, 0xE7E5) /* type = VRR_C VECTOR FP DIVIDE */ \
+ V(vfm, VFM, 0xE7E7) /* type = VRR_C VECTOR FP MULTIPLY */
+
+#define CREATE_EVALUATE_TABLE(name, op_name, op_value) \
+ EvalTable[op_name] = &Simulator::Evaluate_##op_name;
+ S390_SUPPORTED_VECTOR_OPCODE_LIST(CREATE_EVALUATE_TABLE);
+#undef CREATE_EVALUATE_TABLE
+
+ EvalTable[DUMY] = &Simulator::Evaluate_DUMY;
EvalTable[BKPT] = &Simulator::Evaluate_BKPT;
EvalTable[SPM] = &Simulator::Evaluate_SPM;
EvalTable[BALR] = &Simulator::Evaluate_BALR;
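
The vector opcodes are wired up with the X-macro pattern: one list macro (S390_VRR_C_OPCODE_LIST and friends) is expanded once here to fill the dispatch table, again in the header to declare the handlers, and again in the disassembler to emit switch cases. A self-contained miniature of the idiom, with a made-up two-entry list:

#include <cstdio>

#define DEMO_OPCODE_LIST(V) \
  V(vfa, VFA, 0xE7E3)       \
  V(vfs, VFS, 0xE7E2)

// Expansion 1: one handler declaration per opcode.
#define DECLARE_HANDLER(name, op_name, op_value) void Evaluate_##op_name();
DEMO_OPCODE_LIST(DECLARE_HANDLER)
#undef DECLARE_HANDLER

// Expansion 2: the same list fills a mnemonic/value table.
struct Entry { const char* mnemonic; int value; };
static const Entry kTable[] = {
#define MAKE_ENTRY(name, op_name, op_value) {#name, op_value},
    DEMO_OPCODE_LIST(MAKE_ENTRY)
#undef MAKE_ENTRY
};

void Evaluate_VFA() { std::puts("vfa"); }
void Evaluate_VFS() { std::puts("vfs"); }
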
@@ -953,6 +965,7 @@ void Simulator::EvalTableInit() {
EvalTable[ALSIH] = &Simulator::Evaluate_ALSIH;
EvalTable[ALSIHN] = &Simulator::Evaluate_ALSIHN;
EvalTable[CIH] = &Simulator::Evaluate_CIH;
+ EvalTable[CLIH] = &Simulator::Evaluate_CLIH;
EvalTable[STCK] = &Simulator::Evaluate_STCK;
EvalTable[CFC] = &Simulator::Evaluate_CFC;
EvalTable[IPM] = &Simulator::Evaluate_IPM;
@@ -972,6 +985,7 @@ void Simulator::EvalTableInit() {
EvalTable[SAR] = &Simulator::Evaluate_SAR;
EvalTable[EAR] = &Simulator::Evaluate_EAR;
EvalTable[MSR] = &Simulator::Evaluate_MSR;
+ EvalTable[MSRKC] = &Simulator::Evaluate_MSRKC;
EvalTable[MVST] = &Simulator::Evaluate_MVST;
EvalTable[CUSE] = &Simulator::Evaluate_CUSE;
EvalTable[SRST] = &Simulator::Evaluate_SRST;
@@ -1145,6 +1159,7 @@ void Simulator::EvalTableInit() {
EvalTable[ALGR] = &Simulator::Evaluate_ALGR;
EvalTable[SLGR] = &Simulator::Evaluate_SLGR;
EvalTable[MSGR] = &Simulator::Evaluate_MSGR;
+ EvalTable[MSGRKC] = &Simulator::Evaluate_MSGRKC;
EvalTable[DSGR] = &Simulator::Evaluate_DSGR;
EvalTable[LRVGR] = &Simulator::Evaluate_LRVGR;
EvalTable[LPGFR] = &Simulator::Evaluate_LPGFR;
@@ -6049,6 +6064,15 @@ uintptr_t Simulator::PopAddress() {
int d2 = AS(RXEInstruction)->D2Value(); \
int length = 6;
+#define DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4) \
+ int r1 = AS(VRR_C_Instruction)->R1Value(); \
+ int r2 = AS(VRR_C_Instruction)->R2Value(); \
+ int r3 = AS(VRR_C_Instruction)->R3Value(); \
+ int m6 = AS(VRR_C_Instruction)->M6Value(); \
+ int m5 = AS(VRR_C_Instruction)->M5Value(); \
+ int m4 = AS(VRR_C_Instruction)->M4Value(); \
+ int length = 6;
+
#define GET_ADDRESS(index_reg, base_reg, offset) \
(((index_reg) == 0) ? 0 : get_register(index_reg)) + \
(((base_reg) == 0) ? 0 : get_register(base_reg)) + offset
@@ -6058,6 +6082,77 @@ int Simulator::Evaluate_Unknown(Instruction* instr) {
return 0;
}
+EVALUATE(VFA) {
+ DCHECK_OPCODE(VFA);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ USE(m4);
+ DCHECK(m5 == 8);
+ DCHECK(m4 == 3);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ double r1_val = r2_val + r3_val;
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(VFS) {
+ DCHECK_OPCODE(VFS);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ USE(m4);
+ DCHECK(m5 == 8);
+ DCHECK(m4 == 3);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ double r1_val = r2_val - r3_val;
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(VFM) {
+ DCHECK_OPCODE(VFM);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ USE(m4);
+ DCHECK(m5 == 8);
+ DCHECK(m4 == 3);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ double r1_val = r2_val * r3_val;
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(VFD) {
+ DCHECK_OPCODE(VFD);
+ DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
+ USE(m6);
+ USE(m5);
+ USE(m4);
+ DCHECK(m5 == 8);
+ DCHECK(m4 == 3);
+ double r2_val = get_double_from_d_register(r2);
+ double r3_val = get_double_from_d_register(r3);
+ double r1_val = r2_val / r3_val;
+ set_d_register_from_double(r1, r1_val);
+ return length;
+}
+
+EVALUATE(DUMY) {
+ DCHECK_OPCODE(DUMY);
+ DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
+ USE(r1);
+ USE(x2);
+ USE(b2);
+ USE(d2);
+ // dummy instruction does nothing.
+ return length;
+}
+
EVALUATE(CLR) {
DCHECK_OPCODE(CLR);
DECODE_RR_INSTRUCTION(r1, r2);
@@ -6474,9 +6569,18 @@ EVALUATE(CLCL) {
}
EVALUATE(LPR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LPR);
+ // Load Positive (32)
+ DECODE_RR_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ // If negative, then negate it.
+ r2_val = (r2_val < 0) ? -r2_val : r2_val;
+ set_low_register(r1, r2_val);
+ SetS390ConditionCode<int32_t>(r2_val, 0);
+ if (r2_val == (static_cast<int32_t>(1) << 31)) {
+ SetS390OverflowCode(true);
+ }
+ return length;
}
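
Load Positive is an absolute-value operation with condition codes, and the one input without a positive counterpart is the minimum integer, which wraps to itself and raises overflow. A UB-free C++ model of the 32-bit case:

#include <cstdint>
#include <climits>

inline int32_t LoadPositive32(int32_t v, bool* overflow) {
  // -INT32_MIN is unrepresentable; the hardware result wraps back to
  // INT32_MIN and the overflow condition is set.
  *overflow = (v == INT32_MIN);
  return *overflow ? v : (v < 0 ? -v : v);
}
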
EVALUATE(LNR) {
@@ -7677,47 +7781,38 @@ EVALUATE(TMLH) {
EVALUATE(TMLL) {
DCHECK_OPCODE(TMLL);
DECODE_RI_A_INSTRUCTION(instr, r1, i2);
- int mask = i2 & 0x0000FFFF;
- if (mask == 0) {
- condition_reg_ = 0x0;
- return length;
- }
+ uint32_t mask = i2 & 0x0000FFFF;
uint32_t r1_val = get_low_register<uint32_t>(r1);
r1_val = r1_val & 0x0000FFFF; // uses only the last 16bits
- // Test if all selected bits are Zero
- bool allSelectedBitsAreZeros = true;
- for (int i = 0; i < 15; i++) {
- if (mask & (1 << i)) {
- if (r1_val & (1 << i)) {
- allSelectedBitsAreZeros = false;
- break;
- }
- }
- }
- if (allSelectedBitsAreZeros) {
+ // Test if all selected bits are zero, or the mask itself is zero
+ if (0 == (mask & r1_val)) {
condition_reg_ = 0x8;
return length; // Done!
}
+ DCHECK(mask != 0);
// Test if all selected bits are one
- bool allSelectedBitsAreOnes = true;
- for (int i = 0; i < 15; i++) {
- if (mask & (1 << i)) {
- if (!(r1_val & (1 << i))) {
- allSelectedBitsAreOnes = false;
- break;
- }
- }
- }
- if (allSelectedBitsAreOnes) {
+ if (mask == (mask & r1_val)) {
condition_reg_ = 0x1;
return length; // Done!
}
// Now we know the selected bits are a mix of zeros and ones
// Test if the leftmost bit is zero or one
- for (int i = 14; i >= 0; i--) {
+#if defined(__GNUC__)
+ int leadingZeros = __builtin_clz(mask);
+ mask = 0x80000000u >> leadingZeros;
+ if (mask & r1_val) {
+ // leftmost bit is one
+ condition_reg_ = 0x4;
+ } else {
+ // leftmost bit is zero
+ condition_reg_ = 0x2;
+ }
+ return length; // Done!
+#else
+ for (int i = 15; i >= 0; i--) {
if (mask & (1 << i)) {
if (r1_val & (1 << i)) {
// leftmost bit is one
@@ -7729,6 +7824,8 @@ EVALUATE(TMLL) {
return length; // Done!
}
}
+#endif
+ UNREACHABLE();
return length;
}
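
The rewrite replaces the old per-bit loops with mask algebra: condition 0x8 when no selected bit is set, 0x1 when every selected bit is set, and otherwise 0x4/0x2 depending on the leftmost selected bit, found with a count-leading-zeros. A compact C++ model of the same computation (pure function, not simulator state):

#include <cstdint>

inline int TestUnderMaskLow(uint32_t reg, uint32_t mask16) {
  uint32_t mask = mask16 & 0xFFFFu;
  uint32_t val = reg & 0xFFFFu;
  if ((mask & val) == 0) return 0x8;     // all selected bits zero (or mask 0)
  if ((mask & val) == mask) return 0x1;  // all selected bits one
  // Mixed: the condition depends on the leftmost selected bit of the value.
  uint32_t leftmost = 0x80000000u >> __builtin_clz(mask);  // mask != 0 here
  return (val & leftmost) ? 0x4 : 0x2;
}
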
@@ -8220,9 +8317,15 @@ EVALUATE(BRCTH) {
}
EVALUATE(AIH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(AIH);
+ DECODE_RIL_A_INSTRUCTION(r1, i2);
+ int32_t r1_val = get_high_register<int32_t>(r1);
+ bool isOF = CheckOverflowForIntAdd(r1_val, static_cast<int32_t>(i2), int32_t);
+ r1_val += static_cast<int32_t>(i2);
+ set_high_register(r1, r1_val);
+ SetS390ConditionCode<int32_t>(r1_val, 0);
+ SetS390OverflowCode(isOF);
+ return length;
}
EVALUATE(ALSIH) {
@@ -8238,9 +8341,19 @@ EVALUATE(ALSIHN) {
}
EVALUATE(CIH) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(CIH);
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ int32_t r1_val = get_high_register<int32_t>(r1);
+ SetS390ConditionCode<int32_t>(r1_val, static_cast<int32_t>(imm));
+ return length;
+}
+
+EVALUATE(CLIH) {
+ DCHECK_OPCODE(CLIH);
+ // Compare Logical with Immediate (32)
+ DECODE_RIL_A_INSTRUCTION(r1, imm);
+ SetS390ConditionCode<uint32_t>(get_high_register<uint32_t>(r1), imm);
+ return length;
}
EVALUATE(STCK) {
@@ -8360,6 +8473,21 @@ EVALUATE(MSR) {
return length;
}
+EVALUATE(MSRKC) {
+ DCHECK_OPCODE(MSRKC);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ int32_t r3_val = get_low_register<int32_t>(r3);
+ int64_t result64 =
+ static_cast<int64_t>(r2_val) * static_cast<int64_t>(r3_val);
+ int32_t result32 = static_cast<int32_t>(result64);
+ bool isOF = (static_cast<int64_t>(result32) != result64);
+ SetS390ConditionCode<int32_t>(result32, 0);
+ SetS390OverflowCode(isOF);
+ set_low_register(r1, result32);
+ return length;
+}
+
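
The 32-bit multiply-with-CC model detects overflow by widening: compute the exact product in 64 bits and check that it survives truncation back to 32. The same test in isolation:

#include <cstdint>

inline int32_t MulWithOverflow32(int32_t a, int32_t b, bool* overflow) {
  int64_t wide = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  int32_t narrow = static_cast<int32_t>(wide);
  *overflow = (static_cast<int64_t>(narrow) != wide);  // lost bits => overflow
  return narrow;
}
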
EVALUATE(MVST) {
UNIMPLEMENTED();
USE(instr);
@@ -9800,9 +9928,17 @@ EVALUATE(RRXTR) {
}
EVALUATE(LPGR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LPGR);
+ // Load Positive (64)
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int64_t r2_val = get_register(r2);
+ r2_val = (r2_val < 0) ? -r2_val : r2_val; // If negative, then negate it.
+ set_register(r1, r2_val);
+ SetS390ConditionCode<int64_t>(r2_val, 0);
+ if (r2_val == (static_cast<int64_t>(1) << 63)) {
+ SetS390OverflowCode(true);
+ }
+ return length;
}
EVALUATE(LNGR) {
@@ -9877,6 +10013,20 @@ EVALUATE(MSGR) {
return length;
}
+EVALUATE(MSGRKC) {
+ DCHECK_OPCODE(MSGRKC);
+ DECODE_RRF_A_INSTRUCTION(r1, r2, r3);
+ int64_t r2_val = get_register(r2);
+ int64_t r3_val = get_register(r3);
+ volatile int64_t result64 = r2_val * r3_val;
+ bool isOF = ((r2_val == -1 && result64 == (static_cast<int64_t>(1L) << 63)) ||
+ (r2_val != 0 && result64 / r2_val != r3_val));
+ SetS390ConditionCode<int64_t>(result64, 0);
+ SetS390OverflowCode(isOF);
+ set_register(r1, result64);
+ return length;
+}
+
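
For the 64-bit multiply there is no wider type to widen into, so the model checks overflow by dividing the product back and comparing, with the -1 multiplier handled separately because INT64_MIN / -1 itself traps. The check as a standalone predicate:

#include <cstdint>
#include <climits>

inline bool MulOverflows64(int64_t a, int64_t b, int64_t product) {
  if (a == -1) return b == INT64_MIN;  // only -1 * INT64_MIN wraps here
  if (a == 0) return false;            // 0 * b never overflows
  return product / a != b;             // truncation happened iff mismatch
}
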
EVALUATE(DSGR) {
DCHECK_OPCODE(DSGR);
DECODE_RRE_INSTRUCTION(r1, r2);
@@ -9901,9 +10051,15 @@ EVALUATE(LRVGR) {
}
EVALUATE(LPGFR) {
- UNIMPLEMENTED();
- USE(instr);
- return 0;
+ DCHECK_OPCODE(LPGFR);
+ // Load Positive (64 <- 32)
+ DECODE_RRE_INSTRUCTION(r1, r2);
+ int32_t r2_val = get_low_register<int32_t>(r2);
+ // If negative, then negate it.
+ int64_t r1_val = static_cast<int64_t>((r2_val < 0) ? -r2_val : r2_val);
+ set_register(r1, r1_val);
+ SetS390ConditionCode<int64_t>(r1_val, 0);
+ return length;
}
EVALUATE(LNGFR) {
@@ -11020,9 +11176,9 @@ EVALUATE(CGH) {
}
EVALUATE(PFD) {
- UNIMPLEMENTED();
+ DCHECK_OPCODE(PFD);
USE(instr);
- return 0;
+ return 6;
}
EVALUATE(STRV) {
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index 1ce6bf776b..c66b05e92d 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -522,6 +522,12 @@ class Simulator {
static void EvalTableInit();
#define EVALUATE(name) int Evaluate_##name(Instruction* instr)
+#define EVALUATE_VRR_INSTRUCTIONS(name, op_name, op_value) EVALUATE(op_name);
+ S390_VRR_C_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
+ S390_VRR_A_OPCODE_LIST(EVALUATE_VRR_INSTRUCTIONS)
+#undef EVALUATE_VRR_INSTRUCTIONS
+
+ EVALUATE(DUMY);
EVALUATE(BKPT);
EVALUATE(SPM);
EVALUATE(BALR);
@@ -732,6 +738,7 @@ class Simulator {
EVALUATE(ALSIH);
EVALUATE(ALSIHN);
EVALUATE(CIH);
+ EVALUATE(CLIH);
EVALUATE(STCK);
EVALUATE(CFC);
EVALUATE(IPM);
@@ -751,6 +758,7 @@ class Simulator {
EVALUATE(SAR);
EVALUATE(EAR);
EVALUATE(MSR);
+ EVALUATE(MSRKC);
EVALUATE(MVST);
EVALUATE(CUSE);
EVALUATE(SRST);
@@ -924,6 +932,7 @@ class Simulator {
EVALUATE(ALGR);
EVALUATE(SLGR);
EVALUATE(MSGR);
+ EVALUATE(MSGRKC);
EVALUATE(DSGR);
EVALUATE(LRVGR);
EVALUATE(LPGFR);
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 86a91643d2..1776cf1e4f 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -88,7 +88,12 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
#define IC_KIND_CASE(KIND) case Code::KIND:
IC_KIND_LIST(IC_KIND_CASE)
#undef IC_KIND_CASE
- SerializeCodeStub(code_object, how_to_code, where_to_point);
+ if (code_object->builtin_index() == -1) {
+ SerializeCodeStub(code_object, how_to_code, where_to_point);
+ } else {
+ SerializeBuiltin(code_object->builtin_index(), how_to_code,
+ where_to_point);
+ }
return;
case Code::FUNCTION:
DCHECK(code_object->has_reloc_info_for_serialization());
@@ -104,6 +109,12 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return SerializeObject(isolate()->heap()->undefined_value(), how_to_code,
where_to_point, skip);
}
+
+ if (obj->IsScript()) {
+ // Wrapper object is a context-dependent JSValue. Reset it here.
+ Script::cast(obj)->set_wrapper(isolate()->heap()->undefined_value());
+ }
+
// Past this point we should not see any (context-specific) maps anymore.
CHECK(!obj->IsMap());
// There should be no references to the global object embedded.
@@ -225,16 +236,20 @@ std::unique_ptr<ScriptData> WasmCompiledModuleSerializer::SerializeWasmModule(
WasmCompiledModuleSerializer wasm_cs(isolate, 0);
wasm_cs.reference_map()->AddAttachedReference(*isolate->native_context());
wasm_cs.reference_map()->AddAttachedReference(
- *compiled_module->module_bytes());
+ compiled_module->module_bytes());
ScriptData* data = wasm_cs.Serialize(compiled_module);
return std::unique_ptr<ScriptData>(data);
}
MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
Isolate* isolate, ScriptData* data, Vector<const byte> wire_bytes) {
+ MaybeHandle<FixedArray> nothing;
+ if (!wasm::IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+ return nothing;
+ }
SerializedCodeData::SanityCheckResult sanity_check_result =
SerializedCodeData::CHECK_SUCCESS;
- MaybeHandle<FixedArray> nothing;
+
const SerializedCodeData scd = SerializedCodeData::FromCachedData(
isolate, data, 0, &sanity_check_result);
@@ -262,10 +277,12 @@ MaybeHandle<FixedArray> WasmCompiledModuleSerializer::DeserializeWasmModule(
MaybeHandle<HeapObject> obj = deserializer.DeserializeObject(isolate);
if (obj.is_null() || !obj.ToHandleChecked()->IsFixedArray()) return nothing;
- Handle<WasmCompiledModule> compiled_module =
- Handle<WasmCompiledModule>::cast(obj.ToHandleChecked());
+ // Cast without type checks, as the module wrapper is not there yet.
+ Handle<WasmCompiledModule> compiled_module(
+ static_cast<WasmCompiledModule*>(*obj.ToHandleChecked()), isolate);
WasmCompiledModule::RecreateModuleWrapper(isolate, compiled_module);
+ DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
return compiled_module;
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index aabd806b7a..87e430baf5 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -93,6 +93,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
DeserializeDeferredObjects();
FlushICacheForNewIsolate();
+ RestoreExternalReferenceRedirectors(&accessor_infos_);
}
isolate_->heap()->set_native_contexts_list(
@@ -111,7 +112,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
}
MaybeHandle<Object> Deserializer::DeserializePartial(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
Initialize(isolate);
if (!ReserveSpace()) {
V8::FatalProcessOutOfMemory("deserialize context");
@@ -128,7 +130,7 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
Object* root;
VisitPointer(&root);
DeserializeDeferredObjects();
- DeserializeInternalFields();
+ DeserializeInternalFields(internal_fields_deserializer);
isolate->heap()->RegisterReservationsForBlackAllocation(reservations_);
@@ -213,14 +215,13 @@ void Deserializer::DeserializeDeferredObjects() {
}
}
-void Deserializer::DeserializeInternalFields() {
+void Deserializer::DeserializeInternalFields(
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
if (!source_.HasMore() || source_.Get() != kInternalFieldsData) return;
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate_);
DisallowCompilation no_compile(isolate_);
- v8::DeserializeInternalFieldsCallback callback =
- isolate_->deserialize_internal_fields_callback();
- DCHECK_NOT_NULL(callback);
+ DCHECK_NOT_NULL(internal_fields_deserializer.callback);
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
HandleScope scope(isolate_);
int space = code & kSpaceMask;
@@ -232,8 +233,9 @@ void Deserializer::DeserializeInternalFields() {
int size = source_.GetInt();
byte* data = new byte[size];
source_.CopyRaw(data, size);
- callback(v8::Utils::ToLocal(obj), index,
- {reinterpret_cast<char*>(data), size});
+ internal_fields_deserializer.callback(v8::Utils::ToLocal(obj), index,
+ {reinterpret_cast<char*>(data), size},
+ internal_fields_deserializer.data);
delete[] data;
}
}
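
The internal-fields callback is now threaded through as an explicit callback-plus-data pair rather than read off the isolate, so embedders can pass per-deserialization state. A simplified sketch of the pattern (types are illustrative stand-ins for v8::DeserializeInternalFieldsCallback, not the real API):

#include <cstddef>

struct DeserializeFieldsCallback {
  void (*callback)(void* holder, int index, const char* payload, int size,
                   void* data);
  void* data;  // user pointer threaded through unchanged
};

inline void DispatchField(const DeserializeFieldsCallback& cb, void* holder,
                          int index, const char* payload, int size) {
  if (cb.callback != nullptr) {
    cb.callback(holder, index, payload, size, cb.data);  // as in the diff
  }
}
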
@@ -316,6 +318,10 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
if (deserializing_user_code() || space == LO_SPACE) {
new_code_objects_.Add(Code::cast(obj));
}
+ } else if (obj->IsAccessorInfo()) {
+ if (isolate_->external_reference_redirector()) {
+ accessor_infos_.Add(AccessorInfo::cast(obj));
+ }
}
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index db7996297d..7b1ced8159 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -48,8 +48,9 @@ class Deserializer : public SerializerDeserializer {
void Deserialize(Isolate* isolate);
// Deserialize a single object and the objects reachable from it.
- MaybeHandle<Object> DeserializePartial(Isolate* isolate,
- Handle<JSGlobalProxy> global_proxy);
+ MaybeHandle<Object> DeserializePartial(
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
// Deserialize an object graph. Fail gracefully.
MaybeHandle<HeapObject> DeserializeObject(Isolate* isolate);
@@ -88,7 +89,8 @@ class Deserializer : public SerializerDeserializer {
}
void DeserializeDeferredObjects();
- void DeserializeInternalFields();
+ void DeserializeInternalFields(
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
void FlushICacheForNewIsolate();
void FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
@@ -138,6 +140,7 @@ class Deserializer : public SerializerDeserializer {
List<HeapObject*> deserialized_large_objects_;
List<Code*> new_code_objects_;
+ List<AccessorInfo*> accessor_infos_;
List<Handle<String> > new_internalized_strings_;
List<Handle<Script> > new_scripts_;
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index e89f44f6e2..b78a1edbd0 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -23,7 +23,7 @@ PartialSerializer::~PartialSerializer() {
OutputStatistics("PartialSerializer");
}
-void PartialSerializer::Serialize(Object** o) {
+void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
if ((*o)->IsContext()) {
Context* context = Context::cast(*o);
reference_map()->AddAttachedReference(context->global_proxy());
@@ -102,7 +102,10 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
if (obj->IsJSObject()) {
JSObject* jsobj = JSObject::cast(obj);
- if (jsobj->GetInternalFieldCount() > 0) internal_field_holders_.Add(jsobj);
+ if (jsobj->GetInternalFieldCount() > 0) {
+ DCHECK_NOT_NULL(serialize_internal_fields_.callback);
+ internal_field_holders_.Add(jsobj);
+ }
}
// Object has not yet been serialized. Serialize it here.
@@ -129,7 +132,7 @@ void PartialSerializer::SerializeInternalFields() {
DisallowHeapAllocation no_gc;
DisallowJavascriptExecution no_js(isolate());
DisallowCompilation no_compile(isolate());
- DCHECK_NOT_NULL(serialize_internal_fields_);
+ DCHECK_NOT_NULL(serialize_internal_fields_.callback);
sink_.Put(kInternalFieldsData, "internal fields data");
while (internal_field_holders_.length() > 0) {
HandleScope scope(isolate());
@@ -139,7 +142,8 @@ void PartialSerializer::SerializeInternalFields() {
int internal_fields_count = obj->GetInternalFieldCount();
for (int i = 0; i < internal_fields_count; i++) {
if (obj->GetInternalField(i)->IsHeapObject()) continue;
- StartupData data = serialize_internal_fields_(v8::Utils::ToLocal(obj), i);
+ StartupData data = serialize_internal_fields_.callback(
+ v8::Utils::ToLocal(obj), i, serialize_internal_fields_.data);
sink_.Put(kNewObject + reference.space(), "internal field holder");
PutBackReference(*obj, reference);
sink_.PutInt(i, "internal field index");
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 45d64e431e..2d7c9ed415 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -21,7 +21,7 @@ class PartialSerializer : public Serializer {
~PartialSerializer() override;
// Serialize the objects reachable from a single object pointer.
- void Serialize(Object** o);
+ void Serialize(Object** o, bool include_global_proxy);
private:
void SerializeObject(HeapObject* o, HowToCode how_to_code,
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index f188793419..ca4db75239 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -21,8 +21,7 @@ ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
for (uint32_t i = 0; i < table->size(); ++i) {
Address addr = table->address(i);
- DCHECK(map_->Get(addr).IsNothing() ||
- strncmp(table->name(i), "Redirect to ", 12) == 0);
+ DCHECK(map_->Get(addr).IsNothing());
map_->Set(addr, i);
DCHECK(map_->Get(addr).IsJust());
}
@@ -81,5 +80,14 @@ bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
return !o->IsString() && !o->IsScript();
}
+void SerializerDeserializer::RestoreExternalReferenceRedirectors(
+ List<AccessorInfo*>* accessor_infos) {
+ // Restore wiped accessor infos.
+ for (AccessorInfo* info : *accessor_infos) {
+ Foreign::cast(info->js_getter())
+ ->set_foreign_address(info->redirected_getter());
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 201ac4e039..b426efd538 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -86,6 +86,8 @@ class SerializerDeserializer : public ObjectVisitor {
protected:
static bool CanBeDeferred(HeapObject* o);
+ void RestoreExternalReferenceRedirectors(List<AccessorInfo*>* accessor_infos);
+
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 959ac56fa9..83ad2e7d39 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -50,8 +50,8 @@ bool Snapshot::Initialize(Isolate* isolate) {
}
MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
- Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- size_t context_index) {
+ Isolate* isolate, Handle<JSGlobalProxy> global_proxy, size_t context_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer) {
if (!isolate->snapshot_available()) return Handle<Context>();
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
@@ -62,8 +62,8 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
- MaybeHandle<Object> maybe_context =
- deserializer.DeserializePartial(isolate, global_proxy);
+ MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
+ isolate, global_proxy, internal_fields_deserializer);
Handle<Object> result;
if (!maybe_context.ToHandle(&result)) return MaybeHandle<Context>();
CHECK(result->IsContext());
diff --git a/deps/v8/src/snapshot/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index cee5875310..66a14bc599 100644
--- a/deps/v8/src/snapshot/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -7,7 +7,7 @@
#include "src/base/logging.h"
#include "src/handles-inl.h"
-
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 49a60926dc..010072a694 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -59,7 +59,8 @@ class Snapshot : public AllStatic {
// Create a new context using the internal partial snapshot.
static MaybeHandle<Context> NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
- size_t context_index);
+ size_t context_index,
+ v8::DeserializeInternalFieldsCallback internal_fields_deserializer);
static bool HaveASnapshotToStartFrom(Isolate* isolate);
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index 80598e80bd..4b27746f8e 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -21,6 +21,7 @@ StartupSerializer::StartupSerializer(
}
StartupSerializer::~StartupSerializer() {
+ RestoreExternalReferenceRedirectors(&accessor_infos_);
OutputStatistics("StartupSerializer");
}
@@ -66,6 +67,14 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
FlushSkip(skip);
+ if (isolate_->external_reference_redirector() && obj->IsAccessorInfo()) {
+ // Wipe external reference redirects in the accessor info.
+ AccessorInfo* info = AccessorInfo::cast(obj);
+ Address original_address = Foreign::cast(info->getter())->foreign_address();
+ Foreign::cast(info->js_getter())->set_foreign_address(original_address);
+ accessor_infos_.Add(info);
+ }
+
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
where_to_point);
@@ -116,10 +125,8 @@ void StartupSerializer::SerializeStrongReferences() {
CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
// No active or weak handles.
CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
- CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
+ CHECK_EQ(0, isolate->global_handles()->global_handles_count());
CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
- // We don't support serializing installed extensions.
- CHECK(!isolate->has_installed_extensions());
// First visit immortal immovables to make sure they end up in the first page.
serializing_immortal_immovables_roots_ = true;
isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG_ROOT_LIST);
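The wipe in SerializeObject above and the restore in the StartupSerializer destructor are a matched pair: while the snapshot is written, each AccessorInfo's js_getter points at the original C++ function rather than the simulator redirect, and RestoreExternalReferenceRedirectors reinstates the redirect once serialization finishes. A minimal sketch of the same patch-then-restore pattern, with all names hypothetical:

#include <vector>

// Hypothetical Record: a slot that normally holds a redirect thunk but must
// hold the original address for as long as it is being serialized.
struct Record {
  void* original;    // real function address (what the snapshot should store)
  void* redirected;  // simulator thunk (what runtime callers normally see)
  void* stored;      // the field the serializer actually reads
};

class SerializeScope {
 public:
  void WipeRedirect(Record* r) {
    r->stored = r->original;  // the snapshot now records the real address
    wiped_.push_back(r);
  }
  // Mirrors RestoreExternalReferenceRedirectors: undo every wipe on exit.
  ~SerializeScope() {
    for (Record* r : wiped_) r->stored = r->redirected;
  }

 private:
  std::vector<Record*> wiped_;
};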
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index ac75c5d163..4a597e6a32 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -73,6 +73,7 @@ class StartupSerializer : public Serializer {
bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
+ List<AccessorInfo*> accessor_infos_;
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
diff --git a/deps/v8/src/source-position.cc b/deps/v8/src/source-position.cc
index e9f86db01b..ff204be73d 100644
--- a/deps/v8/src/source-position.cc
+++ b/deps/v8/src/source-position.cc
@@ -43,29 +43,16 @@ std::ostream& operator<<(std::ostream& out, const SourcePosition& pos) {
return out;
}
-SourcePositionInfo SourcePosition::Info(
- Handle<SharedFunctionInfo> function) const {
- SourcePositionInfo result(*this, function);
- Handle<Script> script(Script::cast(function->script()));
- Script::PositionInfo pos;
- if (Script::GetPositionInfo(script, ScriptOffset(), &pos,
- Script::WITH_OFFSET)) {
- result.line = pos.line;
- result.column = pos.column;
- }
- return result;
-}
-
std::vector<SourcePositionInfo> SourcePosition::InliningStack(
CompilationInfo* cinfo) const {
SourcePosition pos = *this;
std::vector<SourcePositionInfo> stack;
while (pos.isInlined()) {
const auto& inl = cinfo->inlined_functions()[pos.InliningId()];
- stack.push_back(pos.Info(inl.shared_info));
+ stack.push_back(SourcePositionInfo(pos, inl.shared_info));
pos = inl.position.position;
}
- stack.push_back(pos.Info(cinfo->shared_info()));
+ stack.push_back(SourcePositionInfo(pos, cinfo->shared_info()));
return stack;
}
@@ -80,12 +67,12 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(
deopt_data->InliningPositions()->get(pos.InliningId());
Handle<SharedFunctionInfo> function(
deopt_data->GetInlinedFunction(inl.inlined_function_id));
- stack.push_back(pos.Info(function));
+ stack.push_back(SourcePositionInfo(pos, function));
pos = inl.position;
}
Handle<SharedFunctionInfo> function(
SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo()));
- stack.push_back(pos.Info(function));
+ stack.push_back(SourcePositionInfo(pos, function));
return stack;
}
@@ -127,5 +114,17 @@ void SourcePosition::Print(std::ostream& out, Code* code) const {
}
}
+SourcePositionInfo::SourcePositionInfo(SourcePosition pos,
+ Handle<SharedFunctionInfo> f)
+ : position(pos), function(f) {
+ Handle<Script> script(Script::cast(function->script()));
+ Script::PositionInfo info;
+ if (Script::GetPositionInfo(script, pos.ScriptOffset(), &info,
+ Script::WITH_OFFSET)) {
+ line = info.line;
+ column = info.column;
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/source-position.h b/deps/v8/src/source-position.h
index aa7d31bae2..beab996c04 100644
--- a/deps/v8/src/source-position.h
+++ b/deps/v8/src/source-position.h
@@ -43,10 +43,11 @@ class SourcePosition final {
}
bool isInlined() const { return InliningId() != kNotInlined; }
+ // Assumes that the code object is optimized.
std::vector<SourcePositionInfo> InliningStack(Handle<Code> code) const;
- std::vector<SourcePositionInfo> InliningStack(CompilationInfo* code) const;
+ std::vector<SourcePositionInfo> InliningStack(CompilationInfo* cinfo) const;
- void Print(std::ostream& out, Code* function) const;
+ void Print(std::ostream& out, Code* code) const;
int ScriptOffset() const { return ScriptOffsetField::decode(value_) - 1; }
int InliningId() const { return InliningIdField::decode(value_) - 1; }
@@ -75,7 +76,6 @@ class SourcePosition final {
private:
void Print(std::ostream& out, SharedFunctionInfo* function) const;
- SourcePositionInfo Info(Handle<SharedFunctionInfo> script) const;
// InliningId is in the high bits for better compression in
// SourcePositionTable.
@@ -102,8 +102,7 @@ struct InliningPosition {
};
struct SourcePositionInfo {
- explicit SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f)
- : position(pos), function(f) {}
+ SourcePositionInfo(SourcePosition pos, Handle<SharedFunctionInfo> f);
SourcePosition position;
Handle<SharedFunctionInfo> function;
diff --git a/deps/v8/src/string-case.cc b/deps/v8/src/string-case.cc
new file mode 100644
index 0000000000..52d9636083
--- /dev/null
+++ b/deps/v8/src/string-case.cc
@@ -0,0 +1,130 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/string-case.h"
+
+#include "src/assert-scope.h"
+#include "src/base/logging.h"
+#include "src/globals.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef DEBUG
+bool CheckFastAsciiConvert(char* dst, const char* src, int length, bool changed,
+ bool is_to_lower) {
+ bool expected_changed = false;
+ for (int i = 0; i < length; i++) {
+ if (dst[i] == src[i]) continue;
+ expected_changed = true;
+ if (is_to_lower) {
+ DCHECK('A' <= src[i] && src[i] <= 'Z');
+ DCHECK(dst[i] == src[i] + ('a' - 'A'));
+ } else {
+ DCHECK('a' <= src[i] && src[i] <= 'z');
+ DCHECK(dst[i] == src[i] - ('a' - 'A'));
+ }
+ }
+ return (expected_changed == changed);
+}
+#endif
+
+const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
+const uintptr_t kAsciiMask = kOneInEveryByte << 7;
+
+// Given a word and two range boundaries, returns a word with high bit
+// set in every byte iff the corresponding input byte was strictly in
+// the range (m, n). All the other bits in the result are cleared.
+// This function is only useful when it can be inlined and the
+// boundaries are statically known.
+// Requires: all bytes in the input word and the boundaries must be
+// ASCII (less than 0x7F).
+static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
+ // Use strict inequalities since in edge cases the function could be
+ // further simplified.
+ DCHECK(0 < m && m < n);
+ // Has high bit set in every w byte less than n.
+ uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
+ // Has high bit set in every w byte greater than m.
+ uintptr_t tmp2 = w + kOneInEveryByte * (0x7F - m);
+ return (tmp1 & tmp2 & (kOneInEveryByte * 0x80));
+}
+
+template <bool is_lower>
+int FastAsciiConvert(char* dst, const char* src, int length,
+ bool* changed_out) {
+#ifdef DEBUG
+ char* saved_dst = dst;
+#endif
+ const char* saved_src = src;
+ DisallowHeapAllocation no_gc;
+ // We rely on the distance between upper and lower case letters
+ // being a known power of 2.
+ DCHECK('a' - 'A' == (1 << 5));
+ // Boundaries for the range of input characters that require conversion.
+ static const char lo = is_lower ? 'A' - 1 : 'a' - 1;
+ static const char hi = is_lower ? 'Z' + 1 : 'z' + 1;
+ bool changed = false;
+ const char* const limit = src + length;
+
+ // dst is newly allocated and always aligned.
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+ // Only attempt processing one word at a time if src is also aligned.
+ if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+ // Process the prefix of the input that requires no conversion one aligned
+ // (machine) word at a time.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+ if (AsciiRangeMask(w, lo, hi) != 0) {
+ changed = true;
+ break;
+ }
+ *reinterpret_cast<uintptr_t*>(dst) = w;
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
+ // Process the remainder of the input one word at a time, performing
+ // conversion where required.
+ while (src <= limit - sizeof(uintptr_t)) {
+ const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+ if ((w & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+ uintptr_t m = AsciiRangeMask(w, lo, hi);
+ // The mask has high (7th) bit set in every byte that needs
+ // conversion and we know that the distance between cases is
+ // 1 << 5.
+ *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+ src += sizeof(uintptr_t);
+ dst += sizeof(uintptr_t);
+ }
+ }
+ // Process the remaining bytes of the input one at a time (this is the
+ // whole input when src was not word-aligned).
+ while (src < limit) {
+ char c = *src;
+ if ((c & kAsciiMask) != 0) return static_cast<int>(src - saved_src);
+ if (lo < c && c < hi) {
+ c ^= (1 << 5);
+ changed = true;
+ }
+ *dst = c;
+ ++src;
+ ++dst;
+ }
+
+ DCHECK(
+ CheckFastAsciiConvert(saved_dst, saved_src, length, changed, is_lower));
+
+ *changed_out = changed;
+ return length;
+}
+
+template int FastAsciiConvert<false>(char* dst, const char* src, int length,
+ bool* changed_out);
+template int FastAsciiConvert<true>(char* dst, const char* src, int length,
+ bool* changed_out);
+
+} // namespace internal
+} // namespace v8
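To make the bit trick concrete: with is_lower the boundaries are lo = 'A' - 1 = 0x40 and hi = 'Z' + 1 = 0x5B, so AsciiRangeMask sets bit 7 of every byte holding an uppercase letter, and shifting that mask right by two yields the 0x20 case bit that the XOR flips. A hedged usage sketch (buffer size and alignment handling are illustrative, not from this patch):

#include <cstdint>

#include "src/string-case.h"  // declares FastAsciiConvert<is_lower>

bool ToLowerDemo() {
  // Per the boundary logic above, <true> selects the uppercase range, i.e.
  // a to-lower conversion. dst must be word-aligned (DCHECKed above); an
  // unaligned src merely forces the byte-at-a-time tail loop.
  alignas(sizeof(std::uintptr_t)) char dst[16];
  const char* src = "Hello, WORLD";
  bool changed = false;
  int processed = v8::internal::FastAsciiConvert<true>(dst, src, 12, &changed);
  // On full success: processed == 12, changed == true, dst == "hello, world".
  // A non-ASCII byte aborts early, returning the index of the first
  // offending byte without writing *changed_out.
  return processed == 12 && changed;
}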
diff --git a/deps/v8/src/string-case.h b/deps/v8/src/string-case.h
new file mode 100644
index 0000000000..3fe3bc2b81
--- /dev/null
+++ b/deps/v8/src/string-case.h
@@ -0,0 +1,17 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_CASE_H_
+#define V8_STRING_CASE_H_
+
+namespace v8 {
+namespace internal {
+
+template <bool is_lower>
+int FastAsciiConvert(char* dst, const char* src, int length, bool* changed_out);
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRING_CASE_H_
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index acfb917414..650b3cf93a 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -7,6 +7,8 @@
#include <memory>
#include "src/handles-inl.h"
+#include "src/log.h"
+#include "src/objects-inl.h"
#include "src/prototype.h"
namespace v8 {
@@ -204,53 +206,6 @@ void StringStream::PrintObject(Object* o) {
}
-void StringStream::Add(const char* format) {
- Add(CStrVector(format));
-}
-
-
-void StringStream::Add(Vector<const char> format) {
- Add(format, Vector<FmtElm>::empty());
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0) {
- const char argc = 1;
- FmtElm argv[argc] = { arg0 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1) {
- const char argc = 2;
- FmtElm argv[argc] = { arg0, arg1 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2) {
- const char argc = 3;
- FmtElm argv[argc] = { arg0, arg1, arg2 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2, FmtElm arg3) {
- const char argc = 4;
- FmtElm argv[argc] = { arg0, arg1, arg2, arg3 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
-
-void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
- FmtElm arg2, FmtElm arg3, FmtElm arg4) {
- const char argc = 5;
- FmtElm argv[argc] = { arg0, arg1, arg2, arg3, arg4 };
- Add(CStrVector(format), Vector<FmtElm>(argv, argc));
-}
-
std::unique_ptr<char[]> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
MemCopy(str, buffer_, length_);
@@ -349,7 +304,8 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
DescriptorArray* descs = map->instance_descriptors();
for (int i = 0; i < real_size; i++) {
PropertyDetails details = descs->GetDetails(i);
- if (details.type() == DATA) {
+ if (details.location() == kField) {
+ DCHECK_EQ(kData, details.kind());
Object* key = descs->GetKey(i);
if (key->IsString() || key->IsNumber()) {
int len = 3;
@@ -528,8 +484,8 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
Isolate* isolate = fun->GetIsolate();
- if (receiver->IsNull(isolate) || receiver->IsUndefined(isolate) ||
- receiver->IsTheHole(isolate) || receiver->IsJSProxy()) {
+ if (receiver->IsNullOrUndefined(isolate) || receiver->IsTheHole(isolate) ||
+ receiver->IsJSProxy()) {
print_name = true;
} else if (isolate->context() != nullptr) {
if (!receiver->IsJSObject()) {
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index 1c1d27a16b..c9be46f046 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -5,8 +5,6 @@
#ifndef V8_STRING_STREAM_H_
#define V8_STRING_STREAM_H_
-#include <memory>
-
#include "src/allocation.h"
#include "src/handles.h"
#include "src/vector.h"
@@ -56,48 +54,53 @@ class FixedStringAllocator final : public StringAllocator {
DISALLOW_COPY_AND_ASSIGN(FixedStringAllocator);
};
+class StringStream final {
+ class FmtElm final {
+ public:
+ FmtElm(int value) : FmtElm(INT) { // NOLINT
+ data_.u_int_ = value;
+ }
+ explicit FmtElm(double value) : FmtElm(DOUBLE) { // NOLINT
+ data_.u_double_ = value;
+ }
+ FmtElm(const char* value) : FmtElm(C_STR) { // NOLINT
+ data_.u_c_str_ = value;
+ }
+ FmtElm(const Vector<const uc16>& value) : FmtElm(LC_STR) { // NOLINT
+ data_.u_lc_str_ = &value;
+ }
+ FmtElm(Object* value) : FmtElm(OBJ) { // NOLINT
+ data_.u_obj_ = value;
+ }
+ FmtElm(Handle<Object> value) : FmtElm(HANDLE) { // NOLINT
+ data_.u_handle_ = value.location();
+ }
+ FmtElm(void* value) : FmtElm(POINTER) { // NOLINT
+ data_.u_pointer_ = value;
+ }
+
+ private:
+ friend class StringStream;
+ enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
-class FmtElm final {
- public:
- FmtElm(int value) : type_(INT) { // NOLINT
- data_.u_int_ = value;
- }
- explicit FmtElm(double value) : type_(DOUBLE) {
- data_.u_double_ = value;
- }
- FmtElm(const char* value) : type_(C_STR) { // NOLINT
- data_.u_c_str_ = value;
- }
- FmtElm(const Vector<const uc16>& value) : type_(LC_STR) { // NOLINT
- data_.u_lc_str_ = &value;
- }
- FmtElm(Object* value) : type_(OBJ) { // NOLINT
- data_.u_obj_ = value;
- }
- FmtElm(Handle<Object> value) : type_(HANDLE) { // NOLINT
- data_.u_handle_ = value.location();
- }
- FmtElm(void* value) : type_(POINTER) { // NOLINT
- data_.u_pointer_ = value;
- }
-
- private:
- friend class StringStream;
- enum Type { INT, DOUBLE, C_STR, LC_STR, OBJ, HANDLE, POINTER };
- Type type_;
- union {
- int u_int_;
- double u_double_;
- const char* u_c_str_;
- const Vector<const uc16>* u_lc_str_;
- Object* u_obj_;
- Object** u_handle_;
- void* u_pointer_;
- } data_;
-};
+#ifdef DEBUG
+ Type type_;
+ explicit FmtElm(Type type) : type_(type) {}
+#else
+ explicit FmtElm(Type) {}
+#endif
+ union {
+ int u_int_;
+ double u_double_;
+ const char* u_c_str_;
+ const Vector<const uc16>* u_lc_str_;
+ Object* u_obj_;
+ Object** u_handle_;
+ void* u_pointer_;
+ } data_;
+ };
-class StringStream final {
public:
enum ObjectPrintMode { kPrintObjectConcise, kPrintObjectVerbose };
StringStream(StringAllocator* allocator,
@@ -113,23 +116,19 @@ class StringStream final {
bool Put(char c);
bool Put(String* str);
bool Put(String* str, int start, int end);
- void Add(Vector<const char> format, Vector<FmtElm> elms);
- void Add(const char* format);
- void Add(Vector<const char> format);
- void Add(const char* format, FmtElm arg0);
- void Add(const char* format, FmtElm arg0, FmtElm arg1);
- void Add(const char* format, FmtElm arg0, FmtElm arg1, FmtElm arg2);
- void Add(const char* format,
- FmtElm arg0,
- FmtElm arg1,
- FmtElm arg2,
- FmtElm arg3);
- void Add(const char* format,
- FmtElm arg0,
- FmtElm arg1,
- FmtElm arg2,
- FmtElm arg3,
- FmtElm arg4);
+ void Add(const char* format) { Add(CStrVector(format)); }
+ void Add(Vector<const char> format) { Add(format, Vector<FmtElm>()); }
+
+ template <typename... Args>
+ void Add(const char* format, Args... args) {
+ Add(CStrVector(format), args...);
+ }
+
+ template <typename... Args>
+ void Add(Vector<const char> format, Args... args) {
+ FmtElm elems[]{args...};
+ Add(format, ArrayVector(elems));
+ }
// Getting the message out.
void OutputToFile(FILE* out);
@@ -165,6 +164,7 @@ class StringStream final {
static const int kInitialCapacity = 16;
private:
+ void Add(Vector<const char> format, Vector<FmtElm> elms);
void PrintObject(Object* obj);
StringAllocator* allocator_;
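The five fixed-arity Add overloads deleted above collapse into the two variadic templates: every argument implicitly converts to FmtElm through its deliberately non-explicit constructors, and the pack materializes as a stack array handed to the single private Add(Vector<const char>, Vector<FmtElm>). A self-contained sketch of the idiom (Elm, AddImpl and the demo are illustrations, not V8 code; the zero-argument case keeps its own overload exactly as in the diff):

#include <cstddef>
#include <cstdio>

// Tagged element analogous to StringStream::FmtElm.
struct Elm {
  enum Type { INT, C_STR } type;
  union {
    int i;
    const char* s;
  } data;
  Elm(int v) : type(INT) { data.i = v; }            // NOLINT: implicit on purpose
  Elm(const char* v) : type(C_STR) { data.s = v; }  // NOLINT: implicit on purpose
};

void AddImpl(const char* format, const Elm* elms, std::size_t count) {
  std::printf("%s ->", format);  // a real version would interpret the format
  for (std::size_t i = 0; i < count; ++i) {
    if (elms[i].type == Elm::INT)
      std::printf(" [int %d]", elms[i].data.i);
    else
      std::printf(" [str %s]", elms[i].data.s);
  }
  std::printf("\n");
}

template <typename... Args>
void Add(const char* format, Args... args) {
  Elm elms[]{args...};  // each argument converts through an Elm constructor
  AddImpl(format, elms, sizeof...(args));
}

// Add("%s: %d", "slots", 3) packs both arguments into one Elm array -- one
// template now serves every arity the deleted overloads used to cover.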
diff --git a/deps/v8/src/tracing/traced-value.cc b/deps/v8/src/tracing/traced-value.cc
index 81be6237f9..9b2a45c991 100644
--- a/deps/v8/src/tracing/traced-value.cc
+++ b/deps/v8/src/tracing/traced-value.cc
@@ -24,40 +24,36 @@ const bool kStackTypeArray = true;
#define DEBUG_POP_CONTAINER() ((void)0)
#endif
-std::string EscapeString(const std::string& value) {
- std::string result;
- result.reserve(value.length() + 2);
- result += '"';
- size_t length = value.length();
+void EscapeAndAppendString(const char* value, std::string* result) {
+ *result += '"';
char number_buffer[10];
- for (size_t src = 0; src < length; ++src) {
- char c = value[src];
+ while (*value) {
+ char c = *value++;
switch (c) {
case '\t':
- result += "\\t";
+ *result += "\\t";
break;
case '\n':
- result += "\\n";
+ *result += "\\n";
break;
case '\"':
- result += "\\\"";
+ *result += "\\\"";
break;
case '\\':
- result += "\\\\";
+ *result += "\\\\";
break;
default:
if (c < '\040') {
base::OS::SNPrintF(
number_buffer, arraysize(number_buffer), "\\u%04X",
static_cast<unsigned>(static_cast<unsigned char>(c)));
- result += number_buffer;
+ *result += number_buffer;
} else {
- result += c;
+ *result += c;
}
}
}
- result += '"';
- return result;
+ *result += '"';
}
} // namespace
@@ -95,10 +91,10 @@ void TracedValue::SetBoolean(const char* name, bool value) {
data_ += value ? "true" : "false";
}
-void TracedValue::SetString(const char* name, const std::string& value) {
+void TracedValue::SetString(const char* name, const char* value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
WriteName(name);
- data_ += EscapeString(value);
+ EscapeAndAppendString(value, &data_);
}
void TracedValue::BeginDictionary(const char* name) {
@@ -123,12 +119,6 @@ void TracedValue::AppendInteger(int value) {
data_ += std::to_string(value);
}
-void TracedValue::AppendLongInteger(int64_t value) {
- DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
- WriteComma();
- data_ += std::to_string(value);
-}
-
void TracedValue::AppendDouble(double value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
WriteComma();
@@ -142,10 +132,10 @@ void TracedValue::AppendBoolean(bool value) {
data_ += value ? "true" : "false";
}
-void TracedValue::AppendString(const std::string& value) {
+void TracedValue::AppendString(const char* value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
WriteComma();
- data_ += EscapeString(value);
+ EscapeAndAppendString(value, &data_);
}
void TracedValue::BeginDictionary() {
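With EscapeAndAppendString the escaped value is appended straight onto the shared data_ buffer instead of going through a temporary std::string per call, and both SetString overloads now funnel into the const char* path. A hedged usage sketch (TracedValue::Create() is assumed from the existing class, not shown in this diff):

#include <memory>
#include <string>

#include "src/tracing/traced-value.h"

void FillDemo() {
  std::unique_ptr<v8::tracing::TracedValue> tv =
      v8::tracing::TracedValue::Create();
  // The buffer gains "tag":"a\tb" -- the tab escaped to backslash-t.
  tv->SetString("tag", "a\tb");
  std::string s = "x\"y";
  tv->SetString("quoted", s);  // new inline overload forwards s.c_str()
}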
diff --git a/deps/v8/src/tracing/traced-value.h b/deps/v8/src/tracing/traced-value.h
index b5c265cd48..7de4c234a2 100644
--- a/deps/v8/src/tracing/traced-value.h
+++ b/deps/v8/src/tracing/traced-value.h
@@ -29,15 +29,18 @@ class TracedValue : public ConvertableToTraceFormat {
void SetInteger(const char* name, int value);
void SetDouble(const char* name, double value);
void SetBoolean(const char* name, bool value);
- void SetString(const char* name, const std::string& value);
+ void SetString(const char* name, const char* value);
+ void SetString(const char* name, const std::string& value) {
+ SetString(name, value.c_str());
+ }
void BeginDictionary(const char* name);
void BeginArray(const char* name);
void AppendInteger(int);
- void AppendLongInteger(int64_t);
void AppendDouble(double);
void AppendBoolean(bool);
- void AppendString(const std::string&);
+ void AppendString(const char*);
+ void AppendString(const std::string& value) { AppendString(value.c_str()); }
void BeginArray();
void BeginDictionary();
diff --git a/deps/v8/src/tracing/tracing-category-observer.cc b/deps/v8/src/tracing/tracing-category-observer.cc
index 3fffd2f9ca..6a36158741 100644
--- a/deps/v8/src/tracing/tracing-category-observer.cc
+++ b/deps/v8/src/tracing/tracing-category-observer.cc
@@ -21,6 +21,7 @@ void TracingCategoryObserver::SetUp() {
TRACE_EVENT_WARMUP_CATEGORY(
TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling"));
TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"));
+ TRACE_EVENT_WARMUP_CATEGORY(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"));
}
void TracingCategoryObserver::TearDown() {
@@ -46,12 +47,18 @@ void TracingCategoryObserver::OnTraceEnabled() {
if (enabled) {
v8::internal::FLAG_gc_stats |= ENABLED_BY_TRACING;
}
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"),
+ &enabled);
+ if (enabled) {
+ v8::internal::FLAG_ic_stats |= ENABLED_BY_TRACING;
+ }
}
void TracingCategoryObserver::OnTraceDisabled() {
v8::internal::FLAG_runtime_stats &=
~(ENABLED_BY_TRACING | ENABLED_BY_SAMPLING);
v8::internal::FLAG_gc_stats &= ~ENABLED_BY_TRACING;
+ v8::internal::FLAG_ic_stats &= ~ENABLED_BY_TRACING;
}
} // namespace tracing
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 88c1549579..5333fa6e25 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -202,7 +202,8 @@ Handle<Map> TransitionArray::FindTransitionToField(Handle<Map> map,
if (target == NULL) return Handle<Map>::null();
PropertyDetails details = target->GetLastDescriptorDetails();
DCHECK_EQ(NONE, details.attributes());
- if (details.type() != DATA) return Handle<Map>::null();
+ if (details.location() != kField) return Handle<Map>::null();
+ DCHECK_EQ(kData, details.kind());
return Handle<Map>(target);
}
@@ -214,7 +215,8 @@ Handle<String> TransitionArray::ExpectedTransitionKey(Handle<Map> map) {
if (!IsSimpleTransition(raw_transition)) return Handle<String>::null();
Map* target = GetSimpleTransition(raw_transition);
PropertyDetails details = GetSimpleTargetDetails(target);
- if (details.type() != DATA) return Handle<String>::null();
+ if (details.location() != kField) return Handle<String>::null();
+ DCHECK_EQ(kData, details.kind());
if (details.attributes() != NONE) return Handle<String>::null();
Name* name = GetSimpleTransitionKey(target);
if (!name->IsString()) return Handle<String>::null();
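These transitions.cc hunks, like the string-stream.cc one above, replace the retired composite type() == DATA test with the orthogonal kind/location pair: DATA meant a data property stored in a real field, which now spells out as two independent checks. A sketch of the equivalence:

#include "src/property-details.h"

namespace v8 {
namespace internal {

// The old DATA property type is exactly "data kind, field location"; the
// DCHECKs added above document the kind half of that invariant.
bool IsOldStyleDataProperty(PropertyDetails details) {
  return details.location() == kField && details.kind() == kData;
}

}  // namespace internal
}  // namespace v8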
diff --git a/deps/v8/src/trap-handler/trap-handler.h b/deps/v8/src/trap-handler/trap-handler.h
new file mode 100644
index 0000000000..7c78b1f232
--- /dev/null
+++ b/deps/v8/src/trap-handler/trap-handler.h
@@ -0,0 +1,28 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TRAP_HANDLER_H_
+#define V8_TRAP_HANDLER_H_
+
+#include <stdint.h>
+
+namespace v8 {
+namespace internal {
+namespace trap_handler {
+
+struct ProtectedInstructionData {
+ // The offset of this instruction from the start of its code object.
+ int32_t instr_offset;
+
+ // The offset of the landing pad from the start of its code object.
+ //
+ // TODO(eholk): Use a single landing pad and store parameters here.
+ int32_t landing_offset;
+};
+
+} // namespace trap_handler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TRAP_HANDLER_H_
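Each ProtectedInstructionData entry ties a memory access that may fault to the code offset where execution should resume after the trap handler intervenes. A hypothetical entry for a single guarded wasm load (both offsets invented for illustration):

#include <stdint.h>

#include "src/trap-handler/trap-handler.h"

// Hypothetical: the compiler records that the load at +0x24 should, on an
// out-of-bounds fault, continue at the landing pad emitted at +0x58.
const v8::internal::trap_handler::ProtectedInstructionData kGuardedLoad = {
    /* instr_offset   */ 0x24,
    /* landing_offset */ 0x58};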
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 58dfe33ac7..ca7229a94a 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -27,7 +27,7 @@ FeedbackVectorSlot FeedbackVectorSpecBase<Derived>::AddSlot(
// static
TypeFeedbackMetadata* TypeFeedbackMetadata::cast(Object* obj) {
- DCHECK(obj->IsTypeFeedbackVector());
+ DCHECK(obj->IsTypeFeedbackMetadata());
return reinterpret_cast<TypeFeedbackMetadata*>(obj);
}
@@ -55,25 +55,28 @@ int TypeFeedbackMetadata::GetSlotSize(FeedbackVectorSlotKind kind) {
DCHECK_NE(FeedbackVectorSlotKind::KINDS_NUMBER, kind);
if (kind == FeedbackVectorSlotKind::GENERAL ||
kind == FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC ||
- kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC) {
+ kind == FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC ||
+ kind == FeedbackVectorSlotKind::CREATE_CLOSURE) {
return 1;
}
return 2;
}
-bool TypeFeedbackMetadata::SlotRequiresName(FeedbackVectorSlotKind kind) {
+bool TypeFeedbackMetadata::SlotRequiresParameter(FeedbackVectorSlotKind kind) {
switch (kind) {
- case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
+ case FeedbackVectorSlotKind::CREATE_CLOSURE:
return true;
case FeedbackVectorSlotKind::CALL_IC:
case FeedbackVectorSlotKind::LOAD_IC:
+ case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
case FeedbackVectorSlotKind::KEYED_LOAD_IC:
case FeedbackVectorSlotKind::STORE_IC:
case FeedbackVectorSlotKind::KEYED_STORE_IC:
case FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC:
case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
+ case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC:
case FeedbackVectorSlotKind::GENERAL:
case FeedbackVectorSlotKind::INVALID:
return false;
@@ -105,7 +108,7 @@ int TypeFeedbackVector::invocation_count() const {
// Conversion from an integer index to either a slot or an ic slot.
// static
FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) {
- DCHECK(index >= kReservedIndexCount);
+ DCHECK_GE(index, kReservedIndexCount);
return FeedbackVectorSlot(index - kReservedIndexCount);
}
@@ -149,6 +152,12 @@ CompareOperationHint CompareOperationHintFromFeedback(int type_feedback) {
return CompareOperationHint::kSignedSmall;
case CompareOperationFeedback::kNumber:
return CompareOperationHint::kNumber;
+ case CompareOperationFeedback::kNumberOrOddball:
+ return CompareOperationHint::kNumberOrOddball;
+ case CompareOperationFeedback::kInternalizedString:
+ return CompareOperationHint::kInternalizedString;
+ case CompareOperationFeedback::kString:
+ return CompareOperationHint::kString;
default:
return CompareOperationHint::kAny;
}
@@ -176,7 +185,8 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
case FeedbackVectorSlotKind::LOAD_GLOBAL_IC:
case FeedbackVectorSlotKind::KEYED_LOAD_IC:
case FeedbackVectorSlotKind::STORE_IC:
- case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
if (obj->IsWeakCell() || obj->IsFixedArray() || obj->IsString()) {
with++;
} else if (obj == megamorphic_sentinel) {
@@ -215,6 +225,7 @@ void TypeFeedbackVector::ComputeCounts(int* with_type_info, int* generic,
}
break;
}
+ case FeedbackVectorSlotKind::CREATE_CLOSURE:
case FeedbackVectorSlotKind::GENERAL:
break;
case FeedbackVectorSlotKind::INVALID:
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index 2ba9690b9f..267fd862ed 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -37,15 +37,9 @@ FeedbackVectorSlotKind TypeFeedbackMetadata::GetKind(
return VectorICComputer::decode(data, slot.ToInt());
}
-String* TypeFeedbackMetadata::GetName(FeedbackVectorSlot slot) const {
- DCHECK(SlotRequiresName(GetKind(slot)));
- UnseededNumberDictionary* names =
- UnseededNumberDictionary::cast(get(kNamesTableIndex));
- int entry = names->FindEntry(GetIsolate(), slot.ToInt());
- CHECK_NE(UnseededNumberDictionary::kNotFound, entry);
- Object* name = names->ValueAt(entry);
- DCHECK(name->IsString());
- return String::cast(name);
+int TypeFeedbackMetadata::GetParameter(int parameter_index) const {
+ FixedArray* parameters = FixedArray::cast(get(kParametersTableIndex));
+ return Smi::cast(parameters->get(parameter_index))->value();
}
void TypeFeedbackMetadata::SetKind(FeedbackVectorSlot slot,
@@ -97,31 +91,22 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
Handle<TypeFeedbackMetadata> metadata =
Handle<TypeFeedbackMetadata>::cast(array);
- // Add names to NamesTable.
- const int name_count = spec->name_count();
-
- Handle<UnseededNumberDictionary> names;
- if (name_count) {
- names = UnseededNumberDictionary::New(isolate, name_count, TENURED);
- }
-
- int name_index = 0;
for (int i = 0; i < slot_count; i++) {
FeedbackVectorSlotKind kind = spec->GetKind(i);
metadata->SetKind(FeedbackVectorSlot(i), kind);
- if (SlotRequiresName(kind)) {
- Handle<String> name = spec->GetName(name_index);
- DCHECK(!name.is_null());
- Handle<UnseededNumberDictionary> new_names =
- UnseededNumberDictionary::AtNumberPut(names, i, name);
- DCHECK_EQ(*new_names, *names);
- names = new_names;
- name_index++;
+ }
+
+ if (spec->parameters_count() > 0) {
+ const int parameters_count = spec->parameters_count();
+ Handle<FixedArray> params_array =
+ factory->NewFixedArray(parameters_count, TENURED);
+ for (int i = 0; i < parameters_count; i++) {
+ params_array->set(i, Smi::FromInt(spec->GetParameter(i)));
}
+ metadata->set(kParametersTableIndex, *params_array);
+ } else {
+ metadata->set(kParametersTableIndex, *factory->empty_fixed_array());
}
- DCHECK_EQ(name_count, name_index);
- metadata->set(kNamesTableIndex,
- name_count ? static_cast<Object*>(*names) : Smi::kZero);
// It's important that the TypeFeedbackMetadata have a COW map, since it's
// pointed to by both a SharedFunctionInfo and indirectly by closures through
@@ -133,7 +118,6 @@ Handle<TypeFeedbackMetadata> TypeFeedbackMetadata::New(Isolate* isolate,
return metadata;
}
-
bool TypeFeedbackMetadata::SpecDiffersFrom(
const FeedbackVectorSpec* other_spec) const {
if (other_spec->slots() != slot_count()) {
@@ -141,7 +125,7 @@ bool TypeFeedbackMetadata::SpecDiffersFrom(
}
int slots = slot_count();
- int name_index = 0;
+ int parameter_index = 0;
for (int i = 0; i < slots;) {
FeedbackVectorSlot slot(i);
FeedbackVectorSlotKind kind = GetKind(slot);
@@ -150,13 +134,13 @@ bool TypeFeedbackMetadata::SpecDiffersFrom(
if (kind != other_spec->GetKind(i)) {
return true;
}
- if (SlotRequiresName(kind)) {
- String* name = GetName(slot);
- DCHECK(name != GetHeap()->empty_string());
- String* other_name = *other_spec->GetName(name_index++);
- if (name != other_name) {
+ if (SlotRequiresParameter(kind)) {
+ int parameter = GetParameter(parameter_index);
+ int other_parameter = other_spec->GetParameter(parameter_index);
+ if (parameter != other_parameter) {
return true;
}
+ parameter_index++;
}
i += entry_size;
}
@@ -170,6 +154,7 @@ bool TypeFeedbackMetadata::DiffersFrom(
}
int slots = slot_count();
+ int parameter_index = 0;
for (int i = 0; i < slots;) {
FeedbackVectorSlot slot(i);
FeedbackVectorSlotKind kind = GetKind(slot);
@@ -177,10 +162,12 @@ bool TypeFeedbackMetadata::DiffersFrom(
if (GetKind(slot) != other_metadata->GetKind(slot)) {
return true;
}
- if (SlotRequiresName(kind)) {
- if (GetName(slot) != other_metadata->GetName(slot)) {
+ if (SlotRequiresParameter(kind)) {
+ if (GetParameter(parameter_index) !=
+ other_metadata->GetParameter(parameter_index)) {
return true;
}
+ parameter_index++;
}
i += entry_size;
}
@@ -207,6 +194,10 @@ const char* TypeFeedbackMetadata::Kind2String(FeedbackVectorSlotKind kind) {
return "INTERPRETER_BINARYOP_IC";
case FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC:
return "INTERPRETER_COMPARE_IC";
+ case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC:
+ return "STORE_DATA_PROPERTY_IN_LITERAL_IC";
+ case FeedbackVectorSlotKind::CREATE_CLOSURE:
+ return "CREATE_CLOSURE";
case FeedbackVectorSlotKind::GENERAL:
return "STUB";
case FeedbackVectorSlotKind::KINDS_NUMBER:
@@ -222,9 +213,11 @@ FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
return metadata()->GetKind(slot);
}
-String* TypeFeedbackVector::GetName(FeedbackVectorSlot slot) const {
+int TypeFeedbackVector::GetParameter(FeedbackVectorSlot slot) const {
DCHECK(!is_empty());
- return metadata()->GetName(slot);
+ DCHECK(
+ TypeFeedbackMetadata::SlotRequiresParameter(metadata()->GetKind(slot)));
+ return FixedArray::cast(Get(slot))->length();
}
// static
@@ -240,8 +233,32 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
}
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
+ array->set_map_no_write_barrier(isolate->heap()->type_feedback_vector_map());
array->set(kMetadataIndex, *metadata);
array->set(kInvocationCountIndex, Smi::kZero);
+ int parameter_index = 0;
+ for (int i = 0; i < slot_count;) {
+ FeedbackVectorSlot slot(i);
+ FeedbackVectorSlotKind kind = metadata->GetKind(slot);
+ int index = TypeFeedbackVector::GetIndex(slot);
+ int entry_size = TypeFeedbackMetadata::GetSlotSize(kind);
+
+ if (kind == FeedbackVectorSlotKind::CREATE_CLOSURE) {
+ // This fixed array is filled with undefined.
+ int length = metadata->GetParameter(parameter_index++);
+ if (length == 0) {
+ // This is a native function literal. We can always point to
+ // the empty literals array here.
+ array->set(index, *factory->empty_literals_array(), SKIP_WRITE_BARRIER);
+ } else {
+ // TODO(mvstanton): Create the array.
+ // Handle<FixedArray> value = factory->NewFixedArray(length);
+ // array->set(index, *value);
+ array->set(index, *factory->empty_literals_array(), SKIP_WRITE_BARRIER);
+ }
+ }
+ i += entry_size;
+ }
DisallowHeapAllocation no_gc;
@@ -263,12 +280,14 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::New(
} else {
value = *uninitialized_sentinel;
}
- array->set(index, value, SKIP_WRITE_BARRIER);
- value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
- : *uninitialized_sentinel;
- for (int j = 1; j < entry_size; j++) {
- array->set(index + j, value, SKIP_WRITE_BARRIER);
+ if (kind != FeedbackVectorSlotKind::CREATE_CLOSURE) {
+ array->set(index, value, SKIP_WRITE_BARRIER);
+ value = kind == FeedbackVectorSlotKind::CALL_IC ? Smi::kZero
+ : *uninitialized_sentinel;
+ for (int j = 1; j < entry_size; j++) {
+ array->set(index + j, value, SKIP_WRITE_BARRIER);
+ }
}
i += entry_size;
}
@@ -303,9 +322,10 @@ static bool ClearLogic(Isolate* isolate) {
void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
bool force_clear) {
Isolate* isolate = GetIsolate();
-
if (!force_clear && !ClearLogic(isolate)) return;
+ if (this == isolate->heap()->empty_type_feedback_vector()) return;
+
Object* uninitialized_sentinel =
TypeFeedbackVector::RawUninitializedSentinel(isolate);
@@ -354,6 +374,14 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
// Set(slot, Smi::kZero);
break;
}
+ case FeedbackVectorSlotKind::CREATE_CLOSURE: {
+ // Fill the array with undefined.
+ FixedArray* array = FixedArray::cast(Get(slot));
+ for (int i = 1; i < array->length(); i++) {
+ array->set_undefined(i);
+ }
+ break;
+ }
case FeedbackVectorSlotKind::GENERAL: {
if (obj->IsHeapObject()) {
InstanceType instance_type =
@@ -367,6 +395,11 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
}
break;
}
+ case FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC: {
+ StoreDataPropertyInLiteralICNexus nexus(this, slot);
+ nexus.Clear(shared->code());
+ break;
+ }
case FeedbackVectorSlotKind::INVALID:
case FeedbackVectorSlotKind::KINDS_NUMBER:
UNREACHABLE();
@@ -378,50 +411,6 @@ void TypeFeedbackVector::ClearSlotsImpl(SharedFunctionInfo* shared,
// static
-void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
- SharedFunctionInfo::Iterator iterator(isolate);
- SharedFunctionInfo* shared;
- while ((shared = iterator.Next())) {
- if (!shared->OptimizedCodeMapIsCleared()) {
- FixedArray* optimized_code_map = shared->optimized_code_map();
- int length = optimized_code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < length;
- i += SharedFunctionInfo::kEntryLength) {
- WeakCell* cell = WeakCell::cast(
- optimized_code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
- if (cell->value()->IsLiteralsArray()) {
- TypeFeedbackVector* vector =
- LiteralsArray::cast(cell->value())->feedback_vector();
- vector->ClearKeyedStoreICs(shared);
- }
- }
- }
- }
-}
-
-
-void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
- Isolate* isolate = GetIsolate();
-
- Code* host = shared->code();
- Object* uninitialized_sentinel =
- TypeFeedbackVector::RawUninitializedSentinel(isolate);
-
- TypeFeedbackMetadataIterator iter(metadata());
- while (iter.HasNext()) {
- FeedbackVectorSlot slot = iter.Next();
- FeedbackVectorSlotKind kind = iter.kind();
- if (kind != FeedbackVectorSlotKind::KEYED_STORE_IC) continue;
- Object* obj = Get(slot);
- if (obj != uninitialized_sentinel) {
- KeyedStoreICNexus nexus(this, slot);
- nexus.Clear(host);
- }
- }
-}
-
-
-// static
Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
return isolate->factory()->dummy_vector();
}
@@ -713,7 +702,7 @@ void LoadGlobalICNexus::ConfigurePropertyCellMode(Handle<PropertyCell> cell) {
SKIP_WRITE_BARRIER);
}
-void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Code> handler) {
+void LoadGlobalICNexus::ConfigureHandlerMode(Handle<Object> handler) {
SetFeedback(GetIsolate()->heap()->empty_weak_cell());
SetFeedbackExtra(*handler);
}
@@ -811,10 +800,9 @@ void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
InstallHandlers(array, maps, handlers);
}
-
void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
MapHandleList* transitioned_maps,
- CodeHandleList* handlers) {
+ List<Handle<Object>>* handlers) {
int receiver_count = maps->length();
DCHECK(receiver_count > 1);
Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 3);
@@ -1011,7 +999,14 @@ KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
FindHandlers(&handlers, maps.length());
for (int i = 0; i < handlers.length(); i++) {
// The first handler that isn't the slow handler will have the bits we need.
- Handle<Code> handler = Handle<Code>::cast(handlers.at(i));
+ Handle<Object> maybe_code_handler = handlers.at(i);
+ Handle<Code> handler;
+ if (maybe_code_handler->IsTuple2()) {
+ Handle<Tuple2> data_handler = Handle<Tuple2>::cast(maybe_code_handler);
+ handler = handle(Code::cast(data_handler->value2()));
+ } else {
+ handler = Handle<Code>::cast(maybe_code_handler);
+ }
CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
@@ -1076,5 +1071,27 @@ CompareOperationHint CompareICNexus::GetCompareOperationFeedback() const {
return CompareOperationHintFromFeedback(feedback);
}
+InlineCacheState StoreDataPropertyInLiteralICNexus::StateFromFeedback() const {
+ Isolate* isolate = GetIsolate();
+ Object* feedback = GetFeedback();
+
+ if (feedback == *TypeFeedbackVector::UninitializedSentinel(isolate)) {
+ return UNINITIALIZED;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ }
+
+ return MEGAMORPHIC;
+}
+
+void StoreDataPropertyInLiteralICNexus::ConfigureMonomorphic(
+ Handle<Name> name, Handle<Map> receiver_map) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+
+ SetFeedback(*cell);
+ SetFeedbackExtra(*name);
+}
+
} // namespace internal
} // namespace v8
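The new StoreDataPropertyInLiteralICNexus follows the usual IC lattice: UNINITIALIZED until the first store, MONOMORPHIC while the feedback is a WeakCell holding the receiver map (with the name in the extra slot), MEGAMORPHIC for anything else. A sketch of driving it, assuming a vector that already contains one such slot:

#include "src/type-feedback-vector.h"

namespace i = v8::internal;

void DriveNexus(i::Handle<i::TypeFeedbackVector> vector,
                i::FeedbackVectorSlot slot, i::Handle<i::Name> name,
                i::Handle<i::Map> receiver_map) {
  i::StoreDataPropertyInLiteralICNexus nexus(vector, slot);
  // Fresh slot: StateFromFeedback() reports UNINITIALIZED.
  nexus.ConfigureMonomorphic(name, receiver_map);
  // Feedback is now a WeakCell of receiver_map, so StateFromFeedback()
  // reports MONOMORPHIC (the cell is deliberately not checked for clearing).
}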
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 3bb51c1d34..c9eae023a8 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -30,7 +30,10 @@ enum class FeedbackVectorSlotKind {
KEYED_STORE_IC,
INTERPRETER_BINARYOP_IC,
INTERPRETER_COMPARE_IC,
+ STORE_DATA_PROPERTY_IN_LITERAL_IC,
+ // This kind of slot has an integer parameter associated with it.
+ CREATE_CLOSURE,
// This is a general purpose slot that occupies one feedback vector element.
GENERAL,
@@ -53,11 +56,15 @@ class FeedbackVectorSpecBase {
return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
}
- FeedbackVectorSlot AddLoadGlobalICSlot(Handle<String> name) {
- This()->append_name(name);
+ FeedbackVectorSlot AddLoadGlobalICSlot() {
return AddSlot(FeedbackVectorSlotKind::LOAD_GLOBAL_IC);
}
+ FeedbackVectorSlot AddCreateClosureSlot(int size) {
+ This()->append_parameter(size);
+ return AddSlot(FeedbackVectorSlotKind::CREATE_CLOSURE);
+ }
+
FeedbackVectorSlot AddKeyedLoadICSlot() {
return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
}
@@ -82,6 +89,10 @@ class FeedbackVectorSpecBase {
return AddSlot(FeedbackVectorSlotKind::GENERAL);
}
+ FeedbackVectorSlot AddStoreDataPropertyInLiteralICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC);
+ }
+
#ifdef OBJECT_PRINT
// For gdb debugging.
void Print();
@@ -97,7 +108,7 @@ class FeedbackVectorSpecBase {
class StaticFeedbackVectorSpec
: public FeedbackVectorSpecBase<StaticFeedbackVectorSpec> {
public:
- StaticFeedbackVectorSpec() : slot_count_(0), name_count_(0) {}
+ StaticFeedbackVectorSpec() : slot_count_(0), parameters_count_(0) {}
int slots() const { return slot_count_; }
@@ -106,11 +117,11 @@ class StaticFeedbackVectorSpec
return kinds_[slot];
}
- int name_count() const { return name_count_; }
+ int parameters_count() const { return parameters_count_; }
- Handle<String> GetName(int index) const {
- DCHECK(index >= 0 && index < name_count_);
- return names_[index];
+ int GetParameter(int index) const {
+ DCHECK(index >= 0 && index < parameters_count_);
+ return parameters_[index];
}
private:
@@ -121,25 +132,26 @@ class StaticFeedbackVectorSpec
kinds_[slot_count_++] = kind;
}
- void append_name(Handle<String> name) {
- DCHECK(name_count_ < kMaxLength);
- names_[name_count_++] = name;
+ void append_parameter(int parameter) {
+ DCHECK(parameters_count_ < kMaxLength);
+ parameters_[parameters_count_++] = parameter;
}
static const int kMaxLength = 12;
int slot_count_;
FeedbackVectorSlotKind kinds_[kMaxLength];
- int name_count_;
- Handle<String> names_[kMaxLength];
+ int parameters_count_;
+ int parameters_[kMaxLength];
};
class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
public:
- explicit FeedbackVectorSpec(Zone* zone) : slot_kinds_(zone), names_(zone) {
+ explicit FeedbackVectorSpec(Zone* zone)
+ : slot_kinds_(zone), parameters_(zone) {
slot_kinds_.reserve(16);
- names_.reserve(8);
+ parameters_.reserve(8);
}
int slots() const { return static_cast<int>(slot_kinds_.size()); }
@@ -148,9 +160,9 @@ class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
return static_cast<FeedbackVectorSlotKind>(slot_kinds_.at(slot));
}
- int name_count() const { return static_cast<int>(names_.size()); }
+ int parameters_count() const { return static_cast<int>(parameters_.size()); }
- Handle<String> GetName(int index) const { return names_.at(index); }
+ int GetParameter(int index) const { return parameters_.at(index); }
private:
friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
@@ -159,17 +171,18 @@ class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
slot_kinds_.push_back(static_cast<unsigned char>(kind));
}
- void append_name(Handle<String> name) { names_.push_back(name); }
+ void append_parameter(int parameter) { parameters_.push_back(parameter); }
ZoneVector<unsigned char> slot_kinds_;
- ZoneVector<Handle<String>> names_;
+ ZoneVector<int> parameters_;
};
// The shape of the TypeFeedbackMetadata is an array with:
// 0: slot_count
-// 1: names table
-// 2..N: slot kinds packed into a bit vector
+// 1: parameters table
+// 2..N: slot kinds packed into a bit vector
//
class TypeFeedbackMetadata : public FixedArray {
public:
@@ -177,18 +190,14 @@ class TypeFeedbackMetadata : public FixedArray {
static inline TypeFeedbackMetadata* cast(Object* obj);
static const int kSlotsCountIndex = 0;
- static const int kNamesTableIndex = 1;
+ static const int kParametersTableIndex = 1;
static const int kReservedIndexCount = 2;
- static const int kNameTableEntrySize = 2;
- static const int kNameTableSlotIndex = 0;
- static const int kNameTableNameIndex = 1;
-
// Returns number of feedback vector elements used by given slot kind.
static inline int GetSlotSize(FeedbackVectorSlotKind kind);
- // Defines if slots of given kind require "name".
- static inline bool SlotRequiresName(FeedbackVectorSlotKind kind);
+ // Defines if slots of given kind require "parameter".
+ static inline bool SlotRequiresParameter(FeedbackVectorSlotKind kind);
bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
@@ -202,8 +211,8 @@ class TypeFeedbackMetadata : public FixedArray {
// Returns slot kind for given slot.
FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
- // Returns name for given slot.
- String* GetName(FeedbackVectorSlot slot) const;
+ // Returns the parameter at the given index (a parameter index, not a slot).
+ int GetParameter(int parameter_index) const;
template <typename Spec>
static Handle<TypeFeedbackMetadata> New(Isolate* isolate, const Spec* spec);
@@ -273,8 +282,8 @@ class TypeFeedbackVector : public FixedArray {
// Returns slot kind for given slot.
FeedbackVectorSlotKind GetKind(FeedbackVectorSlot slot) const;
- // Returns name corresponding to given slot or an empty string.
- String* GetName(FeedbackVectorSlot slot) const;
+ // Returns the parameter corresponding to the given slot.
+ int GetParameter(FeedbackVectorSlot slot) const;
static Handle<TypeFeedbackVector> New(Isolate* isolate,
Handle<TypeFeedbackMetadata> metadata);
@@ -296,9 +305,6 @@ class TypeFeedbackVector : public FixedArray {
ClearSlotsImpl(shared, false);
}
- static void ClearAllKeyedStoreICs(Isolate* isolate);
- void ClearKeyedStoreICs(SharedFunctionInfo* shared);
-
// The object that indicates an uninitialized cache.
static inline Handle<Symbol> UninitializedSentinel(Isolate* isolate);
@@ -318,10 +324,6 @@ class TypeFeedbackVector : public FixedArray {
static const int kDummyKeyedStoreICSlot = 6;
static Handle<TypeFeedbackVector> DummyVector(Isolate* isolate);
- static FeedbackVectorSlot DummySlot(int dummyIndex) {
- DCHECK(dummyIndex >= 0 && dummyIndex <= kDummyKeyedStoreICSlot);
- return FeedbackVectorSlot(dummyIndex);
- }
private:
void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
@@ -371,11 +373,6 @@ class TypeFeedbackMetadataIterator {
// Returns entry size of the last slot returned by Next().
inline int entry_size() const;
- String* name() const {
- DCHECK(TypeFeedbackMetadata::SlotRequiresName(kind()));
- return metadata()->GetName(cur_slot_);
- }
-
private:
TypeFeedbackMetadata* metadata() const {
return !metadata_handle_.is_null() ? *metadata_handle_ : metadata_;
@@ -555,7 +552,7 @@ class LoadGlobalICNexus : public FeedbackNexus {
void ConfigureUninitialized() override;
void ConfigurePropertyCellMode(Handle<PropertyCell> cell);
- void ConfigureHandlerMode(Handle<Code> handler);
+ void ConfigureHandlerMode(Handle<Object> handler);
InlineCacheState StateFromFeedback() const override;
};
@@ -643,7 +640,7 @@ class KeyedStoreICNexus : public FeedbackNexus {
List<Handle<Object>>* handlers);
void ConfigurePolymorphic(MapHandleList* maps,
MapHandleList* transitioned_maps,
- CodeHandleList* handlers);
+ List<Handle<Object>>* handlers);
void ConfigureMegamorphicKeyed(IcCheckType property_type);
KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
@@ -715,6 +712,28 @@ class CompareICNexus final : public FeedbackNexus {
}
};
+class StoreDataPropertyInLiteralICNexus : public FeedbackNexus {
+ public:
+ StoreDataPropertyInLiteralICNexus(Handle<TypeFeedbackVector> vector,
+ FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC,
+ vector->GetKind(slot));
+ }
+ StoreDataPropertyInLiteralICNexus(TypeFeedbackVector* vector,
+ FeedbackVectorSlot slot)
+ : FeedbackNexus(vector, slot) {
+ DCHECK_EQ(FeedbackVectorSlotKind::STORE_DATA_PROPERTY_IN_LITERAL_IC,
+ vector->GetKind(slot));
+ }
+
+ void Clear(Code* host) { ConfigureUninitialized(); }
+
+ void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map);
+
+ InlineCacheState StateFromFeedback() const override;
+};
+
inline BinaryOperationHint BinaryOperationHintFromFeedback(int type_feedback);
inline CompareOperationHint CompareOperationHintFromFeedback(int type_feedback);
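Where a LOAD_GLOBAL_IC slot used to carry a name, a CREATE_CLOSURE slot now carries an integer parameter (its number of literals), and only parameterized kinds contribute to the parameters table. Requesting slots through the reworked spec API, as a sketch that assumes a live Zone:

#include "src/type-feedback-vector.h"
#include "src/zone/zone.h"

namespace i = v8::internal;

void BuildSpec(i::Zone* zone) {
  i::FeedbackVectorSpec spec(zone);
  spec.AddCreateClosureSlot(2);  // CREATE_CLOSURE slot with parameter 2
  spec.AddLoadGlobalICSlot();    // LOAD_GLOBAL_IC no longer takes a name
  // spec.parameters_count() == 1 and spec.GetParameter(0) == 2: only the
  // CREATE_CLOSURE slot added an entry to the parameters table.
}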
diff --git a/deps/v8/src/type-hints.cc b/deps/v8/src/type-hints.cc
index 1c40c59b63..4267ab8906 100644
--- a/deps/v8/src/type-hints.cc
+++ b/deps/v8/src/type-hints.cc
@@ -36,6 +36,10 @@ std::ostream& operator<<(std::ostream& os, CompareOperationHint hint) {
return os << "Number";
case CompareOperationHint::kNumberOrOddball:
return os << "NumberOrOddball";
+ case CompareOperationHint::kInternalizedString:
+ return os << "InternalizedString";
+ case CompareOperationHint::kString:
+ return os << "String";
case CompareOperationHint::kAny:
return os << "Any";
}
@@ -74,6 +78,37 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHint hint) {
return os;
}
+std::string ToString(ToBooleanHint hint) {
+ switch (hint) {
+ case ToBooleanHint::kNone:
+ return "None";
+ case ToBooleanHint::kUndefined:
+ return "Undefined";
+ case ToBooleanHint::kBoolean:
+ return "Boolean";
+ case ToBooleanHint::kNull:
+ return "Null";
+ case ToBooleanHint::kSmallInteger:
+ return "SmallInteger";
+ case ToBooleanHint::kReceiver:
+ return "Receiver";
+ case ToBooleanHint::kString:
+ return "String";
+ case ToBooleanHint::kSymbol:
+ return "Symbol";
+ case ToBooleanHint::kHeapNumber:
+ return "HeapNumber";
+ case ToBooleanHint::kSimdValue:
+ return "SimdValue";
+ case ToBooleanHint::kAny:
+ return "Any";
+ case ToBooleanHint::kNeedsMap:
+ return "NeedsMap";
+ }
+ UNREACHABLE();
+ return "";
+}
+
std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
if (hints == ToBooleanHint::kAny) return os << "Any";
if (hints == ToBooleanHint::kNone) return os << "None";
@@ -89,6 +124,22 @@ std::ostream& operator<<(std::ostream& os, ToBooleanHints hints) {
return os;
}
+std::string ToString(ToBooleanHints hints) {
+ if (hints == ToBooleanHint::kAny) return "Any";
+ if (hints == ToBooleanHint::kNone) return "None";
+ std::string ret;
+ bool first = true;
+ for (ToBooleanHints::mask_type i = 0; i < sizeof(i) * 8; ++i) {
+ ToBooleanHint const hint = static_cast<ToBooleanHint>(1u << i);
+ if (hints & hint) {
+ if (!first) ret += "|";
+ first = false;
+ ret += ToString(hint);
+ }
+ }
+ return ret;
+}
+
std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
switch (flags) {
case STRING_ADD_CHECK_NONE:
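The new ToString overloads mirror the stream operators: a single hint maps to its name, and a hint mask joins the names of its set bits with '|' in bit order. For example (a sketch):

#include <string>

#include "src/type-hints.h"

namespace i = v8::internal;

std::string HintDemo() {
  std::string one = i::ToString(i::ToBooleanHint::kNull);  // "Null"
  i::ToBooleanHints two =
      i::ToBooleanHint::kUndefined | i::ToBooleanHint::kNull;
  std::string joined = i::ToString(two);  // "Undefined|Null", lowest bit first
  return one + " " + joined;
}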
diff --git a/deps/v8/src/type-hints.h b/deps/v8/src/type-hints.h
index e6138c771d..0364154593 100644
--- a/deps/v8/src/type-hints.h
+++ b/deps/v8/src/type-hints.h
@@ -33,6 +33,8 @@ enum class CompareOperationHint : uint8_t {
kSignedSmall,
kNumber,
kNumberOrOddball,
+ kInternalizedString,
+ kString,
kAny
};
@@ -61,10 +63,12 @@ enum class ToBooleanHint : uint16_t {
};
std::ostream& operator<<(std::ostream&, ToBooleanHint);
+std::string ToString(ToBooleanHint);
typedef base::Flags<ToBooleanHint, uint16_t> ToBooleanHints;
std::ostream& operator<<(std::ostream&, ToBooleanHints);
+std::string ToString(ToBooleanHints);
DEFINE_OPERATORS_FOR_FLAGS(ToBooleanHints)
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index fd3a2dc01e..d1a89c3aeb 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -203,6 +203,10 @@ AstType* CompareOpHintToType(CompareOperationHint hint) {
return AstType::Number();
case CompareOperationHint::kNumberOrOddball:
return AstType::NumberOrOddball();
+ case CompareOperationHint::kInternalizedString:
+ return AstType::InternalizedString();
+ case CompareOperationHint::kString:
+ return AstType::String();
case CompareOperationHint::kAny:
return AstType::Any();
}
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index bd5589cc89..0ea1de1e07 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -20,6 +20,7 @@
#include "src/globals.h"
#include "src/list.h"
#include "src/vector.h"
+#include "src/zone/zone.h"
namespace v8 {
namespace internal {
@@ -946,19 +947,6 @@ class BailoutId {
int id_;
};
-class TokenDispenserForFinally {
- public:
- int GetBreakContinueToken() { return next_token_++; }
- static const int kFallThroughToken = 0;
- static const int kThrowToken = 1;
- static const int kReturnToken = 2;
-
- static const int kFirstBreakContinueToken = 3;
- static const int kInvalidToken = -1;
-
- private:
- int next_token_ = kFirstBreakContinueToken;
-};
// ----------------------------------------------------------------------------
// I/O support.
@@ -1640,9 +1628,31 @@ class ThreadedList final {
friend class ThreadedList;
};
+ class ConstIterator final {
+ public:
+ ConstIterator& operator++() {
+ entry_ = (*entry_)->next();
+ return *this;
+ }
+ bool operator!=(const ConstIterator& other) {
+ return entry_ != other.entry_;
+ }
+ const T* operator*() const { return *entry_; }
+
+ private:
+ explicit ConstIterator(T* const* entry) : entry_(entry) {}
+
+ T* const* entry_;
+
+ friend class ThreadedList;
+ };
+
Iterator begin() { return Iterator(&head_); }
Iterator end() { return Iterator(tail_); }
+ ConstIterator begin() const { return ConstIterator(&head_); }
+ ConstIterator end() const { return ConstIterator(tail_); }
+
void Rewind(Iterator reset_point) {
tail_ = reset_point.entry_;
*tail_ = nullptr;
@@ -1677,6 +1687,21 @@ class ThreadedList final {
DISALLOW_COPY_AND_ASSIGN(ThreadedList);
};
+// Can be used to create a threaded list of |T|.
+template <typename T>
+class ThreadedListZoneEntry final : public ZoneObject {
+ public:
+ explicit ThreadedListZoneEntry(T value) : value_(value), next_(nullptr) {}
+
+ T value() { return value_; }
+ ThreadedListZoneEntry<T>** next() { return &next_; }
+
+ private:
+ T value_;
+ ThreadedListZoneEntry<T>* next_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadedListZoneEntry);
+};
+
} // namespace internal
} // namespace v8
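ConstIterator lets range-based for work on a const ThreadedList, and ThreadedListZoneEntry wraps a plain value so it can join such an intrusive list inside a Zone. A minimal sketch with a hand-rolled node type (Node and Sum are illustrations; ThreadedList::Add is part of the existing class, outside this hunk):

#include "src/utils.h"

// Node supplies the intrusive next() hook that ThreadedList threads through.
struct Node {
  explicit Node(int v) : value(v), next_(nullptr) {}
  int value;
  Node** next() { return &next_; }
  Node* next_;
};

int Sum(const v8::internal::ThreadedList<Node>& list) {
  int total = 0;
  for (const Node* n : list)  // resolves to the new const begin()/end()
    total += n->value;
  return total;
}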
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 7f0230aa4c..7fd8dc0930 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -66,7 +66,7 @@ void V8::InitializeOncePerProcessImpl() {
FLAG_max_semi_space_size = 1;
}
- if (FLAG_turbo && strcmp(FLAG_turbo_filter, "~~") == 0) {
+ if (FLAG_opt && FLAG_turbo && strcmp(FLAG_turbo_filter, "~~") == 0) {
const char* filter_flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(filter_flag, StrLength(filter_flag));
}
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index 020ec0928f..0a76944e50 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -397,6 +397,7 @@
'../include/v8-profiler.h',
'../include/v8-testing.h',
'../include/v8-util.h',
+ '../include/v8-version-string.h',
'../include/v8-version.h',
'../include/v8.h',
'../include/v8config.h',
@@ -431,10 +432,13 @@
'asmjs/switch-logic.cc',
'assembler.cc',
'assembler.h',
+ 'assembler-inl.h',
'assert-scope.h',
'assert-scope.cc',
'ast/ast-expression-rewriter.cc',
'ast/ast-expression-rewriter.h',
+ 'ast/ast-function-literal-id-reindexer.cc',
+ 'ast/ast-function-literal-id-reindexer.h',
'ast/ast-literal-reindexer.cc',
'ast/ast-literal-reindexer.h',
'ast/ast-numbering.cc',
@@ -455,7 +459,6 @@
'ast/modules.h',
'ast/prettyprinter.cc',
'ast/prettyprinter.h',
- 'ast/scopeinfo.cc',
'ast/scopes.cc',
'ast/scopes.h',
'ast/variables.cc',
@@ -481,6 +484,8 @@
'builtins/builtins-call.cc',
'builtins/builtins-callsite.cc',
'builtins/builtins-conversion.cc',
+ 'builtins/builtins-constructor.cc',
+ 'builtins/builtins-constructor.h',
'builtins/builtins-dataview.cc',
'builtins/builtins-date.cc',
'builtins/builtins-debug.cc',
@@ -489,14 +494,15 @@
'builtins/builtins-generator.cc',
'builtins/builtins-global.cc',
'builtins/builtins-handler.cc',
+ 'builtins/builtins-ic.cc',
'builtins/builtins-internal.cc',
'builtins/builtins-interpreter.cc',
- 'builtins/builtins-iterator.cc',
'builtins/builtins-json.cc',
'builtins/builtins-math.cc',
'builtins/builtins-number.cc',
'builtins/builtins-object.cc',
'builtins/builtins-promise.cc',
+ 'builtins/builtins-promise.h',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
@@ -548,12 +554,12 @@
'compiler/basic-block-instrumentor.h',
'compiler/branch-elimination.cc',
'compiler/branch-elimination.h',
- 'compiler/bytecode-branch-analysis.cc',
- 'compiler/bytecode-branch-analysis.h',
+ 'compiler/bytecode-analysis.cc',
+ 'compiler/bytecode-analysis.h',
'compiler/bytecode-graph-builder.cc',
'compiler/bytecode-graph-builder.h',
- 'compiler/bytecode-loop-analysis.cc',
- 'compiler/bytecode-loop-analysis.h',
+ 'compiler/bytecode-liveness-map.cc',
+ 'compiler/bytecode-liveness-map.h',
'compiler/c-linkage.cc',
'compiler/checkpoint-elimination.cc',
'compiler/checkpoint-elimination.h',
@@ -591,6 +597,8 @@
'compiler/frame-states.h',
'compiler/gap-resolver.cc',
'compiler/gap-resolver.h',
+ 'compiler/graph-assembler.cc',
+ 'compiler/graph-assembler.h',
'compiler/graph-reducer.cc',
'compiler/graph-reducer.h',
'compiler/graph-replay.cc',
@@ -726,8 +734,6 @@
'compiler/types.h',
'compiler/type-cache.cc',
'compiler/type-cache.h',
- 'compiler/type-hint-analyzer.cc',
- 'compiler/type-hint-analyzer.h',
'compiler/typed-optimization.cc',
'compiler/typed-optimization.h',
'compiler/typer.cc',
@@ -742,6 +748,8 @@
'compiler/wasm-linkage.cc',
'compiler/zone-stats.cc',
'compiler/zone-stats.h',
+ 'compiler-dispatcher/compiler-dispatcher.cc',
+ 'compiler-dispatcher/compiler-dispatcher.h',
'compiler-dispatcher/compiler-dispatcher-job.cc',
'compiler-dispatcher/compiler-dispatcher-job.h',
'compiler-dispatcher/compiler-dispatcher-tracer.cc',
@@ -836,6 +844,7 @@
'debug/debug-scopes.h',
'debug/debug.cc',
'debug/debug.h',
+ 'debug/interface-types.h',
'debug/liveedit.cc',
'debug/liveedit.h',
'deoptimize-reason.cc',
@@ -909,6 +918,8 @@
'heap/array-buffer-tracker.h',
'heap/code-stats.cc',
'heap/code-stats.h',
+ 'heap/embedder-tracing.cc',
+ 'heap/embedder-tracing.h',
'heap/memory-reducer.cc',
'heap/memory-reducer.h',
'heap/gc-idle-time-handler.cc',
@@ -952,6 +963,9 @@
'ic/access-compiler-data.h',
'ic/access-compiler.cc',
'ic/access-compiler.h',
+ 'ic/accessor-assembler.cc',
+ 'ic/accessor-assembler-impl.h',
+ 'ic/accessor-assembler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
'ic/handler-compiler.cc',
@@ -961,6 +975,8 @@
'ic/ic-inl.h',
'ic/ic-state.cc',
'ic/ic-state.h',
+ 'ic/ic-stats.cc',
+ 'ic/ic-stats.h',
'ic/ic.cc',
'ic/ic.h',
'ic/ic-compiler.cc',
@@ -973,10 +989,14 @@
'interface-descriptors.h',
'interpreter/bytecodes.cc',
'interpreter/bytecodes.h',
+ 'interpreter/bytecode-array-accessor.cc',
+ 'interpreter/bytecode-array-accessor.h',
'interpreter/bytecode-array-builder.cc',
'interpreter/bytecode-array-builder.h',
'interpreter/bytecode-array-iterator.cc',
'interpreter/bytecode-array-iterator.h',
+ 'interpreter/bytecode-array-random-iterator.cc',
+ 'interpreter/bytecode-array-random-iterator.h',
'interpreter/bytecode-array-writer.cc',
'interpreter/bytecode-array-writer.h',
'interpreter/bytecode-dead-code-optimizer.cc',
@@ -1040,6 +1060,8 @@
'lookup-cache.h',
'lookup.cc',
'lookup.h',
+ 'map-updater.cc',
+ 'map-updater.h',
'macro-assembler.h',
'machine-type.cc',
'machine-type.h',
@@ -1053,6 +1075,11 @@
'objects-printer.cc',
'objects.cc',
'objects.h',
+ 'objects/module-info.h',
+ 'objects/object-macros.h',
+ 'objects/object-macros-undef.h',
+ 'objects/scope-info.cc',
+ 'objects/scope-info.h',
'ostreams.cc',
'ostreams.h',
'parsing/duplicate-finder.cc',
@@ -1067,6 +1094,8 @@
'parsing/parser-base.h',
'parsing/parser.cc',
'parsing/parser.h',
+ 'parsing/parsing.cc',
+ 'parsing/parsing.h',
'parsing/pattern-rewriter.cc',
'parsing/preparse-data-format.h',
'parsing/preparse-data.cc',
@@ -1112,8 +1141,6 @@
'profiler/tracing-cpu-profiler.h',
'profiler/unbound-queue-inl.h',
'profiler/unbound-queue.h',
- 'promise-utils.h',
- 'promise-utils.cc',
'property-descriptor.cc',
'property-descriptor.h',
'property-details.h',
@@ -1213,6 +1240,8 @@
'startup-data-util.h',
'string-builder.cc',
'string-builder.h',
+ 'string-case.cc',
+ 'string-case.h',
'string-search.h',
'string-stream.cc',
'string-stream.h',
@@ -1229,6 +1258,7 @@
'transitions-inl.h',
'transitions.cc',
'transitions.h',
+ 'trap-handler/trap-handler.h',
'type-feedback-vector-inl.h',
'type-feedback-vector.cc',
'type-feedback-vector.h',
@@ -1260,9 +1290,9 @@
'version.h',
'vm-state-inl.h',
'vm-state.h',
- 'wasm/ast-decoder.cc',
- 'wasm/ast-decoder.h',
'wasm/decoder.h',
+ 'wasm/function-body-decoder.cc',
+ 'wasm/function-body-decoder.h',
'wasm/leb-helper.h',
'wasm/managed.h',
'wasm/module-decoder.cc',
@@ -1274,6 +1304,7 @@
'wasm/wasm-external-refs.h',
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
+ 'wasm/wasm-limits.h',
'wasm/wasm-macro-gen.h',
'wasm/wasm-module.cc',
'wasm/wasm-module.h',
@@ -1287,6 +1318,8 @@
'wasm/wasm-opcodes.h',
'wasm/wasm-result.cc',
'wasm/wasm-result.h',
+ 'wasm/wasm-text.cc',
+ 'wasm/wasm-text.h',
'zone/accounting-allocator.cc',
'zone/accounting-allocator.h',
'zone/zone-segment.cc',
@@ -1298,6 +1331,7 @@
'zone/zone-segment.h',
'zone/zone-allocator.h',
'zone/zone-containers.h',
+ 'zone/zone-handle-set.h',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -1351,8 +1385,6 @@
'ic/arm/access-compiler-arm.cc',
'ic/arm/handler-compiler-arm.cc',
'ic/arm/ic-arm.cc',
- 'ic/arm/ic-compiler-arm.cc',
- 'ic/arm/stub-cache-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.cc',
'regexp/arm/regexp-macro-assembler-arm.h',
],
@@ -1411,8 +1443,6 @@
'ic/arm64/access-compiler-arm64.cc',
'ic/arm64/handler-compiler-arm64.cc',
'ic/arm64/ic-arm64.cc',
- 'ic/arm64/ic-compiler-arm64.cc',
- 'ic/arm64/stub-cache-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.cc',
'regexp/arm64/regexp-macro-assembler-arm64.h',
],
@@ -1452,8 +1482,6 @@
'ic/ia32/access-compiler-ia32.cc',
'ic/ia32/handler-compiler-ia32.cc',
'ic/ia32/ic-ia32.cc',
- 'ic/ia32/ic-compiler-ia32.cc',
- 'ic/ia32/stub-cache-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.cc',
'regexp/ia32/regexp-macro-assembler-ia32.h',
],
@@ -1493,8 +1521,6 @@
'ic/x87/access-compiler-x87.cc',
'ic/x87/handler-compiler-x87.cc',
'ic/x87/ic-x87.cc',
- 'ic/x87/ic-compiler-x87.cc',
- 'ic/x87/stub-cache-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.cc',
'regexp/x87/regexp-macro-assembler-x87.h',
],
@@ -1536,8 +1562,6 @@
'ic/mips/access-compiler-mips.cc',
'ic/mips/handler-compiler-mips.cc',
'ic/mips/ic-mips.cc',
- 'ic/mips/ic-compiler-mips.cc',
- 'ic/mips/stub-cache-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.cc',
'regexp/mips/regexp-macro-assembler-mips.h',
],
@@ -1579,8 +1603,6 @@
'ic/mips64/access-compiler-mips64.cc',
'ic/mips64/handler-compiler-mips64.cc',
'ic/mips64/ic-mips64.cc',
- 'ic/mips64/ic-compiler-mips64.cc',
- 'ic/mips64/stub-cache-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.cc',
'regexp/mips64/regexp-macro-assembler-mips64.h',
],
@@ -1624,8 +1646,6 @@
'ic/x64/access-compiler-x64.cc',
'ic/x64/handler-compiler-x64.cc',
'ic/x64/ic-x64.cc',
- 'ic/x64/ic-compiler-x64.cc',
- 'ic/x64/stub-cache-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.cc',
'regexp/x64/regexp-macro-assembler-x64.h',
'third_party/valgrind/valgrind.h',
@@ -1649,8 +1669,6 @@
'ic/ppc/access-compiler-ppc.cc',
'ic/ppc/handler-compiler-ppc.cc',
'ic/ppc/ic-ppc.cc',
- 'ic/ppc/ic-compiler-ppc.cc',
- 'ic/ppc/stub-cache-ppc.cc',
'ppc/assembler-ppc-inl.h',
'ppc/assembler-ppc.cc',
'ppc/assembler-ppc.h',
@@ -1691,9 +1709,7 @@
'full-codegen/s390/full-codegen-s390.cc',
'ic/s390/access-compiler-s390.cc',
'ic/s390/handler-compiler-s390.cc',
- 'ic/s390/ic-compiler-s390.cc',
'ic/s390/ic-s390.cc',
- 'ic/s390/stub-cache-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.cc',
'regexp/s390/regexp-macro-assembler-s390.h',
's390/assembler-s390.cc',
@@ -2231,7 +2247,6 @@
'js/prologue.js',
'js/runtime.js',
'js/v8natives.js',
- 'js/symbol.js',
'js/array.js',
'js/string.js',
'js/arraybuffer.js',
@@ -2245,6 +2260,7 @@
'js/spread.js',
'js/proxy.js',
'js/async-await.js',
+ 'js/harmony-string-padding.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
@@ -2254,7 +2270,6 @@
'messages.h',
'js/harmony-atomics.js',
'js/harmony-simd.js',
- 'js/harmony-string-padding.js',
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index c6abb8a85c..f19197af75 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -110,8 +110,8 @@ enum class SerializationTag : uint8_t {
// ObjectReference to one) serialized just before it. This is a quirk arising
// from the previous stack-based implementation.
kArrayBufferView = 'V',
- // Shared array buffer (transferred). transferID:uint32_t
- kSharedArrayBufferTransfer = 'u',
+ // Shared array buffer. transferID:uint32_t
+ kSharedArrayBuffer = 'u',
// Compiled WebAssembly module. encodingType:(one-byte tag).
// If encodingType == 'y' (raw bytes):
// wasmWireByteLength:uint32_t, then raw data
@@ -269,6 +269,7 @@ std::pair<uint8_t*, size_t> ValueSerializer::Release() {
void ValueSerializer::TransferArrayBuffer(uint32_t transfer_id,
Handle<JSArrayBuffer> array_buffer) {
DCHECK(!array_buffer_transfer_map_.Find(array_buffer));
+ DCHECK(!array_buffer->is_shared());
array_buffer_transfer_map_.Set(array_buffer, transfer_id);
}
@@ -400,7 +401,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
// Eliminate callable and exotic objects, which should not be serialized.
InstanceType instance_type = receiver->map()->instance_type();
- if (receiver->IsCallable() || (instance_type <= LAST_SPECIAL_RECEIVER_TYPE &&
+ if (receiver->IsCallable() || (IsSpecialReceiverInstanceType(instance_type) &&
instance_type != JS_SPECIAL_API_OBJECT_TYPE)) {
ThrowDataCloneError(MessageTemplate::kDataCloneError, receiver);
return Nothing<bool>();
@@ -417,7 +418,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_API_OBJECT_TYPE: {
Handle<JSObject> js_object = Handle<JSObject>::cast(receiver);
Map* map = js_object->map();
- if (FLAG_expose_wasm &&
+ if (!FLAG_wasm_disable_structured_cloning &&
map->GetConstructor() ==
isolate_->native_context()->wasm_module_constructor()) {
return WriteWasmModule(js_object);
@@ -442,7 +443,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_SET_TYPE:
return WriteJSSet(Handle<JSSet>::cast(receiver));
case JS_ARRAY_BUFFER_TYPE:
- return WriteJSArrayBuffer(JSArrayBuffer::cast(*receiver));
+ return WriteJSArrayBuffer(Handle<JSArrayBuffer>::cast(receiver));
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
return WriteJSArrayBufferView(JSArrayBufferView::cast(*receiver));
@@ -474,7 +475,8 @@ Maybe<bool> ValueSerializer::WriteJSObject(Handle<JSObject> object) {
Handle<Object> value;
if (V8_LIKELY(!map_changed)) map_changed = *map == object->map();
- if (V8_LIKELY(!map_changed && details.type() == DATA)) {
+ if (V8_LIKELY(!map_changed && details.location() == kField)) {
+ DCHECK_EQ(kData, details.kind());
FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
value = JSObject::FastPropertyAt(object, details.representation(),
field_index);
@@ -724,28 +726,37 @@ Maybe<bool> ValueSerializer::WriteJSSet(Handle<JSSet> set) {
return Just(true);
}
-Maybe<bool> ValueSerializer::WriteJSArrayBuffer(JSArrayBuffer* array_buffer) {
+Maybe<bool> ValueSerializer::WriteJSArrayBuffer(
+ Handle<JSArrayBuffer> array_buffer) {
+ if (array_buffer->is_shared()) {
+ if (!delegate_) {
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
+ return Nothing<bool>();
+ }
+
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
+ Maybe<uint32_t> index = delegate_->GetSharedArrayBufferId(
+ v8_isolate, Utils::ToLocalShared(array_buffer));
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate_, Nothing<bool>());
+
+ WriteTag(SerializationTag::kSharedArrayBuffer);
+ WriteVarint(index.FromJust());
+ return Just(true);
+ }
+
uint32_t* transfer_entry = array_buffer_transfer_map_.Find(array_buffer);
if (transfer_entry) {
- WriteTag(array_buffer->is_shared()
- ? SerializationTag::kSharedArrayBufferTransfer
- : SerializationTag::kArrayBufferTransfer);
+ WriteTag(SerializationTag::kArrayBufferTransfer);
WriteVarint(*transfer_entry);
return Just(true);
}
-
- if (array_buffer->is_shared()) {
- ThrowDataCloneError(
- MessageTemplate::kDataCloneErrorSharedArrayBufferNotTransferred);
- return Nothing<bool>();
- }
if (array_buffer->was_neutered()) {
ThrowDataCloneError(MessageTemplate::kDataCloneErrorNeuteredArrayBuffer);
return Nothing<bool>();
}
double byte_length = array_buffer->byte_length()->Number();
if (byte_length > std::numeric_limits<uint32_t>::max()) {
- ThrowDataCloneError(MessageTemplate::kDataCloneError, handle(array_buffer));
+ ThrowDataCloneError(MessageTemplate::kDataCloneError, array_buffer);
return Nothing<bool>();
}
WriteTag(SerializationTag::kArrayBuffer);
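
With the rewrite above, a shared array buffer no longer has to sit in the transfer map: the serializer asks the embedder for an ID via delegate_->GetSharedArrayBufferId() and emits the 'u' tag followed by that ID as a varint. A sketch of an embedder-side delegate under the v8::ValueSerializer::Delegate interface of this V8 version; the class name and the vector bookkeeping are our own assumptions, not V8 code:

    #include <vector>
    #include "include/v8.h"

    class SharedBufferDelegate : public v8::ValueSerializer::Delegate {
     public:
      explicit SharedBufferDelegate(v8::Isolate* isolate) : isolate_(isolate) {}

      void ThrowDataCloneError(v8::Local<v8::String> message) override {
        isolate_->ThrowException(v8::Exception::Error(message));
      }

      // Hand out one stable index per distinct shared buffer; the receiving
      // side must map the same index back to the same backing store.
      v8::Maybe<uint32_t> GetSharedArrayBufferId(
          v8::Isolate* isolate,
          v8::Local<v8::SharedArrayBuffer> shared) override {
        for (uint32_t i = 0; i < buffers_.size(); i++) {
          if (buffers_[i] == shared) return v8::Just(i);
        }
        buffers_.emplace_back(isolate, shared);
        return v8::Just(static_cast<uint32_t>(buffers_.size() - 1));
      }

     private:
      v8::Isolate* isolate_;
      std::vector<v8::Global<v8::SharedArrayBuffer>> buffers_;
    };
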
@@ -783,7 +794,7 @@ Maybe<bool> ValueSerializer::WriteWasmModule(Handle<JSObject> object) {
WriteTag(SerializationTag::kWasmModule);
WriteRawBytes(&encoding_tag, sizeof(encoding_tag));
- Handle<String> wire_bytes = compiled_part->module_bytes();
+ Handle<String> wire_bytes(compiled_part->module_bytes(), isolate_);
int wire_bytes_length = wire_bytes->length();
WriteVarint<uint32_t>(wire_bytes_length);
uint8_t* destination = ReserveRawBytes(wire_bytes_length);
@@ -1006,10 +1017,10 @@ void ValueDeserializer::TransferArrayBuffer(
}
Handle<SeededNumberDictionary> dictionary =
array_buffer_transfer_map_.ToHandleChecked();
- const bool used_as_prototype = false;
+ Handle<JSObject> not_a_prototype_holder;
Handle<SeededNumberDictionary> new_dictionary =
SeededNumberDictionary::AtNumberPut(dictionary, transfer_id, array_buffer,
- used_as_prototype);
+ not_a_prototype_holder);
if (!new_dictionary.is_identical_to(dictionary)) {
GlobalHandles::Destroy(Handle<Object>::cast(dictionary).location());
array_buffer_transfer_map_ = Handle<SeededNumberDictionary>::cast(
@@ -1105,7 +1116,7 @@ MaybeHandle<Object> ValueDeserializer::ReadObjectInternal() {
const bool is_shared = false;
return ReadTransferredJSArrayBuffer(is_shared);
}
- case SerializationTag::kSharedArrayBufferTransfer: {
+ case SerializationTag::kSharedArrayBuffer: {
const bool is_shared = true;
return ReadTransferredJSArrayBuffer(is_shared);
}
@@ -1443,8 +1454,10 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer() {
const bool should_initialize = false;
Handle<JSArrayBuffer> array_buffer =
isolate_->factory()->NewJSArrayBuffer(SharedFlag::kNotShared, pretenure_);
- JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
- should_initialize);
+ if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
+ should_initialize)) {
+ return MaybeHandle<JSArrayBuffer>();
+ }
memcpy(array_buffer->backing_store(), position_, byte_length);
position_ += byte_length;
AddObjectWithID(id, array_buffer);
@@ -1514,7 +1527,7 @@ MaybeHandle<JSArrayBufferView> ValueDeserializer::ReadJSArrayBufferView(
}
MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
- if (!FLAG_expose_wasm) return MaybeHandle<JSObject>();
+ if (FLAG_wasm_disable_structured_cloning) return MaybeHandle<JSObject>();
Vector<const uint8_t> encoding_tag;
if (!ReadRawBytes(sizeof(WasmEncodingTag)).To(&encoding_tag) ||
@@ -1554,8 +1567,8 @@ MaybeHandle<JSObject> ValueDeserializer::ReadWasmModule() {
wasm::ErrorThrower thrower(isolate_, "ValueDeserializer::ReadWasmModule");
return wasm::CreateModuleObjectFromBytes(
isolate_, wire_bytes.begin(), wire_bytes.end(), &thrower,
- wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(), nullptr,
- nullptr);
+ wasm::ModuleOrigin::kWasmOrigin, Handle<Script>::null(),
+ Vector<const byte>::empty());
}
MaybeHandle<JSObject> ValueDeserializer::ReadHostObject() {
@@ -1584,6 +1597,7 @@ static void CommitProperties(Handle<JSObject> object, Handle<Map> map,
DisallowHeapAllocation no_gc;
DescriptorArray* descriptors = object->map()->instance_descriptors();
for (unsigned i = 0; i < properties.size(); i++) {
+ // Initializing store.
object->WriteToField(i, descriptors->GetDetails(i), *properties[i]);
}
}
@@ -1654,8 +1668,8 @@ Maybe<uint32_t> ValueDeserializer::ReadJSObjectProperties(
->NowContains(value)) {
Handle<FieldType> value_type =
value->OptimalType(isolate_, expected_representation);
- Map::GeneralizeFieldType(target, descriptor,
- expected_representation, value_type);
+ Map::GeneralizeField(target, descriptor, expected_representation,
+ value_type);
}
DCHECK(target->instance_descriptors()
->GetFieldType(descriptor)
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 86e21cf86c..1ed9457b77 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -112,7 +112,8 @@ class ValueSerializer {
void WriteJSRegExp(JSRegExp* regexp);
Maybe<bool> WriteJSMap(Handle<JSMap> map) WARN_UNUSED_RESULT;
Maybe<bool> WriteJSSet(Handle<JSSet> map) WARN_UNUSED_RESULT;
- Maybe<bool> WriteJSArrayBuffer(JSArrayBuffer* array_buffer);
+ Maybe<bool> WriteJSArrayBuffer(Handle<JSArrayBuffer> array_buffer)
+ WARN_UNUSED_RESULT;
Maybe<bool> WriteJSArrayBufferView(JSArrayBufferView* array_buffer);
Maybe<bool> WriteWasmModule(Handle<JSObject> object) WARN_UNUSED_RESULT;
Maybe<bool> WriteHostObject(Handle<JSObject> object) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 080f89e9f4..eb5808322c 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -33,7 +33,7 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) {
+ Vector<T> SubVector(int from, int to) const {
DCHECK(0 <= from);
SLOW_DCHECK(from < to);
SLOW_DCHECK(static_cast<unsigned>(to) <= static_cast<unsigned>(length_));
@@ -119,6 +119,9 @@ class Vector {
return Vector<T>(start_ + offset, length_ - offset);
}
+ // Implicit conversion from Vector<T> to Vector<const T>.
+ inline operator Vector<const T>() { return Vector<const T>::cast(*this); }
+
// Factory method for creating empty vectors.
static Vector<T> empty() { return Vector<T>(NULL, 0); }
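
The implicit conversion added above lets a mutable Vector<T> be passed wherever a Vector<const T> is expected, e.g. the Vector<const byte> parameter that CreateModuleObjectFromBytes now takes earlier in this diff. A minimal sketch; Consume() is a hypothetical helper, not a V8 function:

    void Consume(Vector<const byte> bytes);  // hypothetical callee

    void Example(byte* data, int length) {
      Vector<byte> mutable_view(data, length);
      Consume(mutable_view);  // goes through operator Vector<const T>()
    }
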
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 7305bf2576..3252d55035 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -4,6 +4,7 @@
#include "src/version.h"
+#include "include/v8-version-string.h"
#include "include/v8-version.h"
#include "src/utils.h"
@@ -12,25 +13,6 @@
// number. This define is mainly used by the build system script.
#define SONAME ""
-#if V8_IS_CANDIDATE_VERSION
-#define CANDIDATE_STRING " (candidate)"
-#else
-#define CANDIDATE_STRING ""
-#endif
-
-#define SX(x) #x
-#define S(x) SX(x)
-
-#if V8_PATCH_LEVEL > 0
-#define VERSION_STRING \
- S(V8_MAJOR_VERSION) "." S(V8_MINOR_VERSION) "." S(V8_BUILD_NUMBER) "." S( \
- V8_PATCH_LEVEL) CANDIDATE_STRING
-#else
-#define VERSION_STRING \
- S(V8_MAJOR_VERSION) "." S(V8_MINOR_VERSION) "." S(V8_BUILD_NUMBER) \
- CANDIDATE_STRING
-#endif
-
namespace v8 {
namespace internal {
@@ -40,7 +22,7 @@ int Version::build_ = V8_BUILD_NUMBER;
int Version::patch_ = V8_PATCH_LEVEL;
bool Version::candidate_ = (V8_IS_CANDIDATE_VERSION != 0);
const char* Version::soname_ = SONAME;
-const char* Version::version_string_ = VERSION_STRING;
+const char* Version::version_string_ = V8_VERSION_STRING;
// Calculate the V8 version string.
void Version::GetString(Vector<char> str) {
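
The version-string macros deleted above move into the shared header include/v8-version-string.h, so the string can be built from the version components without pulling in version.cc. A reconstruction of what that header computes, based purely on the #defines removed here; the real header may differ in macro names and layout:

    #if V8_IS_CANDIDATE_VERSION
    #define V8_CANDIDATE_STRING " (candidate)"
    #else
    #define V8_CANDIDATE_STRING ""
    #endif

    #define V8_SX(x) #x
    #define V8_S(x) V8_SX(x)

    #if V8_PATCH_LEVEL > 0
    #define V8_VERSION_STRING                                     \
      V8_S(V8_MAJOR_VERSION) "." V8_S(V8_MINOR_VERSION) "." V8_S( \
          V8_BUILD_NUMBER) "." V8_S(V8_PATCH_LEVEL) V8_CANDIDATE_STRING
    #else
    #define V8_VERSION_STRING                                     \
      V8_S(V8_MAJOR_VERSION) "." V8_S(V8_MINOR_VERSION) "." V8_S( \
          V8_BUILD_NUMBER) V8_CANDIDATE_STRING
    #endif
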
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 2822c29819..4f54661aeb 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -2,6 +2,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
+clemensh@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index fc8f110b73..afe8701779 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -34,7 +34,12 @@ class Decoder {
Decoder(const byte* start, const byte* end)
: start_(start),
pc_(start),
- limit_(end),
+ end_(end),
+ error_pc_(nullptr),
+ error_pt_(nullptr) {}
+ Decoder(const byte* start, const byte* pc, const byte* end)
+ : start_(start),
+ pc_(pc),
end_(end),
error_pc_(nullptr),
error_pt_(nullptr) {}
@@ -44,7 +49,7 @@ class Decoder {
inline bool check(const byte* base, unsigned offset, unsigned length,
const char* msg) {
DCHECK_GE(base, start_);
- if ((base + offset + length) > limit_) {
+ if ((base + offset + length) > end_) {
error(base, base + offset, "%s", msg);
return false;
}
@@ -185,22 +190,27 @@ class Decoder {
// Consume {size} bytes and send them to the bit bucket, advancing {pc_}.
void consume_bytes(uint32_t size, const char* name = "skip") {
- TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
- size);
+#if DEBUG
+ if (name) {
+ // Only trace if the name is not null.
+ TRACE(" +%d %-20s: %d bytes\n", static_cast<int>(pc_ - start_), name,
+ size);
+ }
+#endif
if (checkAvailable(size)) {
pc_ += size;
} else {
- pc_ = limit_;
+ pc_ = end_;
}
}
- // Check that at least {size} bytes exist between {pc_} and {limit_}.
+ // Check that at least {size} bytes exist between {pc_} and {end_}.
bool checkAvailable(int size) {
intptr_t pc_overflow_value = std::numeric_limits<intptr_t>::max() - size;
if (size < 0 || (intptr_t)pc_ > pc_overflow_value) {
error(pc_, nullptr, "reading %d bytes would underflow/overflow", size);
return false;
- } else if (pc_ < start_ || limit_ < (pc_ + size)) {
+ } else if (pc_ < start_ || end_ < (pc_ + size)) {
error(pc_, nullptr, "expected %d bytes, fell off end", size);
return false;
} else {
@@ -241,11 +251,11 @@ class Decoder {
template <typename T>
T traceOffEnd() {
T t = 0;
- for (const byte* ptr = pc_; ptr < limit_; ptr++) {
+ for (const byte* ptr = pc_; ptr < end_; ptr++) {
TRACE("%02x ", *ptr);
}
TRACE("<end>\n");
- pc_ = limit_;
+ pc_ = end_;
return t;
}
@@ -272,7 +282,6 @@ class Decoder {
void Reset(const byte* start, const byte* end) {
start_ = start;
pc_ = start;
- limit_ = end;
end_ = end;
error_pc_ = nullptr;
error_pt_ = nullptr;
@@ -281,16 +290,16 @@ class Decoder {
bool ok() const { return error_msg_ == nullptr; }
bool failed() const { return !ok(); }
- bool more() const { return pc_ < limit_; }
+ bool more() const { return pc_ < end_; }
- const byte* start() { return start_; }
- const byte* pc() { return pc_; }
- uint32_t pc_offset() { return static_cast<uint32_t>(pc_ - start_); }
+ const byte* start() const { return start_; }
+ const byte* pc() const { return pc_; }
+ uint32_t pc_offset() const { return static_cast<uint32_t>(pc_ - start_); }
+ const byte* end() const { return end_; }
protected:
const byte* start_;
const byte* pc_;
- const byte* limit_;
const byte* end_;
const byte* error_pc_;
const byte* error_pt_;
@@ -308,7 +317,7 @@ class Decoder {
const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
const byte* ptr = base + offset;
const byte* end = ptr + kMaxLength;
- if (end > limit_) end = limit_;
+ if (end > end_) end = end_;
int shift = 0;
byte b = 0;
IntType result = 0;
@@ -358,7 +367,7 @@ class Decoder {
const int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
const byte* pos = pc_;
const byte* end = pc_ + kMaxLength;
- if (end > limit_) end = limit_;
+ if (end > end_) end = end_;
IntType result = 0;
int shift = 0;
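
The limit_/end_ rename above removes a redundant copy of the same bound, and the new three-argument constructor lets a decoder start at an interior pc while keeping start_ as the base for offset reporting. A small sketch under those assumptions; module_bytes, module_size, and body_offset are illustrative inputs:

    const byte* start = module_bytes;
    const byte* end = module_bytes + module_size;
    const byte* body = start + body_offset;
    Decoder decoder(start, body, end);  // pc_ == body, bounds = [start, end)
    uint32_t value = decoder.consume_u32v("first varint");
    // pc_offset() stays relative to start_, so error positions reported while
    // decoding the body still line up with the whole module buffer.
    uint32_t offset = decoder.pc_offset();
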
diff --git a/deps/v8/src/wasm/ast-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index ff6af34a02..04a2806237 100644
--- a/deps/v8/src/wasm/ast-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -9,8 +9,8 @@
#include "src/handles.h"
#include "src/zone/zone-containers.h"
-#include "src/wasm/ast-decoder.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
@@ -31,16 +31,14 @@ namespace wasm {
#define TRACE(...)
#endif
-#define CHECK_PROTOTYPE_OPCODE(flag) \
- if (module_ && module_->origin == kAsmJsOrigin) { \
- error("Opcode not supported for asmjs modules"); \
- } \
- if (!FLAG_##flag) { \
- error("Invalid opcode (enable with --" #flag ")"); \
- break; \
+#define CHECK_PROTOTYPE_OPCODE(flag) \
+ if (module_ != nullptr && module_->origin == kAsmJsOrigin) { \
+ error("Opcode not supported for asmjs modules"); \
+ } \
+ if (!FLAG_##flag) { \
+ error("Invalid opcode (enable with --" #flag ")"); \
+ break; \
}
-// TODO(titzer): this is only for intermediate migration.
-#define IMPLICIT_FUNCTION_END 1
// An SsaEnv environment carries the current local variable renaming
// as well as the current effect and control dependency in the TF graph.
@@ -70,7 +68,7 @@ struct SsaEnv {
struct Value {
const byte* pc;
TFNode* node;
- LocalType type;
+ ValueType type;
};
struct TryInfo : public ZoneObject {
@@ -87,9 +85,9 @@ struct MergeValues {
Value first;
} vals; // Either multiple values or a single value.
- Value& first() {
- DCHECK_GT(arity, 0u);
- return arity == 1 ? vals.first : vals.array[0];
+ Value& operator[](size_t i) {
+ DCHECK_GT(arity, i);
+ return arity == 1 ? vals.first : vals.array[i];
}
};
@@ -101,11 +99,12 @@ enum ControlKind { kControlIf, kControlBlock, kControlLoop, kControlTry };
struct Control {
const byte* pc;
ControlKind kind;
- int stack_depth; // stack height at the beginning of the construct.
- SsaEnv* end_env; // end environment for the construct.
- SsaEnv* false_env; // false environment (only for if).
- TryInfo* try_info; // Information used for compiling try statements.
+ size_t stack_depth; // stack height at the beginning of the construct.
+ SsaEnv* end_env; // end environment for the construct.
+ SsaEnv* false_env; // false environment (only for if).
+ TryInfo* try_info; // Information used for compiling try statements.
int32_t previous_catch; // The previous Control (on the stack) with a catch.
+ bool unreachable; // The current block has been ended.
// Values merged into the end of this control construct.
MergeValues merge;
@@ -116,30 +115,30 @@ struct Control {
inline bool is_try() const { return kind == kControlTry; }
// Named constructors.
- static Control Block(const byte* pc, int stack_depth, SsaEnv* end_env,
+ static Control Block(const byte* pc, size_t stack_depth, SsaEnv* end_env,
int32_t previous_catch) {
- return {pc, kControlBlock, stack_depth, end_env,
- nullptr, nullptr, previous_catch, {0, {NO_VALUE}}};
+ return {pc, kControlBlock, stack_depth, end_env, nullptr,
+ nullptr, previous_catch, false, {0, {NO_VALUE}}};
}
- static Control If(const byte* pc, int stack_depth, SsaEnv* end_env,
+ static Control If(const byte* pc, size_t stack_depth, SsaEnv* end_env,
SsaEnv* false_env, int32_t previous_catch) {
- return {pc, kControlIf, stack_depth, end_env,
- false_env, nullptr, previous_catch, {0, {NO_VALUE}}};
+ return {pc, kControlIf, stack_depth, end_env, false_env,
+ nullptr, previous_catch, false, {0, {NO_VALUE}}};
}
- static Control Loop(const byte* pc, int stack_depth, SsaEnv* end_env,
+ static Control Loop(const byte* pc, size_t stack_depth, SsaEnv* end_env,
int32_t previous_catch) {
- return {pc, kControlLoop, stack_depth, end_env,
- nullptr, nullptr, previous_catch, {0, {NO_VALUE}}};
+ return {pc, kControlLoop, stack_depth, end_env, nullptr,
+ nullptr, previous_catch, false, {0, {NO_VALUE}}};
}
- static Control Try(const byte* pc, int stack_depth, SsaEnv* end_env,
+ static Control Try(const byte* pc, size_t stack_depth, SsaEnv* end_env,
Zone* zone, SsaEnv* catch_env, int32_t previous_catch) {
DCHECK_NOT_NULL(catch_env);
TryInfo* try_info = new (zone) TryInfo(catch_env);
- return {pc, kControlTry, stack_depth, end_env,
- nullptr, try_info, previous_catch, {0, {NO_VALUE}}};
+ return {pc, kControlTry, stack_depth, end_env, nullptr,
+ try_info, previous_catch, false, {0, {NO_VALUE}}};
}
};
@@ -164,24 +163,123 @@ struct LaneOperand {
// lengths, etc.
class WasmDecoder : public Decoder {
public:
- WasmDecoder(ModuleEnv* module, FunctionSig* sig, const byte* start,
+ WasmDecoder(const WasmModule* module, FunctionSig* sig, const byte* start,
const byte* end)
: Decoder(start, end),
module_(module),
sig_(sig),
- total_locals_(0),
local_types_(nullptr) {}
- ModuleEnv* module_;
+ const WasmModule* module_;
FunctionSig* sig_;
- size_t total_locals_;
- ZoneVector<LocalType>* local_types_;
+
+ ZoneVector<ValueType>* local_types_;
+
+ size_t total_locals() const {
+ return local_types_ == nullptr ? 0 : local_types_->size();
+ }
+
+ static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
+ ZoneVector<ValueType>* type_list) {
+ DCHECK_NOT_NULL(type_list);
+ // Initialize from signature.
+ if (sig != nullptr) {
+ type_list->reserve(sig->parameter_count());
+ for (size_t i = 0; i < sig->parameter_count(); ++i) {
+ type_list->push_back(sig->GetParam(i));
+ }
+ }
+ // Decode local declarations, if any.
+ uint32_t entries = decoder->consume_u32v("local decls count");
+ if (decoder->failed()) return false;
+
+ TRACE("local decls count: %u\n", entries);
+ while (entries-- > 0 && decoder->ok() && decoder->more()) {
+ uint32_t count = decoder->consume_u32v("local count");
+ if (decoder->failed()) return false;
+
+ if ((count + type_list->size()) > kMaxNumWasmLocals) {
+ decoder->error(decoder->pc() - 1, "local count too large");
+ return false;
+ }
+ byte code = decoder->consume_u8("local type");
+ if (decoder->failed()) return false;
+
+ ValueType type;
+ switch (code) {
+ case kLocalI32:
+ type = kWasmI32;
+ break;
+ case kLocalI64:
+ type = kWasmI64;
+ break;
+ case kLocalF32:
+ type = kWasmF32;
+ break;
+ case kLocalF64:
+ type = kWasmF64;
+ break;
+ case kLocalS128:
+ type = kWasmS128;
+ break;
+ default:
+ decoder->error(decoder->pc() - 1, "invalid local type");
+ return false;
+ }
+ type_list->insert(type_list->end(), count, type);
+ }
+ DCHECK(decoder->ok());
+ return true;
+ }
+
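
DecodeLocals above parses the standard wasm local-declaration prelude: a varint count of entries, each entry being a varint run length followed by one type byte. A worked example of the bytes it consumes; the buffer is made up, and the type-byte values follow the wasm binary encoding assumed here (kLocalI32 = 0x7f, kLocalF64 = 0x7c):

    // Two entries: three i32 locals, then one f64 local.
    const byte decls[] = {
        0x02,        // local decls count = 2
        0x03, 0x7f,  // count = 3, type = kLocalI32
        0x01, 0x7c,  // count = 1, type = kLocalF64
    };
    // After DecodeLocals, type_list holds the signature's parameter types
    // followed by {kWasmI32, kWasmI32, kWasmI32, kWasmF64}.
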
+ static BitVector* AnalyzeLoopAssignment(Decoder* decoder, const byte* pc,
+ int locals_count, Zone* zone) {
+ if (pc >= decoder->end()) return nullptr;
+ if (*pc != kExprLoop) return nullptr;
+
+ BitVector* assigned = new (zone) BitVector(locals_count, zone);
+ int depth = 0;
+ // Iteratively process all AST nodes nested inside the loop.
+ while (pc < decoder->end() && decoder->ok()) {
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ unsigned length = 1;
+ switch (opcode) {
+ case kExprLoop:
+ case kExprIf:
+ case kExprBlock:
+ case kExprTry:
+ length = OpcodeLength(decoder, pc);
+ depth++;
+ break;
+ case kExprSetLocal: // fallthru
+ case kExprTeeLocal: {
+ LocalIndexOperand operand(decoder, pc);
+ if (assigned->length() > 0 &&
+ operand.index < static_cast<uint32_t>(assigned->length())) {
+ // Unverified code might have an out-of-bounds index.
+ assigned->Add(operand.index);
+ }
+ length = 1 + operand.length;
+ break;
+ }
+ case kExprEnd:
+ depth--;
+ break;
+ default:
+ length = OpcodeLength(decoder, pc);
+ break;
+ }
+ if (depth <= 0) break;
+ pc += length;
+ }
+ return decoder->ok() ? assigned : nullptr;
+ }
inline bool Validate(const byte* pc, LocalIndexOperand& operand) {
- if (operand.index < total_locals_) {
+ if (operand.index < total_locals()) {
if (local_types_) {
operand.type = local_types_->at(operand.index);
} else {
- operand.type = kAstStmt;
+ operand.type = kWasmStmt;
}
return true;
}
@@ -190,9 +288,8 @@ class WasmDecoder : public Decoder {
}
inline bool Validate(const byte* pc, GlobalIndexOperand& operand) {
- ModuleEnv* m = module_;
- if (m && m->module && operand.index < m->module->globals.size()) {
- operand.global = &m->module->globals[operand.index];
+ if (module_ != nullptr && operand.index < module_->globals.size()) {
+ operand.global = &module_->globals[operand.index];
operand.type = operand.global->type;
return true;
}
@@ -201,9 +298,8 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallFunctionOperand& operand) {
- ModuleEnv* m = module_;
- if (m && m->module && operand.index < m->module->functions.size()) {
- operand.sig = m->module->functions[operand.index].sig;
+ if (module_ != nullptr && operand.index < module_->functions.size()) {
+ operand.sig = module_->functions[operand.index].sig;
return true;
}
return false;
@@ -218,17 +314,15 @@ class WasmDecoder : public Decoder {
}
inline bool Complete(const byte* pc, CallIndirectOperand& operand) {
- ModuleEnv* m = module_;
- if (m && m->module && operand.index < m->module->signatures.size()) {
- operand.sig = m->module->signatures[operand.index];
+ if (module_ != nullptr && operand.index < module_->signatures.size()) {
+ operand.sig = module_->signatures[operand.index];
return true;
}
return false;
}
inline bool Validate(const byte* pc, CallIndirectOperand& operand) {
- uint32_t table_index = 0;
- if (!module_->IsValidTable(table_index)) {
+ if (module_ == nullptr || module_->function_tables.empty()) {
error("function table has to exist to execute call_indirect");
return false;
}
@@ -264,33 +358,33 @@ class WasmDecoder : public Decoder {
}
}
- unsigned OpcodeLength(const byte* pc) {
+ static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
switch (static_cast<byte>(*pc)) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
#undef DECLARE_OPCODE_CASE
{
- MemoryAccessOperand operand(this, pc, UINT32_MAX);
+ MemoryAccessOperand operand(decoder, pc, UINT32_MAX);
return 1 + operand.length;
}
case kExprBr:
case kExprBrIf: {
- BreakDepthOperand operand(this, pc);
+ BreakDepthOperand operand(decoder, pc);
return 1 + operand.length;
}
case kExprSetGlobal:
case kExprGetGlobal: {
- GlobalIndexOperand operand(this, pc);
+ GlobalIndexOperand operand(decoder, pc);
return 1 + operand.length;
}
case kExprCallFunction: {
- CallFunctionOperand operand(this, pc);
+ CallFunctionOperand operand(decoder, pc);
return 1 + operand.length;
}
case kExprCallIndirect: {
- CallIndirectOperand operand(this, pc);
+ CallIndirectOperand operand(decoder, pc);
return 1 + operand.length;
}
@@ -298,7 +392,7 @@ class WasmDecoder : public Decoder {
case kExprIf: // fall thru
case kExprLoop:
case kExprBlock: {
- BlockTypeOperand operand(this, pc);
+ BlockTypeOperand operand(decoder, pc);
return 1 + operand.length;
}
@@ -306,35 +400,33 @@ class WasmDecoder : public Decoder {
case kExprTeeLocal:
case kExprGetLocal:
case kExprCatch: {
- LocalIndexOperand operand(this, pc);
+ LocalIndexOperand operand(decoder, pc);
return 1 + operand.length;
}
case kExprBrTable: {
- BranchTableOperand operand(this, pc);
- BranchTableIterator iterator(this, operand);
+ BranchTableOperand operand(decoder, pc);
+ BranchTableIterator iterator(decoder, operand);
return 1 + iterator.length();
}
case kExprI32Const: {
- ImmI32Operand operand(this, pc);
+ ImmI32Operand operand(decoder, pc);
return 1 + operand.length;
}
case kExprI64Const: {
- ImmI64Operand operand(this, pc);
+ ImmI64Operand operand(decoder, pc);
return 1 + operand.length;
}
case kExprGrowMemory:
case kExprMemorySize: {
- MemoryIndexOperand operand(this, pc);
+ MemoryIndexOperand operand(decoder, pc);
return 1 + operand.length;
}
- case kExprI8Const:
- return 2;
case kExprF32Const:
return 5;
case kExprF64Const:
return 9;
case kSimdPrefix: {
- byte simd_index = checked_read_u8(pc, 1, "simd_index");
+ byte simd_index = decoder->checked_read_u8(pc, 1, "simd_index");
WasmOpcode opcode =
static_cast<WasmOpcode>(kSimdPrefix << 8 | simd_index);
switch (opcode) {
@@ -351,7 +443,7 @@ class WasmDecoder : public Decoder {
return 3;
}
default:
- error("invalid SIMD opcode");
+ decoder->error(pc, "invalid SIMD opcode");
return 2;
}
}
@@ -363,24 +455,24 @@ class WasmDecoder : public Decoder {
static const int32_t kNullCatch = -1;
-// The full WASM decoder for bytecode. Both verifies bytecode and generates
-// a TurboFan IR graph.
+// The full WASM decoder for bytecode. Verifies bytecode and, optionally,
+// generates a TurboFan IR graph.
class WasmFullDecoder : public WasmDecoder {
public:
+ WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
+ const FunctionBody& body)
+ : WasmFullDecoder(zone, module, nullptr, body) {}
+
WasmFullDecoder(Zone* zone, TFBuilder* builder, const FunctionBody& body)
- : WasmDecoder(body.module, body.sig, body.start, body.end),
- zone_(zone),
- builder_(builder),
- base_(body.base),
- local_type_vec_(zone),
- stack_(zone),
- control_(zone),
- last_end_found_(false),
- current_catch_(kNullCatch) {
- local_types_ = &local_type_vec_;
- }
+ : WasmFullDecoder(zone, builder->module_env() == nullptr
+ ? nullptr
+ : builder->module_env()->module,
+ builder, body) {}
bool Decode() {
+ if (FLAG_wasm_code_fuzzer_gen_test) {
+ PrintRawWasmCode(start_, end_);
+ }
base::ElapsedTimer decode_timer;
if (FLAG_trace_wasm_decode_time) {
decode_timer.Start();
@@ -393,47 +485,21 @@ class WasmFullDecoder : public WasmDecoder {
return false;
}
- DecodeLocalDecls();
+ DCHECK_EQ(0, local_types_->size());
+ WasmDecoder::DecodeLocals(this, sig_, local_types_);
InitSsaEnv();
DecodeFunctionBody();
if (failed()) return TraceFailed();
-#if IMPLICIT_FUNCTION_END
- // With implicit end support (old style), the function block
- // remains on the stack. Other control blocks are an error.
- if (control_.size() > 1) {
- error(pc_, control_.back().pc, "unterminated control structure");
- return TraceFailed();
- }
-
- // Assume an implicit end to the function body block.
- if (control_.size() == 1) {
- Control* c = &control_.back();
- if (ssa_env_->go()) {
- FallThruTo(c);
- }
-
- if (c->end_env->go()) {
- // Push the end values onto the stack.
- stack_.resize(c->stack_depth);
- if (c->merge.arity == 1) {
- stack_.push_back(c->merge.vals.first);
- } else {
- for (unsigned i = 0; i < c->merge.arity; i++) {
- stack_.push_back(c->merge.vals.array[i]);
- }
- }
-
- TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
- SetEnv("function:end", c->end_env);
- DoReturn();
- TRACE("\n");
- }
- }
-#else
if (!control_.empty()) {
- error(pc_, control_.back().pc, "unterminated control structure");
+ // Generate a better error message whether the unterminated control
+ // structure is the function body block or an innner structure.
+ if (control_.size() > 1) {
+ error(pc_, control_.back().pc, "unterminated control structure");
+ } else {
+ error("function body must end with \"end\" opcode.");
+ }
return TraceFailed();
}
@@ -441,7 +507,6 @@ class WasmFullDecoder : public WasmDecoder {
error("function body must end with \"end\" opcode.");
return false;
}
-#endif
if (FLAG_trace_wasm_decode_time) {
double ms = decode_timer.Elapsed().InMillisecondsF();
@@ -459,36 +524,21 @@ class WasmFullDecoder : public WasmDecoder {
return false;
}
- bool DecodeLocalDecls(AstLocalDecls& decls) {
- DecodeLocalDecls();
- if (failed()) return false;
- decls.decls_encoded_size = pc_offset();
- decls.local_types.reserve(local_type_vec_.size());
- for (size_t pos = 0; pos < local_type_vec_.size();) {
- uint32_t count = 0;
- LocalType type = local_type_vec_[pos];
- while (pos < local_type_vec_.size() && local_type_vec_[pos] == type) {
- pos++;
- count++;
- }
- decls.local_types.push_back(std::pair<LocalType, uint32_t>(type, count));
- }
- decls.total_local_count = static_cast<uint32_t>(local_type_vec_.size());
- return true;
- }
-
- BitVector* AnalyzeLoopAssignmentForTesting(const byte* pc,
- size_t num_locals) {
- total_locals_ = num_locals;
- local_type_vec_.reserve(num_locals);
- if (num_locals > local_type_vec_.size()) {
- local_type_vec_.insert(local_type_vec_.end(),
- num_locals - local_type_vec_.size(), kAstI32);
- }
- return AnalyzeLoopAssignment(pc);
+ private:
+ WasmFullDecoder(Zone* zone, const wasm::WasmModule* module,
+ TFBuilder* builder, const FunctionBody& body)
+ : WasmDecoder(module, body.sig, body.start, body.end),
+ zone_(zone),
+ builder_(builder),
+ base_(body.base),
+ local_type_vec_(zone),
+ stack_(zone),
+ control_(zone),
+ last_end_found_(false),
+ current_catch_(kNullCatch) {
+ local_types_ = &local_type_vec_;
}
- private:
static const size_t kErrorMsgSize = 128;
Zone* zone_;
@@ -497,7 +547,7 @@ class WasmFullDecoder : public WasmDecoder {
SsaEnv* ssa_env_;
- ZoneVector<LocalType> local_type_vec_; // types of local variables.
+ ZoneVector<ValueType> local_type_vec_; // types of local variables.
ZoneVector<Value> stack_; // stack of values.
ZoneVector<Control> control_; // stack of blocks, loops, and ifs.
bool last_end_found_;
@@ -521,11 +571,11 @@ class WasmFullDecoder : public WasmDecoder {
// Initialize local variables.
uint32_t index = 0;
while (index < sig_->parameter_count()) {
- ssa_env->locals[index] = builder_->Param(index, local_type_vec_[index]);
+ ssa_env->locals[index] = builder_->Param(index);
index++;
}
while (index < local_type_vec_.size()) {
- LocalType type = local_type_vec_[index];
+ ValueType type = local_type_vec_[index];
TFNode* node = DefaultValue(type);
while (index < local_type_vec_.size() &&
local_type_vec_[index] == type) {
@@ -533,27 +583,28 @@ class WasmFullDecoder : public WasmDecoder {
ssa_env->locals[index++] = node;
}
}
- builder_->set_module(module_);
}
ssa_env->control = start;
ssa_env->effect = start;
SetEnv("initial", ssa_env);
if (builder_) {
- builder_->StackCheck(position());
+ // The function-prologue stack check is associated with position 0, which
+ // is never a position of any instruction in the function.
+ builder_->StackCheck(0);
}
}
- TFNode* DefaultValue(LocalType type) {
+ TFNode* DefaultValue(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return builder_->Int32Constant(0);
- case kAstI64:
+ case kWasmI64:
return builder_->Int64Constant(0);
- case kAstF32:
+ case kWasmF32:
return builder_->Float32Constant(0);
- case kAstF64:
+ case kWasmF64:
return builder_->Float64Constant(0);
- case kAstS128:
+ case kWasmS128:
return builder_->CreateS128Value(0);
default:
UNREACHABLE();
@@ -572,58 +623,19 @@ class WasmFullDecoder : public WasmDecoder {
return bytes;
}
- // Decodes the locals declarations, if any, populating {local_type_vec_}.
- void DecodeLocalDecls() {
- DCHECK_EQ(0u, local_type_vec_.size());
- // Initialize {local_type_vec} from signature.
- if (sig_) {
- local_type_vec_.reserve(sig_->parameter_count());
- for (size_t i = 0; i < sig_->parameter_count(); ++i) {
- local_type_vec_.push_back(sig_->GetParam(i));
- }
- }
- // Decode local declarations, if any.
- uint32_t entries = consume_u32v("local decls count");
- TRACE("local decls count: %u\n", entries);
- while (entries-- > 0 && pc_ < limit_) {
- uint32_t count = consume_u32v("local count");
- if (count > kMaxNumWasmLocals) {
- error(pc_ - 1, "local count too large");
- return;
- }
- byte code = consume_u8("local type");
- LocalType type;
- switch (code) {
- case kLocalI32:
- type = kAstI32;
- break;
- case kLocalI64:
- type = kAstI64;
- break;
- case kLocalF32:
- type = kAstF32;
- break;
- case kLocalF64:
- type = kAstF64;
- break;
- case kLocalS128:
- type = kAstS128;
- break;
- default:
- error(pc_ - 1, "invalid local type");
- return;
- }
- local_type_vec_.insert(local_type_vec_.end(), count, type);
+ bool CheckHasMemory() {
+ if (!module_->has_memory) {
+ error(pc_ - 1, "memory instruction with no memory");
}
- total_locals_ = local_type_vec_.size();
+ return module_->has_memory;
}
// Decodes the body of a function.
void DecodeFunctionBody() {
TRACE("wasm-decode %p...%p (module+%d, %d bytes) %s\n",
reinterpret_cast<const void*>(start_),
- reinterpret_cast<const void*>(limit_), baserel(pc_),
- static_cast<int>(limit_ - start_), builder_ ? "graph building" : "");
+ reinterpret_cast<const void*>(end_), baserel(pc_),
+ static_cast<int>(end_ - start_), builder_ ? "graph building" : "");
{
// Set up initial function block.
@@ -643,9 +655,7 @@ class WasmFullDecoder : public WasmDecoder {
}
}
- if (pc_ >= limit_) return; // Nothing to do.
-
- while (true) { // decoding loop.
+ while (pc_ < end_) { // decoding loop.
unsigned len = 1;
WasmOpcode opcode = static_cast<WasmOpcode>(*pc_);
if (!WasmOpcodes::IsPrefixOpcode(opcode)) {
@@ -673,8 +683,12 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprThrow: {
CHECK_PROTOTYPE_OPCODE(wasm_eh_prototype);
- Value value = Pop(0, kAstI32);
+ Value value = Pop(0, kWasmI32);
BUILD(Throw, value.node);
+ // TODO(titzer): Throw should end control, but currently we build a
+ // (reachable) runtime call instead of connecting it directly to
+ // end.
+ // EndControl();
break;
}
case kExprTry: {
@@ -710,9 +724,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
- if (ssa_env_->go()) {
- MergeValuesInto(c);
- }
+ FallThruTo(c);
stack_.resize(c->stack_depth);
DCHECK_NOT_NULL(c->try_info);
@@ -746,7 +758,7 @@ class WasmFullDecoder : public WasmDecoder {
case kExprIf: {
// Condition on top of stack. Split environments for branches.
BlockTypeOperand operand(this, pc_);
- Value cond = Pop(0, kAstI32);
+ Value cond = Pop(0, kWasmI32);
TFNode* if_true = nullptr;
TFNode* if_false = nullptr;
BUILD(BranchNoHint, cond.node, &if_true, &if_false);
@@ -776,8 +788,8 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
FallThruTo(c);
- // Switch to environment for false branch.
stack_.resize(c->stack_depth);
+ // Switch to environment for false branch.
SetEnv("if_else:false", c->false_env);
c->false_env = nullptr; // record that an else is already seen
break;
@@ -791,7 +803,8 @@ class WasmFullDecoder : public WasmDecoder {
Control* c = &control_.back();
if (c->is_loop()) {
// A loop just leaves the values on the stack.
- TypeCheckLoopFallThru(c);
+ TypeCheckFallThru(c);
+ if (c->unreachable) PushEndValues(c);
PopControl();
SetEnv("loop:end", ssa_env_);
break;
@@ -800,8 +813,7 @@ class WasmFullDecoder : public WasmDecoder {
if (c->false_env != nullptr) {
// End the true branch of a one-armed if.
Goto(c->false_env, c->end_env);
- if (ssa_env_->go() &&
- static_cast<int>(stack_.size()) != c->stack_depth) {
+ if (!c->unreachable && stack_.size() != c->stack_depth) {
error("end of if expected empty stack");
stack_.resize(c->stack_depth);
}
@@ -824,23 +836,13 @@ class WasmFullDecoder : public WasmDecoder {
}
FallThruTo(c);
SetEnv(name, c->end_env);
+ PushEndValues(c);
- // Push the end values onto the stack.
- stack_.resize(c->stack_depth);
- if (c->merge.arity == 1) {
- stack_.push_back(c->merge.vals.first);
- } else {
- for (unsigned i = 0; i < c->merge.arity; i++) {
- stack_.push_back(c->merge.vals.array[i]);
- }
- }
-
- PopControl();
-
- if (control_.empty()) {
- // If the last (implicit) control was popped, check we are at end.
+ if (control_.size() == 1) {
+ // If at the last (implicit) control, check we are at end.
if (pc_ + 1 != end_) {
error(pc_, pc_ + 1, "trailing code after function end");
+ break;
}
last_end_found_ = true;
if (ssa_env_->go()) {
@@ -848,25 +850,18 @@ class WasmFullDecoder : public WasmDecoder {
TRACE(" @%-8d #xx:%-20s|", startrel(pc_), "ImplicitReturn");
DoReturn();
TRACE("\n");
+ } else {
+ TypeCheckFallThru(c);
}
- return;
}
+ PopControl();
break;
}
case kExprSelect: {
- Value cond = Pop(2, kAstI32);
+ Value cond = Pop(2, kWasmI32);
Value fval = Pop();
- Value tval = Pop();
- if (tval.type == kAstStmt || tval.type != fval.type) {
- if (tval.type != kAstEnd && fval.type != kAstEnd) {
- error("type mismatch in select");
- break;
- }
- }
+ Value tval = Pop(0, fval.type);
if (build()) {
- DCHECK(tval.type != kAstEnd);
- DCHECK(fval.type != kAstEnd);
- DCHECK(cond.type != kAstEnd);
TFNode* controls[2];
builder_->BranchNoHint(cond.node, &controls[0], &controls[1]);
TFNode* merge = builder_->Merge(2, controls);
@@ -875,7 +870,7 @@ class WasmFullDecoder : public WasmDecoder {
Push(tval.type, phi);
ssa_env_->control = merge;
} else {
- Push(tval.type, nullptr);
+ Push(tval.type == kWasmVar ? fval.type : tval.type, nullptr);
}
break;
}
@@ -890,7 +885,7 @@ class WasmFullDecoder : public WasmDecoder {
}
case kExprBrIf: {
BreakDepthOperand operand(this, pc_);
- Value cond = Pop(0, kAstI32);
+ Value cond = Pop(0, kWasmI32);
if (ok() && Validate(pc_, operand, control_)) {
SsaEnv* fenv = ssa_env_;
SsaEnv* tenv = Split(fenv);
@@ -907,7 +902,7 @@ class WasmFullDecoder : public WasmDecoder {
BranchTableOperand operand(this, pc_);
BranchTableIterator iterator(this, operand);
if (Validate(pc_, operand, control_.size())) {
- Value key = Pop(0, kAstI32);
+ Value key = Pop(0, kWasmI32);
if (failed()) break;
SsaEnv* break_env = ssa_env_;
@@ -917,6 +912,7 @@ class WasmFullDecoder : public WasmDecoder {
SsaEnv* copy = Steal(break_env);
ssa_env_ = copy;
+ MergeValues* merge = nullptr;
while (ok() && iterator.has_next()) {
uint32_t i = iterator.cur_index();
const byte* pos = iterator.pc();
@@ -930,6 +926,27 @@ class WasmFullDecoder : public WasmDecoder {
? BUILD(IfDefault, sw)
: BUILD(IfValue, i, sw);
BreakTo(target);
+
+ // Check that label types match up.
+ Control* c = &control_[control_.size() - target - 1];
+ if (i == 0) {
+ merge = &c->merge;
+ } else if (merge->arity != c->merge.arity) {
+ error(pos, pos,
+ "inconsistent arity in br_table target %d"
+ " (previous was %u, this one %u)",
+ i, merge->arity, c->merge.arity);
+ } else if (control_.back().unreachable) {
+ for (uint32_t j = 0; ok() && j < merge->arity; ++j) {
+ if ((*merge)[j].type != c->merge[j].type) {
+ error(pos, pos,
+ "type error in br_table target %d operand %d"
+ " (previous expected %s, this one %s)",
+ i, j, WasmOpcodes::TypeName((*merge)[j].type),
+ WasmOpcodes::TypeName(c->merge[j].type));
+ }
+ }
+ }
}
if (failed()) break;
} else {
@@ -946,6 +963,7 @@ class WasmFullDecoder : public WasmDecoder {
ssa_env_ = break_env;
}
len = 1 + iterator.length();
+ EndControl();
break;
}
case kExprReturn: {
@@ -957,33 +975,27 @@ class WasmFullDecoder : public WasmDecoder {
EndControl();
break;
}
- case kExprI8Const: {
- ImmI8Operand operand(this, pc_);
- Push(kAstI32, BUILD(Int32Constant, operand.value));
- len = 1 + operand.length;
- break;
- }
case kExprI32Const: {
ImmI32Operand operand(this, pc_);
- Push(kAstI32, BUILD(Int32Constant, operand.value));
+ Push(kWasmI32, BUILD(Int32Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
ImmI64Operand operand(this, pc_);
- Push(kAstI64, BUILD(Int64Constant, operand.value));
+ Push(kWasmI64, BUILD(Int64Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
ImmF32Operand operand(this, pc_);
- Push(kAstF32, BUILD(Float32Constant, operand.value));
+ Push(kWasmF32, BUILD(Float32Constant, operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
ImmF64Operand operand(this, pc_);
- Push(kAstF64, BUILD(Float64Constant, operand.value));
+ Push(kWasmF64, BUILD(Float64Constant, operand.value));
len = 1 + operand.length;
break;
}
@@ -1045,79 +1057,81 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprI32LoadMem8S:
- len = DecodeLoadMem(kAstI32, MachineType::Int8());
+ len = DecodeLoadMem(kWasmI32, MachineType::Int8());
break;
case kExprI32LoadMem8U:
- len = DecodeLoadMem(kAstI32, MachineType::Uint8());
+ len = DecodeLoadMem(kWasmI32, MachineType::Uint8());
break;
case kExprI32LoadMem16S:
- len = DecodeLoadMem(kAstI32, MachineType::Int16());
+ len = DecodeLoadMem(kWasmI32, MachineType::Int16());
break;
case kExprI32LoadMem16U:
- len = DecodeLoadMem(kAstI32, MachineType::Uint16());
+ len = DecodeLoadMem(kWasmI32, MachineType::Uint16());
break;
case kExprI32LoadMem:
- len = DecodeLoadMem(kAstI32, MachineType::Int32());
+ len = DecodeLoadMem(kWasmI32, MachineType::Int32());
break;
case kExprI64LoadMem8S:
- len = DecodeLoadMem(kAstI64, MachineType::Int8());
+ len = DecodeLoadMem(kWasmI64, MachineType::Int8());
break;
case kExprI64LoadMem8U:
- len = DecodeLoadMem(kAstI64, MachineType::Uint8());
+ len = DecodeLoadMem(kWasmI64, MachineType::Uint8());
break;
case kExprI64LoadMem16S:
- len = DecodeLoadMem(kAstI64, MachineType::Int16());
+ len = DecodeLoadMem(kWasmI64, MachineType::Int16());
break;
case kExprI64LoadMem16U:
- len = DecodeLoadMem(kAstI64, MachineType::Uint16());
+ len = DecodeLoadMem(kWasmI64, MachineType::Uint16());
break;
case kExprI64LoadMem32S:
- len = DecodeLoadMem(kAstI64, MachineType::Int32());
+ len = DecodeLoadMem(kWasmI64, MachineType::Int32());
break;
case kExprI64LoadMem32U:
- len = DecodeLoadMem(kAstI64, MachineType::Uint32());
+ len = DecodeLoadMem(kWasmI64, MachineType::Uint32());
break;
case kExprI64LoadMem:
- len = DecodeLoadMem(kAstI64, MachineType::Int64());
+ len = DecodeLoadMem(kWasmI64, MachineType::Int64());
break;
case kExprF32LoadMem:
- len = DecodeLoadMem(kAstF32, MachineType::Float32());
+ len = DecodeLoadMem(kWasmF32, MachineType::Float32());
break;
case kExprF64LoadMem:
- len = DecodeLoadMem(kAstF64, MachineType::Float64());
+ len = DecodeLoadMem(kWasmF64, MachineType::Float64());
break;
case kExprI32StoreMem8:
- len = DecodeStoreMem(kAstI32, MachineType::Int8());
+ len = DecodeStoreMem(kWasmI32, MachineType::Int8());
break;
case kExprI32StoreMem16:
- len = DecodeStoreMem(kAstI32, MachineType::Int16());
+ len = DecodeStoreMem(kWasmI32, MachineType::Int16());
break;
case kExprI32StoreMem:
- len = DecodeStoreMem(kAstI32, MachineType::Int32());
+ len = DecodeStoreMem(kWasmI32, MachineType::Int32());
break;
case kExprI64StoreMem8:
- len = DecodeStoreMem(kAstI64, MachineType::Int8());
+ len = DecodeStoreMem(kWasmI64, MachineType::Int8());
break;
case kExprI64StoreMem16:
- len = DecodeStoreMem(kAstI64, MachineType::Int16());
+ len = DecodeStoreMem(kWasmI64, MachineType::Int16());
break;
case kExprI64StoreMem32:
- len = DecodeStoreMem(kAstI64, MachineType::Int32());
+ len = DecodeStoreMem(kWasmI64, MachineType::Int32());
break;
case kExprI64StoreMem:
- len = DecodeStoreMem(kAstI64, MachineType::Int64());
+ len = DecodeStoreMem(kWasmI64, MachineType::Int64());
break;
case kExprF32StoreMem:
- len = DecodeStoreMem(kAstF32, MachineType::Float32());
+ len = DecodeStoreMem(kWasmF32, MachineType::Float32());
break;
case kExprF64StoreMem:
- len = DecodeStoreMem(kAstF64, MachineType::Float64());
+ len = DecodeStoreMem(kWasmF64, MachineType::Float64());
break;
case kExprGrowMemory: {
+ if (!CheckHasMemory()) break;
MemoryIndexOperand operand(this, pc_);
+ DCHECK_NOT_NULL(module_);
if (module_->origin != kAsmJsOrigin) {
- Value val = Pop(0, kAstI32);
- Push(kAstI32, BUILD(GrowMemory, val.node));
+ Value val = Pop(0, kWasmI32);
+ Push(kWasmI32, BUILD(GrowMemory, val.node));
} else {
error("grow_memory is not supported for asmjs modules");
}
@@ -1125,8 +1139,9 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kExprMemorySize: {
+ if (!CheckHasMemory()) break;
MemoryIndexOperand operand(this, pc_);
- Push(kAstI32, BUILD(CurrentMemoryPages));
+ Push(kWasmI32, BUILD(CurrentMemoryPages));
len = 1 + operand.length;
break;
}
@@ -1144,7 +1159,7 @@ class WasmFullDecoder : public WasmDecoder {
case kExprCallIndirect: {
CallIndirectOperand operand(this, pc_);
if (Validate(pc_, operand)) {
- Value index = Pop(0, kAstI32);
+ Value index = Pop(0, kWasmI32);
TFNode** buffer = PopArgs(operand.sig);
if (buffer) buffer[0] = index.node;
TFNode** rets = nullptr;
@@ -1165,7 +1180,7 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
case kAtomicPrefix: {
- if (!module_ || module_->origin != kAsmJsOrigin) {
+ if (module_ == nullptr || module_->origin != kAsmJsOrigin) {
error("Atomics are allowed only in AsmJs modules");
break;
}
@@ -1184,7 +1199,7 @@ class WasmFullDecoder : public WasmDecoder {
}
default: {
// Deal with special asmjs opcodes.
- if (module_ && module_->origin == kAsmJsOrigin) {
+ if (module_ != nullptr && module_->origin == kAsmJsOrigin) {
sig = WasmOpcodes::AsmjsSignature(opcode);
if (sig) {
BuildSimpleOperator(opcode, sig);
@@ -1199,6 +1214,35 @@ class WasmFullDecoder : public WasmDecoder {
#if DEBUG
if (FLAG_trace_wasm_decoder) {
+ PrintF(" ");
+ for (size_t i = 0; i < control_.size(); ++i) {
+ Control* c = &control_[i];
+ enum ControlKind {
+ kControlIf,
+ kControlBlock,
+ kControlLoop,
+ kControlTry
+ };
+ switch (c->kind) {
+ case kControlIf:
+ PrintF("I");
+ break;
+ case kControlBlock:
+ PrintF("B");
+ break;
+ case kControlLoop:
+ PrintF("L");
+ break;
+ case kControlTry:
+ PrintF("T");
+ break;
+ default:
+ break;
+ }
+ PrintF("%u", c->merge.arity);
+ if (c->unreachable) PrintF("*");
+ }
+ PrintF(" | ");
for (size_t i = 0; i < stack_.size(); ++i) {
Value& val = stack_[i];
WasmOpcode opcode = static_cast<WasmOpcode>(*val.pc);
@@ -1228,20 +1272,23 @@ class WasmFullDecoder : public WasmDecoder {
default:
break;
}
+ if (val.node == nullptr) PrintF("?");
}
PrintF("\n");
}
#endif
pc_ += len;
- if (pc_ >= limit_) {
- // End of code reached or exceeded.
- if (pc_ > limit_ && ok()) error("Beyond end of code");
- return;
- }
} // end decode loop
+ if (pc_ > end_ && ok()) error("Beyond end of code");
}
- void EndControl() { ssa_env_->Kill(SsaEnv::kControlEnd); }
+ void EndControl() {
+ ssa_env_->Kill(SsaEnv::kControlEnd);
+ if (!control_.empty()) {
+ stack_.resize(control_.back().stack_depth);
+ control_.back().unreachable = true;
+ }
+ }
void SetBlockType(Control* c, BlockTypeOperand& operand) {
c->merge.arity = operand.arity;
@@ -1273,77 +1320,96 @@ class WasmFullDecoder : public WasmDecoder {
}
}
- LocalType GetReturnType(FunctionSig* sig) {
- return sig->return_count() == 0 ? kAstStmt : sig->GetReturn();
+ ValueType GetReturnType(FunctionSig* sig) {
+ return sig->return_count() == 0 ? kWasmStmt : sig->GetReturn();
}
void PushBlock(SsaEnv* end_env) {
- const int stack_depth = static_cast<int>(stack_.size());
control_.emplace_back(
- Control::Block(pc_, stack_depth, end_env, current_catch_));
+ Control::Block(pc_, stack_.size(), end_env, current_catch_));
}
void PushLoop(SsaEnv* end_env) {
- const int stack_depth = static_cast<int>(stack_.size());
control_.emplace_back(
- Control::Loop(pc_, stack_depth, end_env, current_catch_));
+ Control::Loop(pc_, stack_.size(), end_env, current_catch_));
}
void PushIf(SsaEnv* end_env, SsaEnv* false_env) {
- const int stack_depth = static_cast<int>(stack_.size());
control_.emplace_back(
- Control::If(pc_, stack_depth, end_env, false_env, current_catch_));
+ Control::If(pc_, stack_.size(), end_env, false_env, current_catch_));
}
void PushTry(SsaEnv* end_env, SsaEnv* catch_env) {
- const int stack_depth = static_cast<int>(stack_.size());
- control_.emplace_back(Control::Try(pc_, stack_depth, end_env, zone_,
+ control_.emplace_back(Control::Try(pc_, stack_.size(), end_env, zone_,
catch_env, current_catch_));
current_catch_ = static_cast<int32_t>(control_.size() - 1);
}
void PopControl() { control_.pop_back(); }
- int DecodeLoadMem(LocalType type, MachineType mem_type) {
+ int DecodeLoadMem(ValueType type, MachineType mem_type) {
+ if (!CheckHasMemory()) return 0;
MemoryAccessOperand operand(this, pc_,
ElementSizeLog2Of(mem_type.representation()));
- Value index = Pop(0, kAstI32);
+ Value index = Pop(0, kWasmI32);
TFNode* node = BUILD(LoadMem, type, mem_type, index.node, operand.offset,
operand.alignment, position());
Push(type, node);
return 1 + operand.length;
}
- int DecodeStoreMem(LocalType type, MachineType mem_type) {
+ int DecodeStoreMem(ValueType type, MachineType mem_type) {
+ if (!CheckHasMemory()) return 0;
MemoryAccessOperand operand(this, pc_,
ElementSizeLog2Of(mem_type.representation()));
Value val = Pop(1, type);
- Value index = Pop(0, kAstI32);
+ Value index = Pop(0, kWasmI32);
BUILD(StoreMem, mem_type, index.node, operand.offset, operand.alignment,
val.node, position());
return 1 + operand.length;
}
- unsigned ExtractLane(WasmOpcode opcode, LocalType type) {
+ unsigned ExtractLane(WasmOpcode opcode, ValueType type) {
LaneOperand operand(this, pc_);
if (Validate(pc_, operand)) {
- TFNode* input = Pop(0, LocalType::kSimd128).node;
- TFNode* node = BUILD(SimdExtractLane, opcode, operand.lane, input);
+ compiler::NodeVector inputs(1, zone_);
+ inputs[0] = Pop(0, ValueType::kSimd128).node;
+ TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
Push(type, node);
}
return operand.length;
}
+ unsigned ReplaceLane(WasmOpcode opcode, ValueType type) {
+ LaneOperand operand(this, pc_);
+ if (Validate(pc_, operand)) {
+ compiler::NodeVector inputs(2, zone_);
+ inputs[1] = Pop(1, type).node;
+ inputs[0] = Pop(0, ValueType::kSimd128).node;
+ TFNode* node = BUILD(SimdLaneOp, opcode, operand.lane, inputs);
+ Push(ValueType::kSimd128, node);
+ }
+ return operand.length;
+ }
+
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
unsigned len = 0;
switch (opcode) {
case kExprI32x4ExtractLane: {
- len = ExtractLane(opcode, LocalType::kWord32);
+ len = ExtractLane(opcode, ValueType::kWord32);
break;
}
case kExprF32x4ExtractLane: {
- len = ExtractLane(opcode, LocalType::kFloat32);
+ len = ExtractLane(opcode, ValueType::kFloat32);
+ break;
+ }
+ case kExprI32x4ReplaceLane: {
+ len = ReplaceLane(opcode, ValueType::kWord32);
+ break;
+ }
+ case kExprF32x4ReplaceLane: {
+ len = ReplaceLane(opcode, ValueType::kFloat32);
break;
}
default: {
@@ -1381,12 +1447,25 @@ class WasmFullDecoder : public WasmDecoder {
EndControl();
}
- void Push(LocalType type, TFNode* node) {
- if (type != kAstStmt && type != kAstEnd) {
+ void Push(ValueType type, TFNode* node) {
+ if (type != kWasmStmt) {
stack_.push_back({pc_, node, type});
}
}
+ void PushEndValues(Control* c) {
+ DCHECK_EQ(c, &control_.back());
+ stack_.resize(c->stack_depth);
+ if (c->merge.arity == 1) {
+ stack_.push_back(c->merge.vals.first);
+ } else {
+ for (unsigned i = 0; i < c->merge.arity; i++) {
+ stack_.push_back(c->merge.vals.array[i]);
+ }
+ }
+ DCHECK_EQ(c->stack_depth + c->merge.arity, stack_.size());
+ }
+
void PushReturns(FunctionSig* sig, TFNode** rets) {
for (size_t i = 0; i < sig->return_count(); i++) {
// When verifying only, then {rets} will be null, so push null.
@@ -1399,31 +1478,24 @@ class WasmFullDecoder : public WasmDecoder {
return WasmOpcodes::ShortOpcodeName(static_cast<WasmOpcode>(*pc));
}
- Value Pop(int index, LocalType expected) {
- if (!ssa_env_->go()) {
- // Unreachable code is essentially not typechecked.
- return {pc_, nullptr, expected};
- }
+ Value Pop(int index, ValueType expected) {
Value val = Pop();
- if (val.type != expected) {
- if (val.type != kAstEnd) {
- error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
- SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
- SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
- }
+ if (val.type != expected && val.type != kWasmVar && expected != kWasmVar) {
+ error(pc_, val.pc, "%s[%d] expected type %s, found %s of type %s",
+ SafeOpcodeNameAt(pc_), index, WasmOpcodes::TypeName(expected),
+ SafeOpcodeNameAt(val.pc), WasmOpcodes::TypeName(val.type));
}
return val;
}
Value Pop() {
- if (!ssa_env_->go()) {
- // Unreachable code is essentially not typechecked.
- return {pc_, nullptr, kAstEnd};
- }
size_t limit = control_.empty() ? 0 : control_.back().stack_depth;
if (stack_.size() <= limit) {
- Value val = {pc_, nullptr, kAstStmt};
- error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+ // Popping past the current control start in reachable code.
+ Value val = {pc_, nullptr, kWasmVar};
+ if (!control_.back().unreachable) {
+ error(pc_, pc_, "%s found empty stack", SafeOpcodeNameAt(pc_));
+ }
return val;
}
Value val = stack_.back();
@@ -1431,22 +1503,6 @@ class WasmFullDecoder : public WasmDecoder {
return val;
}
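// [editor's note] The kWasmVar produced above behaves as a wildcard ("bottom")
// type: when the stack has already been truncated by EndControl(), popping
// past the block's base in unreachable code yields a value that matches any
// expected type in Pop(int, ValueType), so dead code no longer produces
// spurious type errors.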
- Value PopUpTo(int stack_depth) {
- if (!ssa_env_->go()) {
- // Unreachable code is essentially not typechecked.
- return {pc_, nullptr, kAstEnd};
- }
- if (stack_depth == static_cast<int>(stack_.size())) {
- Value val = {pc_, nullptr, kAstStmt};
- return val;
- } else {
- DCHECK_LE(stack_depth, static_cast<int>(stack_.size()));
- Value val = Pop();
- stack_.resize(stack_depth);
- return val;
- }
- }
-
int baserel(const byte* ptr) {
return base_ ? static_cast<int>(ptr - base_) : 0;
}
@@ -1454,17 +1510,17 @@ class WasmFullDecoder : public WasmDecoder {
int startrel(const byte* ptr) { return static_cast<int>(ptr - start_); }
void BreakTo(unsigned depth) {
- if (!ssa_env_->go()) return;
Control* c = &control_[control_.size() - depth - 1];
if (c->is_loop()) {
// This is the inner loop block, which does not have a value.
Goto(ssa_env_, c->end_env);
} else {
// Merge the value(s) into the end of the block.
- if (c->stack_depth + c->merge.arity > stack_.size()) {
+ size_t expected = control_.back().stack_depth + c->merge.arity;
+ if (stack_.size() < expected && !control_.back().unreachable) {
error(
pc_, pc_,
- "expected at least %d values on the stack for br to @%d, found %d",
+ "expected at least %u values on the stack for br to @%d, found %d",
c->merge.arity, startrel(c->pc),
static_cast<int>(stack_.size() - c->stack_depth));
return;
@@ -1474,37 +1530,41 @@ class WasmFullDecoder : public WasmDecoder {
}
void FallThruTo(Control* c) {
- if (!ssa_env_->go()) return;
+ DCHECK_EQ(c, &control_.back());
// Merge the value(s) into the end of the block.
- int arity = static_cast<int>(c->merge.arity);
- if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
- error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
- arity, startrel(c->pc));
+ size_t expected = c->stack_depth + c->merge.arity;
+ if (stack_.size() == expected ||
+ (stack_.size() < expected && c->unreachable)) {
+ MergeValuesInto(c);
+ c->unreachable = false;
return;
}
- MergeValuesInto(c);
+ error(pc_, pc_, "expected %u elements on the stack for fallthru to @%d",
+ c->merge.arity, startrel(c->pc));
}
- inline Value& GetMergeValueFromStack(Control* c, int i) {
+ inline Value& GetMergeValueFromStack(Control* c, size_t i) {
return stack_[stack_.size() - c->merge.arity + i];
}
- void TypeCheckLoopFallThru(Control* c) {
- if (!ssa_env_->go()) return;
+ void TypeCheckFallThru(Control* c) {
+ DCHECK_EQ(c, &control_.back());
// Fallthru must match arity exactly.
int arity = static_cast<int>(c->merge.arity);
- if (c->stack_depth + arity != static_cast<int>(stack_.size())) {
+ if (c->stack_depth + arity < stack_.size() ||
+ (c->stack_depth + arity != stack_.size() && !c->unreachable)) {
error(pc_, pc_, "expected %d elements on the stack for fallthru to @%d",
arity, startrel(c->pc));
return;
}
// Typecheck the values left on the stack.
- for (unsigned i = 0; i < c->merge.arity; i++) {
+ size_t avail = stack_.size() - c->stack_depth;
+ for (size_t i = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+ i < c->merge.arity; i++) {
Value& val = GetMergeValueFromStack(c, i);
- Value& old =
- c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
+ Value& old = c->merge[i];
if (val.type != old.type) {
- error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+ error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
return;
}
@@ -1514,23 +1574,24 @@ class WasmFullDecoder : public WasmDecoder {
void MergeValuesInto(Control* c) {
SsaEnv* target = c->end_env;
bool first = target->state == SsaEnv::kUnreachable;
+ bool reachable = ssa_env_->go();
Goto(ssa_env_, target);
- for (unsigned i = 0; i < c->merge.arity; i++) {
+ size_t avail = stack_.size() - control_.back().stack_depth;
+ for (size_t i = avail >= c->merge.arity ? 0 : c->merge.arity - avail;
+ i < c->merge.arity; i++) {
Value& val = GetMergeValueFromStack(c, i);
- Value& old =
- c->merge.arity == 1 ? c->merge.vals.first : c->merge.vals.array[i];
- if (val.type != old.type) {
- error(pc_, pc_, "type error in merge[%d] (expected %s, got %s)", i,
+ Value& old = c->merge[i];
+ if (val.type != old.type && val.type != kWasmVar) {
+ error(pc_, pc_, "type error in merge[%zu] (expected %s, got %s)", i,
WasmOpcodes::TypeName(old.type), WasmOpcodes::TypeName(val.type));
return;
}
- if (builder_) {
+ if (builder_ && reachable) {
+ DCHECK_NOT_NULL(val.node);
old.node =
first ? val.node : CreateOrMergeIntoPhi(old.type, target->control,
old.node, val.node);
- } else {
- old.node = nullptr;
}
}
}
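// [editor's note] Worked example of the partial merge above: for a block of
// merge arity 2 whose body ends in an unconditional branch leaving one value,
// avail == 1, so the loop starts at i = 1 and only merge[1] is checked against
// the single value present; the missing merge[0] is tolerated by the
// fall-through logic above because the block end is unreachable.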
@@ -1555,13 +1616,13 @@ class WasmFullDecoder : public WasmDecoder {
break;
}
}
- PrintF(" env = %p, state = %c, reason = %s", static_cast<void*>(env),
+ PrintF("{set_env = %p, state = %c, reason = %s", static_cast<void*>(env),
state, reason);
if (env && env->control) {
PrintF(", control = ");
compiler::WasmGraphBuilder::PrintDebugName(env->control);
}
- PrintF("\n");
+ PrintF("}");
}
#endif
ssa_env_ = env;
@@ -1602,7 +1663,7 @@ class WasmFullDecoder : public WasmDecoder {
} else {
DCHECK_EQ(SsaEnv::kMerged, try_info->catch_env->state);
try_info->exception =
- CreateOrMergeIntoPhi(kAstI32, try_info->catch_env->control,
+ CreateOrMergeIntoPhi(kWasmI32, try_info->catch_env->control,
try_info->exception, if_exception);
}
@@ -1686,7 +1747,7 @@ class WasmFullDecoder : public WasmDecoder {
return from->Kill();
}
- TFNode* CreateOrMergeIntoPhi(LocalType type, TFNode* merge, TFNode* tnode,
+ TFNode* CreateOrMergeIntoPhi(ValueType type, TFNode* merge, TFNode* tnode,
TFNode* fnode) {
DCHECK_NOT_NULL(builder_);
if (builder_->IsPhiWithMerge(tnode, merge)) {
@@ -1710,7 +1771,8 @@ class WasmFullDecoder : public WasmDecoder {
env->effect = builder_->EffectPhi(1, &env->effect, env->control);
builder_->Terminate(env->effect, env->control);
if (FLAG_wasm_loop_assignment_analysis) {
- BitVector* assigned = AnalyzeLoopAssignment(pc);
+ BitVector* assigned = AnalyzeLoopAssignment(
+ this, pc, static_cast<int>(total_locals()), zone_);
if (failed()) return env;
if (assigned != nullptr) {
// Only introduce phis for variables assigned in this loop.
@@ -1789,52 +1851,10 @@ class WasmFullDecoder : public WasmDecoder {
}
virtual void onFirstError() {
- limit_ = start_; // Terminate decoding loop.
+ end_ = start_; // Terminate decoding loop.
builder_ = nullptr; // Don't build any more nodes.
TRACE(" !%s\n", error_msg_.get());
}
- BitVector* AnalyzeLoopAssignment(const byte* pc) {
- if (pc >= limit_) return nullptr;
- if (*pc != kExprLoop) return nullptr;
-
- BitVector* assigned =
- new (zone_) BitVector(static_cast<int>(local_type_vec_.size()), zone_);
- int depth = 0;
- // Iteratively process all AST nodes nested inside the loop.
- while (pc < limit_ && ok()) {
- WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
- unsigned length = 1;
- switch (opcode) {
- case kExprLoop:
- case kExprIf:
- case kExprBlock:
- case kExprTry:
- length = OpcodeLength(pc);
- depth++;
- break;
- case kExprSetLocal: // fallthru
- case kExprTeeLocal: {
- LocalIndexOperand operand(this, pc);
- if (assigned->length() > 0 &&
- operand.index < static_cast<uint32_t>(assigned->length())) {
- // Unverified code might have an out-of-bounds index.
- assigned->Add(operand.index);
- }
- length = 1 + operand.length;
- break;
- }
- case kExprEnd:
- depth--;
- break;
- default:
- length = OpcodeLength(pc);
- break;
- }
- if (depth <= 0) break;
- pc += length;
- }
- return ok() ? assigned : nullptr;
- }
inline wasm::WasmCodePosition position() {
int offset = static_cast<int>(pc_ - start_);
@@ -1865,30 +1885,33 @@ class WasmFullDecoder : public WasmDecoder {
}
};
-bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
+bool DecodeLocalDecls(BodyLocalDecls* decls, const byte* start,
const byte* end) {
- AccountingAllocator allocator;
- Zone tmp(&allocator, ZONE_NAME);
- FunctionBody body = {nullptr, nullptr, nullptr, start, end};
- WasmFullDecoder decoder(&tmp, nullptr, body);
- return decoder.DecodeLocalDecls(decls);
+ Decoder decoder(start, end);
+ if (WasmDecoder::DecodeLocals(&decoder, nullptr, &decls->type_list)) {
+ DCHECK(decoder.ok());
+ decls->encoded_size = decoder.pc_offset();
+ return true;
+ }
+ return false;
}
BytecodeIterator::BytecodeIterator(const byte* start, const byte* end,
- AstLocalDecls* decls)
+ BodyLocalDecls* decls)
: Decoder(start, end) {
if (decls != nullptr) {
- if (DecodeLocalDecls(*decls, start, end)) {
- pc_ += decls->decls_encoded_size;
+ if (DecodeLocalDecls(decls, start, end)) {
+ pc_ += decls->encoded_size;
if (pc_ > end_) pc_ = end_;
}
}
}
DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+ const wasm::WasmModule* module,
FunctionBody& body) {
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder decoder(&zone, nullptr, body);
+ WasmFullDecoder decoder(&zone, module, body);
decoder.Decode();
return decoder.toResult<DecodeStruct*>(nullptr);
}
@@ -1902,21 +1925,35 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
}
unsigned OpcodeLength(const byte* pc, const byte* end) {
- WasmDecoder decoder(nullptr, nullptr, pc, end);
- return decoder.OpcodeLength(pc);
+ Decoder decoder(pc, end);
+ return WasmDecoder::OpcodeLength(&decoder, pc);
}
-void PrintAstForDebugging(const byte* start, const byte* end) {
+void PrintRawWasmCode(const byte* start, const byte* end) {
AccountingAllocator allocator;
- OFStream os(stdout);
- PrintAst(&allocator, FunctionBodyForTesting(start, end), os, nullptr);
+ PrintRawWasmCode(&allocator, FunctionBodyForTesting(start, end), nullptr);
}
-bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
- std::ostream& os,
- std::vector<std::tuple<uint32_t, int, int>>* offset_table) {
+namespace {
+const char* RawOpcodeName(WasmOpcode opcode) {
+ switch (opcode) {
+#define DECLARE_NAME_CASE(name, opcode, sig) \
+ case kExpr##name: \
+ return "kExpr" #name;
+ FOREACH_OPCODE(DECLARE_NAME_CASE)
+#undef DECLARE_NAME_CASE
+ default:
+ break;
+ }
+ return "Unknown";
+}
+} // namespace
+
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+ const wasm::WasmModule* module) {
+ OFStream os(stdout);
Zone zone(allocator, ZONE_NAME);
- WasmFullDecoder decoder(&zone, nullptr, body);
+ WasmFullDecoder decoder(&zone, module, body);
int line_nr = 0;
// Print the function signature.
@@ -1926,14 +1963,22 @@ bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
}
// Print the local declarations.
- AstLocalDecls decls(&zone);
+ BodyLocalDecls decls(&zone);
BytecodeIterator i(body.start, body.end, &decls);
- if (body.start != i.pc()) {
+ if (body.start != i.pc() && !FLAG_wasm_code_fuzzer_gen_test) {
os << "// locals: ";
- for (auto p : decls.local_types) {
- LocalType type = p.first;
- uint32_t count = p.second;
- os << " " << count << " " << WasmOpcodes::TypeName(type);
+ if (!decls.type_list.empty()) {
+ ValueType type = decls.type_list[0];
+ uint32_t count = 0;
+ for (size_t pos = 0; pos < decls.type_list.size(); ++pos) {
+ if (decls.type_list[pos] == type) {
+ ++count;
+ } else {
+ os << " " << count << " " << WasmOpcodes::TypeName(type);
+ type = decls.type_list[pos];
+ count = 1;
+ }
+ }
}
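// [editor's note] The loop above run-length-groups the flat type_list back
// into "count type" pairs for printing. Note that a run is only flushed when a
// different type follows it, so as written the group for the final type does
// not appear in the trace line.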
os << std::endl;
++line_nr;
@@ -1949,25 +1994,22 @@ bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
++line_nr;
unsigned control_depth = 0;
for (; i.has_next(); i.next()) {
- unsigned length = decoder.OpcodeLength(i.pc());
+ unsigned length = WasmDecoder::OpcodeLength(&decoder, i.pc());
WasmOpcode opcode = i.current();
if (opcode == kExprElse) control_depth--;
int num_whitespaces = control_depth < 32 ? 2 * control_depth : 64;
- if (offset_table) {
- offset_table->push_back(
- std::make_tuple(i.pc_offset(), line_nr, num_whitespaces));
- }
// 64 whitespaces
const char* padding =
" ";
os.write(padding, num_whitespaces);
- os << "k" << WasmOpcodes::OpcodeName(opcode) << ",";
+
+ os << RawOpcodeName(opcode) << ",";
for (size_t j = 1; j < length; ++j) {
- os << " " << AsHex(i.pc()[j], 2) << ",";
+ os << " 0x" << AsHex(i.pc()[j], 2) << ",";
}
switch (opcode) {
@@ -2024,7 +2066,7 @@ bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
}
default:
break;
- }
+ }
os << std::endl;
++line_nr;
}
@@ -2034,9 +2076,9 @@ bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone, size_t num_locals,
const byte* start, const byte* end) {
- FunctionBody body = {nullptr, nullptr, nullptr, start, end};
- WasmFullDecoder decoder(zone, nullptr, body);
- return decoder.AnalyzeLoopAssignmentForTesting(start, num_locals);
+ Decoder decoder(start, end);
+ return WasmDecoder::AnalyzeLoopAssignment(&decoder, start,
+ static_cast<int>(num_locals), zone);
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/ast-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 9ce323efcb..1115b1a450 100644
--- a/deps/v8/src/wasm/ast-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -2,10 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_AST_DECODER_H_
-#define V8_WASM_AST_DECODER_H_
+#ifndef V8_WASM_FUNCTION_BODY_DECODER_H_
+#define V8_WASM_FUNCTION_BODY_DECODER_H_
+
+#include <iterator>
#include "src/base/compiler-specific.h"
+#include "src/base/iterator.h"
#include "src/globals.h"
#include "src/signature.h"
#include "src/wasm/decoder.h"
@@ -29,12 +32,12 @@ struct WasmGlobal;
// Helpers for decoding different kinds of operands which follow bytecodes.
struct LocalIndexOperand {
uint32_t index;
- LocalType type;
+ ValueType type;
unsigned length;
inline LocalIndexOperand(Decoder* decoder, const byte* pc) {
index = decoder->checked_read_u32v(pc, 1, &length, "local index");
- type = kAstStmt;
+ type = kWasmStmt;
}
};
@@ -67,7 +70,9 @@ struct ImmF32Operand {
float value;
unsigned length;
inline ImmF32Operand(Decoder* decoder, const byte* pc) {
- value = bit_cast<float>(decoder->checked_read_u32(pc, 1, "immf32"));
+ // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ uint32_t tmp = decoder->checked_read_u32(pc, 1, "immf32");
+ memcpy(&value, &tmp, sizeof(value));
length = 4;
}
};
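// [editor's note] A minimal standalone sketch, not part of this patch, of the
// hazard the memcpy above avoids: bit_cast may round-trip the value through a
// floating-point register, which can set the quiet bit of a signalling NaN.
// Copying raw bytes preserves the exact payload on common platforms:
#include <cstdint>
#include <cstdio>
#include <cstring>
int main() {
  uint32_t snan_bits = 0x7FA00000u;  // signalling NaN: quiet bit (22) clear
  float value;
  std::memcpy(&value, &snan_bits, sizeof value);  // as in ImmF32Operand above
  uint32_t round_tripped;
  std::memcpy(&round_tripped, &value, sizeof round_tripped);
  std::printf("0x%08X\n", round_tripped);  // expected: 0x7FA00000
}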
@@ -76,21 +81,23 @@ struct ImmF64Operand {
double value;
unsigned length;
inline ImmF64Operand(Decoder* decoder, const byte* pc) {
- value = bit_cast<double>(decoder->checked_read_u64(pc, 1, "immf64"));
+ // Avoid bit_cast because it might not preserve the signalling bit of a NaN.
+ uint64_t tmp = decoder->checked_read_u64(pc, 1, "immf64");
+ memcpy(&value, &tmp, sizeof(value));
length = 8;
}
};
struct GlobalIndexOperand {
uint32_t index;
- LocalType type;
+ ValueType type;
const WasmGlobal* global;
unsigned length;
inline GlobalIndexOperand(Decoder* decoder, const byte* pc) {
index = decoder->checked_read_u32v(pc, 1, &length, "global index");
global = nullptr;
- type = kAstStmt;
+ type = kWasmStmt;
}
};
@@ -101,12 +108,12 @@ struct BlockTypeOperand {
inline BlockTypeOperand(Decoder* decoder, const byte* pc) {
uint8_t val = decoder->checked_read_u8(pc, 1, "block type");
- LocalType type = kAstStmt;
+ ValueType type = kWasmStmt;
length = 1;
arity = 0;
types = nullptr;
if (decode_local_type(val, &type)) {
- arity = type == kAstStmt ? 0 : 1;
+ arity = type == kWasmStmt ? 0 : 1;
types = pc + 1;
} else {
// Handle multi-value blocks.
@@ -132,7 +139,7 @@ struct BlockTypeOperand {
uint32_t offset = 1 + 1 + len + i;
val = decoder->checked_read_u8(pc, offset, "block type");
decode_local_type(val, &type);
- if (type == kAstStmt) {
+ if (type == kWasmStmt) {
decoder->error(pc, pc + offset, "invalid block type");
return;
}
@@ -141,34 +148,34 @@ struct BlockTypeOperand {
}
// Decode a byte representing a local type. Return {false} if the encoded
// byte was invalid or {kMultivalBlock}.
- bool decode_local_type(uint8_t val, LocalType* result) {
- switch (static_cast<LocalTypeCode>(val)) {
+ bool decode_local_type(uint8_t val, ValueType* result) {
+ switch (static_cast<ValueTypeCode>(val)) {
case kLocalVoid:
- *result = kAstStmt;
+ *result = kWasmStmt;
return true;
case kLocalI32:
- *result = kAstI32;
+ *result = kWasmI32;
return true;
case kLocalI64:
- *result = kAstI64;
+ *result = kWasmI64;
return true;
case kLocalF32:
- *result = kAstF32;
+ *result = kWasmF32;
return true;
case kLocalF64:
- *result = kAstF64;
+ *result = kWasmF64;
return true;
case kLocalS128:
- *result = kAstS128;
+ *result = kWasmS128;
return true;
default:
- *result = kAstStmt;
+ *result = kWasmStmt;
return false;
}
}
- LocalType read_entry(unsigned index) {
+ ValueType read_entry(unsigned index) {
DCHECK_LT(index, arity);
- LocalType result;
+ ValueType result;
CHECK(decode_local_type(types[index], &result));
return result;
}
@@ -243,10 +250,6 @@ struct BranchTableOperand {
}
table = pc + 1 + len1;
}
- inline uint32_t read_entry(Decoder* decoder, unsigned i) {
- DCHECK(i <= table_count);
- return table ? decoder->read_u32(table + i * sizeof(uint32_t)) : 0;
- }
};
// A helper to iterate over a branch table.
@@ -309,11 +312,10 @@ struct MemoryAccessOperand {
};
typedef compiler::WasmGraphBuilder TFBuilder;
-struct ModuleEnv; // forward declaration of module interface.
+struct WasmModule; // forward declaration of module interface.
// All of the various data structures necessary to decode a function body.
struct FunctionBody {
- ModuleEnv* module; // module environment
FunctionSig* sig; // function signature
const byte* base; // base of the module bytes, for error reporting
const byte* start; // start of the function body
@@ -322,7 +324,7 @@ struct FunctionBody {
static inline FunctionBody FunctionBodyForTesting(const byte* start,
const byte* end) {
- return {nullptr, nullptr, start, start, end};
+ return {nullptr, start, start, end};
}
struct DecodeStruct {
@@ -334,48 +336,42 @@ inline std::ostream& operator<<(std::ostream& os, const DecodeStruct& tree) {
}
V8_EXPORT_PRIVATE DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
+ const wasm::WasmModule* module,
FunctionBody& body);
DecodeResult BuildTFGraph(AccountingAllocator* allocator, TFBuilder* builder,
FunctionBody& body);
-bool PrintAst(AccountingAllocator* allocator, const FunctionBody& body,
- std::ostream& os,
- std::vector<std::tuple<uint32_t, int, int>>* offset_table);
+bool PrintRawWasmCode(AccountingAllocator* allocator, const FunctionBody& body,
+ const wasm::WasmModule* module);
// A simplified form of AST printing, e.g. from a debugger.
-void PrintAstForDebugging(const byte* start, const byte* end);
+void PrintRawWasmCode(const byte* start, const byte* end);
inline DecodeResult VerifyWasmCode(AccountingAllocator* allocator,
- ModuleEnv* module, FunctionSig* sig,
+ const WasmModule* module, FunctionSig* sig,
const byte* start, const byte* end) {
- FunctionBody body = {module, sig, nullptr, start, end};
- return VerifyWasmCode(allocator, body);
+ FunctionBody body = {sig, nullptr, start, end};
+ return VerifyWasmCode(allocator, module, body);
}
inline DecodeResult BuildTFGraph(AccountingAllocator* allocator,
- TFBuilder* builder, ModuleEnv* module,
- FunctionSig* sig, const byte* start,
- const byte* end) {
- FunctionBody body = {module, sig, nullptr, start, end};
+ TFBuilder* builder, FunctionSig* sig,
+ const byte* start, const byte* end) {
+ FunctionBody body = {sig, nullptr, start, end};
return BuildTFGraph(allocator, builder, body);
}
-struct AstLocalDecls {
+struct BodyLocalDecls {
// The size of the encoded declarations.
- uint32_t decls_encoded_size; // size of encoded declarations
+ uint32_t encoded_size; // size of encoded declarations
- // Total number of locals.
- uint32_t total_local_count;
-
- // List of {local type, count} pairs.
- ZoneVector<std::pair<LocalType, uint32_t>> local_types;
+ ZoneVector<ValueType> type_list;
// Constructor initializes the vector.
- explicit AstLocalDecls(Zone* zone)
- : decls_encoded_size(0), total_local_count(0), local_types(zone) {}
+ explicit BodyLocalDecls(Zone* zone) : encoded_size(0), type_list(zone) {}
};
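// [editor's note] The flat type_list (one entry per local, replacing the old
// (type, count) pairs and the separate total_local_count) trades a little
// memory for simpler indexing: the type of local i is type_list[i], and the
// total number of locals is type_list.size().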
-V8_EXPORT_PRIVATE bool DecodeLocalDecls(AstLocalDecls& decls, const byte* start,
- const byte* end);
+V8_EXPORT_PRIVATE bool DecodeLocalDecls(BodyLocalDecls* decls,
+ const byte* start, const byte* end);
V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
size_t num_locals,
const byte* start,
@@ -386,41 +382,77 @@ V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
- public:
- // If one wants to iterate over the bytecode without looking at {pc_offset()}.
- class iterator {
+ // Base class for both iterators defined below.
+ class iterator_base {
public:
- inline iterator& operator++() {
+ inline iterator_base& operator++() {
DCHECK_LT(ptr_, end_);
ptr_ += OpcodeLength(ptr_, end_);
return *this;
}
+ inline bool operator==(const iterator_base& that) {
+ return this->ptr_ == that.ptr_;
+ }
+ inline bool operator!=(const iterator_base& that) {
+ return this->ptr_ != that.ptr_;
+ }
+
+ protected:
+ const byte* ptr_;
+ const byte* end_;
+ iterator_base(const byte* ptr, const byte* end) : ptr_(ptr), end_(end) {}
+ };
+
+ public:
+ // If one wants to iterate over the bytecode without looking at {pc_offset()}.
+ class opcode_iterator
+ : public iterator_base,
+ public std::iterator<std::input_iterator_tag, WasmOpcode> {
+ public:
inline WasmOpcode operator*() {
DCHECK_LT(ptr_, end_);
return static_cast<WasmOpcode>(*ptr_);
}
- inline bool operator==(const iterator& that) {
- return this->ptr_ == that.ptr_;
- }
- inline bool operator!=(const iterator& that) {
- return this->ptr_ != that.ptr_;
+
+ private:
+ friend class BytecodeIterator;
+ opcode_iterator(const byte* ptr, const byte* end)
+ : iterator_base(ptr, end) {}
+ };
+ // If one wants to iterate over the instruction offsets without looking at
+ // opcodes.
+ class offset_iterator
+ : public iterator_base,
+ public std::iterator<std::input_iterator_tag, uint32_t> {
+ public:
+ inline uint32_t operator*() {
+ DCHECK_LT(ptr_, end_);
+ return static_cast<uint32_t>(ptr_ - start_);
}
private:
+ const byte* start_;
friend class BytecodeIterator;
- const byte* ptr_;
- const byte* end_;
- iterator(const byte* ptr, const byte* end) : ptr_(ptr), end_(end) {}
+ offset_iterator(const byte* start, const byte* ptr, const byte* end)
+ : iterator_base(ptr, end), start_(start) {}
};
// Create a new {BytecodeIterator}. If the {decls} pointer is non-null,
// assume the bytecode starts with local declarations and decode them.
// Otherwise, do not decode local decls.
BytecodeIterator(const byte* start, const byte* end,
- AstLocalDecls* decls = nullptr);
+ BodyLocalDecls* decls = nullptr);
- inline iterator begin() const { return iterator(pc_, end_); }
- inline iterator end() const { return iterator(end_, end_); }
+ base::iterator_range<opcode_iterator> opcodes() {
+ return base::iterator_range<opcode_iterator>(opcode_iterator(pc_, end_),
+ opcode_iterator(end_, end_));
+ }
+
+ base::iterator_range<offset_iterator> offsets() {
+ return base::iterator_range<offset_iterator>(
+ offset_iterator(start_, pc_, end_),
+ offset_iterator(start_, end_, end_));
+ }
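// [editor's note] A hypothetical usage of the two ranges above (FunctionBody
// fields as declared in this header):
//   BytecodeIterator i(body.start, body.end);
//   for (WasmOpcode opcode : i.opcodes()) { /* one opcode per instruction */ }
//   for (uint32_t off : i.offsets()) { /* byte offset of each instruction */ }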
WasmOpcode current() {
return static_cast<WasmOpcode>(
@@ -441,4 +473,4 @@ class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
} // namespace internal
} // namespace v8
-#endif // V8_WASM_AST_DECODER_H_
+#endif // V8_WASM_FUNCTION_BODY_DECODER_H_
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index c8eace3c10..056fc2f64d 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -12,6 +12,7 @@
#include "src/v8.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -31,25 +32,25 @@ namespace {
const char* kNameString = "name";
const size_t kNameStringLength = 4;
-LocalType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
+ValueType TypeOf(const WasmModule* module, const WasmInitExpr& expr) {
switch (expr.kind) {
case WasmInitExpr::kNone:
- return kAstStmt;
+ return kWasmStmt;
case WasmInitExpr::kGlobalIndex:
return expr.val.global_index < module->globals.size()
? module->globals[expr.val.global_index].type
- : kAstStmt;
+ : kWasmStmt;
case WasmInitExpr::kI32Const:
- return kAstI32;
+ return kWasmI32;
case WasmInitExpr::kI64Const:
- return kAstI64;
+ return kWasmI64;
case WasmInitExpr::kF32Const:
- return kAstF32;
+ return kWasmF32;
case WasmInitExpr::kF64Const:
- return kAstF64;
+ return kWasmF64;
default:
UNREACHABLE();
- return kAstStmt;
+ return kWasmStmt;
}
}
@@ -179,17 +180,17 @@ class ModuleDecoder : public Decoder {
ModuleOrigin origin)
: Decoder(module_start, module_end), module_zone(zone), origin_(origin) {
result_.start = start_;
- if (limit_ < start_) {
+ if (end_ < start_) {
error(start_, "end is less than start");
- limit_ = start_;
+ end_ = start_;
}
}
virtual void onFirstError() {
- pc_ = limit_; // On error, terminate section decoding loop.
+ pc_ = end_; // On error, terminate section decoding loop.
}
- static void DumpModule(WasmModule* module, const ModuleResult& result) {
+ void DumpModule(const ModuleResult& result) {
std::string path;
if (FLAG_dump_wasm_module_path) {
path = FLAG_dump_wasm_module_path;
@@ -199,7 +200,7 @@ class ModuleDecoder : public Decoder {
}
}
// Files are named `HASH.{ok,failed}.wasm`.
- size_t hash = base::hash_range(module->module_start, module->module_end);
+ size_t hash = base::hash_range(start_, end_);
char buf[32] = {'\0'};
#if V8_OS_WIN && _MSC_VER < 1900
#define snprintf sprintf_s
@@ -208,17 +209,15 @@ class ModuleDecoder : public Decoder {
result.ok() ? "ok" : "failed");
std::string name(buf);
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- fwrite(module->module_start, module->module_end - module->module_start, 1,
- wasm_file);
+ fwrite(start_, end_ - start_, 1, wasm_file);
fclose(wasm_file);
}
}
// Decodes an entire module.
- ModuleResult DecodeModule(WasmModule* module, bool verify_functions = true) {
+ ModuleResult DecodeModule(bool verify_functions = true) {
pc_ = start_;
- module->module_start = start_;
- module->module_end = limit_;
+ WasmModule* module = new WasmModule(module_zone);
module->min_mem_pages = 0;
module->max_mem_pages = 0;
module->mem_export = false;
@@ -249,8 +248,8 @@ class ModuleDecoder : public Decoder {
// ===== Type section ====================================================
if (section_iter.section_code() == kTypeSectionCode) {
- uint32_t signatures_count = consume_u32v("signatures count");
- module->signatures.reserve(SafeReserve(signatures_count));
+ uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
+ module->signatures.reserve(signatures_count);
for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
TRACE("DecodeSignature[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
@@ -262,8 +261,9 @@ class ModuleDecoder : public Decoder {
// ===== Import section ==================================================
if (section_iter.section_code() == kImportSectionCode) {
- uint32_t import_table_count = consume_u32v("import table count");
- module->import_table.reserve(SafeReserve(import_table_count));
+ uint32_t import_table_count =
+ consume_count("imports count", kV8MaxWasmImports);
+ module->import_table.reserve(import_table_count);
for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
TRACE("DecodeImportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
@@ -280,9 +280,6 @@ class ModuleDecoder : public Decoder {
const byte* pos = pc_;
import->module_name_offset =
consume_string(&import->module_name_length, true);
- if (import->module_name_length == 0) {
- error(pos, "import module name cannot be NULL");
- }
import->field_name_offset =
consume_string(&import->field_name_length, true);
@@ -307,6 +304,7 @@ class ModuleDecoder : public Decoder {
}
case kExternalTable: {
// ===== Imported table ==========================================
+ if (!AddTable(module)) break;
import->index =
static_cast<uint32_t>(module->function_tables.size());
module->function_tables.push_back({0, 0, false,
@@ -314,30 +312,29 @@ class ModuleDecoder : public Decoder {
false, SignatureMap()});
expect_u8("element type", kWasmAnyFunctionTypeForm);
WasmIndirectFunctionTable* table = &module->function_tables.back();
- consume_resizable_limits(
- "element count", "elements", WasmModule::kV8MaxTableSize,
- &table->min_size, &table->has_max, WasmModule::kV8MaxTableSize,
- &table->max_size);
+ consume_resizable_limits("element count", "elements",
+ kV8MaxWasmTableSize, &table->min_size,
+ &table->has_max, kV8MaxWasmTableSize,
+ &table->max_size);
break;
}
case kExternalMemory: {
// ===== Imported memory =========================================
- bool has_max = false;
- consume_resizable_limits("memory", "pages", WasmModule::kV8MaxPages,
- &module->min_mem_pages, &has_max,
- WasmModule::kSpecMaxPages,
- &module->max_mem_pages);
- module->has_memory = true;
+ if (!AddMemory(module)) break;
+ consume_resizable_limits(
+ "memory", "pages", kV8MaxWasmMemoryPages,
+ &module->min_mem_pages, &module->has_max_mem,
+ kSpecMaxWasmMemoryPages, &module->max_mem_pages);
break;
}
case kExternalGlobal: {
// ===== Imported global =========================================
import->index = static_cast<uint32_t>(module->globals.size());
module->globals.push_back(
- {kAstStmt, false, WasmInitExpr(), 0, true, false});
+ {kWasmStmt, false, WasmInitExpr(), 0, true, false});
WasmGlobal* global = &module->globals.back();
global->type = consume_value_type();
- global->mutability = consume_u8("mutability") != 0;
+ global->mutability = consume_mutability();
if (global->mutability) {
error("mutable globals cannot be imported");
}
@@ -353,8 +350,9 @@ class ModuleDecoder : public Decoder {
// ===== Function section ================================================
if (section_iter.section_code() == kFunctionSectionCode) {
- uint32_t functions_count = consume_u32v("functions count");
- module->functions.reserve(SafeReserve(functions_count));
+ uint32_t functions_count =
+ consume_count("functions count", kV8MaxWasmFunctions);
+ module->functions.reserve(functions_count);
module->num_declared_functions = functions_count;
for (uint32_t i = 0; ok() && i < functions_count; ++i) {
uint32_t func_index = static_cast<uint32_t>(module->functions.size());
@@ -375,63 +373,47 @@ class ModuleDecoder : public Decoder {
// ===== Table section ===================================================
if (section_iter.section_code() == kTableSectionCode) {
- const byte* pos = pc_;
- uint32_t table_count = consume_u32v("table count");
- // Require at most one table for now.
- if (table_count > 1) {
- error(pos, pos, "invalid table count %d, maximum 1", table_count);
- }
- if (module->function_tables.size() < 1) {
- module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
- false, false, SignatureMap()});
- }
+ uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
for (uint32_t i = 0; ok() && i < table_count; i++) {
+ if (!AddTable(module)) break;
+ module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
+ false, false, SignatureMap()});
WasmIndirectFunctionTable* table = &module->function_tables.back();
expect_u8("table type", kWasmAnyFunctionTypeForm);
- consume_resizable_limits("table elements", "elements",
- WasmModule::kV8MaxTableSize, &table->min_size,
- &table->has_max, WasmModule::kV8MaxTableSize,
- &table->max_size);
+ consume_resizable_limits(
+ "table elements", "elements", kV8MaxWasmTableSize, &table->min_size,
+ &table->has_max, kV8MaxWasmTableSize, &table->max_size);
}
section_iter.advance();
}
// ===== Memory section ==================================================
if (section_iter.section_code() == kMemorySectionCode) {
- const byte* pos = pc_;
- uint32_t memory_count = consume_u32v("memory count");
- // Require at most one memory for now.
- if (memory_count > 1) {
- error(pos, pos, "invalid memory count %d, maximum 1", memory_count);
- }
+ uint32_t memory_count = consume_count("memory count", kV8MaxWasmMemories);
for (uint32_t i = 0; ok() && i < memory_count; i++) {
- bool has_max = false;
- consume_resizable_limits(
- "memory", "pages", WasmModule::kV8MaxPages, &module->min_mem_pages,
- &has_max, WasmModule::kSpecMaxPages, &module->max_mem_pages);
+ if (!AddMemory(module)) break;
+ consume_resizable_limits("memory", "pages", kV8MaxWasmMemoryPages,
+ &module->min_mem_pages, &module->has_max_mem,
+ kSpecMaxWasmMemoryPages,
+ &module->max_mem_pages);
}
- module->has_memory = true;
section_iter.advance();
}
// ===== Global section ==================================================
if (section_iter.section_code() == kGlobalSectionCode) {
- uint32_t globals_count = consume_u32v("globals count");
+ uint32_t globals_count =
+ consume_count("globals count", kV8MaxWasmGlobals);
uint32_t imported_globals = static_cast<uint32_t>(module->globals.size());
- if (!IsWithinLimit(std::numeric_limits<int32_t>::max(), globals_count,
- imported_globals)) {
- error(pos, pos, "too many imported+defined globals: %u + %u",
- imported_globals, globals_count);
- }
- module->globals.reserve(SafeReserve(imported_globals + globals_count));
+ module->globals.reserve(imported_globals + globals_count);
for (uint32_t i = 0; ok() && i < globals_count; ++i) {
TRACE("DecodeGlobal[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
// Add an uninitialized global and pass a pointer to it.
module->globals.push_back(
- {kAstStmt, false, WasmInitExpr(), 0, false, false});
+ {kWasmStmt, false, WasmInitExpr(), 0, false, false});
WasmGlobal* global = &module->globals.back();
DecodeGlobalInModule(module, i + imported_globals, global);
}
@@ -440,8 +422,9 @@ class ModuleDecoder : public Decoder {
// ===== Export section ==================================================
if (section_iter.section_code() == kExportSectionCode) {
- uint32_t export_table_count = consume_u32v("export table count");
- module->export_table.reserve(SafeReserve(export_table_count));
+ uint32_t export_table_count =
+ consume_count("exports count", kV8MaxWasmImports);
+ module->export_table.reserve(export_table_count);
for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
TRACE("DecodeExportTable[%d] module+%d\n", i,
static_cast<int>(pc_ - start_));
@@ -473,7 +456,11 @@ class ModuleDecoder : public Decoder {
}
case kExternalMemory: {
uint32_t index = consume_u32v("memory index");
- if (index != 0) error("invalid memory index != 0");
+ // TODO(titzer): This should become more regular
+ // once we support multiple memories.
+ if (!module->has_memory || index != 0) {
+ error("invalid memory index != 0");
+ }
module->mem_export = true;
break;
}
@@ -493,8 +480,8 @@ class ModuleDecoder : public Decoder {
break;
}
}
- // Check for duplicate exports.
- if (ok() && module->export_table.size() > 1) {
+ // Check for duplicate exports (except for asm.js).
+ if (ok() && origin_ != kAsmJsOrigin && module->export_table.size() > 1) {
std::vector<WasmExport> sorted_exports(module->export_table);
const byte* base = start_;
auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
@@ -538,7 +525,8 @@ class ModuleDecoder : public Decoder {
// ===== Elements section ================================================
if (section_iter.section_code() == kElementSectionCode) {
- uint32_t element_count = consume_u32v("element count");
+ uint32_t element_count =
+ consume_count("element count", kV8MaxWasmTableSize);
for (uint32_t i = 0; ok() && i < element_count; ++i) {
const byte* pos = pc();
uint32_t table_index = consume_u32v("table index");
@@ -551,19 +539,18 @@ class ModuleDecoder : public Decoder {
} else {
table = &module->function_tables[table_index];
}
- WasmInitExpr offset = consume_init_expr(module, kAstI32);
- uint32_t num_elem = consume_u32v("number of elements");
+ WasmInitExpr offset = consume_init_expr(module, kWasmI32);
+ uint32_t num_elem =
+ consume_count("number of elements", kV8MaxWasmTableEntries);
std::vector<uint32_t> vector;
module->table_inits.push_back({table_index, offset, vector});
WasmTableInit* init = &module->table_inits.back();
- init->entries.reserve(SafeReserve(num_elem));
for (uint32_t j = 0; ok() && j < num_elem; j++) {
WasmFunction* func = nullptr;
uint32_t index = consume_func_index(module, &func);
init->entries.push_back(index);
if (table && index < module->functions.size()) {
// Canonicalize signature indices during decoding.
- // TODO(titzer): suboptimal, redundant when verifying only.
table->map.FindOrInsert(module->functions[index].sig);
}
}
@@ -587,10 +574,8 @@ class ModuleDecoder : public Decoder {
function->code_start_offset = pc_offset();
function->code_end_offset = pc_offset() + size;
if (verify_functions) {
- ModuleEnv module_env;
- module_env.module = module;
- module_env.origin = module->origin;
-
+ ModuleBytesEnv module_env(module, nullptr,
+ ModuleWireBytes(start_, end_));
VerifyFunctionBody(i + module->num_imported_functions, &module_env,
function);
}
@@ -601,8 +586,9 @@ class ModuleDecoder : public Decoder {
// ===== Data section ====================================================
if (section_iter.section_code() == kDataSectionCode) {
- uint32_t data_segments_count = consume_u32v("data segments count");
- module->data_segments.reserve(SafeReserve(data_segments_count));
+ uint32_t data_segments_count =
+ consume_count("data segments count", kV8MaxWasmDataSegments);
+ module->data_segments.reserve(data_segments_count);
for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
if (!module->has_memory) {
error("cannot load data without memory");
@@ -623,22 +609,29 @@ class ModuleDecoder : public Decoder {
// ===== Name section ====================================================
if (section_iter.section_code() == kNameSectionCode) {
- uint32_t functions_count = consume_u32v("functions count");
+ // TODO(titzer): find a way to report name errors as warnings.
+ // Use an inner decoder so that errors don't fail the outer decoder.
+ Decoder inner(start_, pc_, end_);
+ uint32_t functions_count = inner.consume_u32v("functions count");
- for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ for (uint32_t i = 0; inner.ok() && i < functions_count; ++i) {
uint32_t function_name_length = 0;
- uint32_t name_offset = consume_string(&function_name_length, false);
+ uint32_t name_offset =
+ consume_string(inner, &function_name_length, false);
uint32_t func_index = i;
- if (func_index < module->functions.size()) {
+ if (inner.ok() && func_index < module->functions.size()) {
module->functions[func_index].name_offset = name_offset;
module->functions[func_index].name_length = function_name_length;
}
- uint32_t local_names_count = consume_u32v("local names count");
+ uint32_t local_names_count = inner.consume_u32v("local names count");
for (uint32_t j = 0; ok() && j < local_names_count; j++) {
- skip_string();
+ uint32_t length = inner.consume_u32v("string length");
+ inner.consume_bytes(length, "string");
}
}
+ // Skip the whole names section in the outer decoder.
+ consume_bytes(section_iter.payload_length(), nullptr);
section_iter.advance();
}
@@ -656,25 +649,19 @@ class ModuleDecoder : public Decoder {
if (verify_functions && result.ok()) {
result.MoveFrom(result_); // Copy error code and location.
}
- if (FLAG_dump_wasm_module) DumpModule(module, result);
+ if (FLAG_dump_wasm_module) DumpModule(result);
return result;
}
- uint32_t SafeReserve(uint32_t count) {
- // Avoid OOM by only reserving up to a certain size.
- const uint32_t kMaxReserve = 20000;
- return count < kMaxReserve ? count : kMaxReserve;
- }
-
// Decodes a single anonymous function starting at {start_}.
- FunctionResult DecodeSingleFunction(ModuleEnv* module_env,
+ FunctionResult DecodeSingleFunction(ModuleBytesEnv* module_env,
WasmFunction* function) {
pc_ = start_;
function->sig = consume_sig(); // read signature
function->name_offset = 0; // ---- name
function->name_length = 0; // ---- name length
function->code_start_offset = off(pc_); // ---- code start
- function->code_end_offset = off(limit_); // ---- code end
+ function->code_end_offset = off(end_); // ---- code end
if (ok()) VerifyFunctionBody(0, module_env, function);
@@ -693,7 +680,7 @@ class ModuleDecoder : public Decoder {
WasmInitExpr DecodeInitExpr(const byte* start) {
pc_ = start;
- return consume_init_expr(nullptr, kAstStmt);
+ return consume_init_expr(nullptr, kWasmStmt);
}
private:
@@ -703,13 +690,32 @@ class ModuleDecoder : public Decoder {
uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
+ bool AddTable(WasmModule* module) {
+ if (module->function_tables.size() > 0) {
+ error("At most one table is supported");
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ bool AddMemory(WasmModule* module) {
+ if (module->has_memory) {
+ error("At most one memory is supported");
+ return false;
+ } else {
+ module->has_memory = true;
+ return true;
+ }
+ }
+
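// [editor's note] Design note: AddTable/AddMemory centralize the current
// "at most one table, at most one memory" engine limit, so the import section
// and the table/memory sections share one check instead of duplicating ad-hoc
// count validation per section.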
// Decodes a single global entry inside a module starting at {pc_}.
void DecodeGlobalInModule(WasmModule* module, uint32_t index,
WasmGlobal* global) {
global->type = consume_value_type();
- global->mutability = consume_u8("mutability") != 0;
+ global->mutability = consume_mutability();
const byte* pos = pc();
- global->init = consume_init_expr(module, kAstStmt);
+ global->init = consume_init_expr(module, kWasmStmt);
switch (global->init.kind) {
case WasmInitExpr::kGlobalIndex: {
uint32_t other_index = global->init.val.global_index;
@@ -747,12 +753,12 @@ class ModuleDecoder : public Decoder {
void DecodeDataSegmentInModule(WasmModule* module, WasmDataSegment* segment) {
const byte* start = pc_;
expect_u8("linear memory index", 0);
- segment->dest_addr = consume_init_expr(module, kAstI32);
+ segment->dest_addr = consume_init_expr(module, kWasmI32);
segment->source_size = consume_u32v("source size");
segment->source_offset = static_cast<uint32_t>(pc_ - start_);
// Validate the data is in the module.
- uint32_t module_limit = static_cast<uint32_t>(limit_ - start_);
+ uint32_t module_limit = static_cast<uint32_t>(end_ - start_);
if (!IsWithinLimit(module_limit, segment->source_offset,
segment->source_size)) {
error(start, "segment out of bounds of module");
@@ -779,17 +785,19 @@ class ModuleDecoder : public Decoder {
}
// Verifies the body (code) of a given function.
- void VerifyFunctionBody(uint32_t func_num, ModuleEnv* menv,
+ void VerifyFunctionBody(uint32_t func_num, ModuleBytesEnv* menv,
WasmFunction* function) {
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
os << "Verifying WASM function " << WasmFunctionName(function, menv)
<< std::endl;
}
- FunctionBody body = {menv, function->sig, start_,
+ FunctionBody body = {function->sig, start_,
start_ + function->code_start_offset,
start_ + function->code_end_offset};
- DecodeResult result = VerifyWasmCode(module_zone->allocator(), body);
+ DecodeResult result =
+ VerifyWasmCode(module_zone->allocator(),
+ menv == nullptr ? nullptr : menv->module, body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
@@ -808,27 +816,26 @@ class ModuleDecoder : public Decoder {
}
}
+ uint32_t consume_string(uint32_t* length, bool validate_utf8) {
+ return consume_string(*this, length, validate_utf8);
+ }
+
// Reads a length-prefixed string, checking that it is within bounds. Returns
// the offset of the string, and the length as an out parameter.
- uint32_t consume_string(uint32_t* length, bool validate_utf8) {
- *length = consume_u32v("string length");
- uint32_t offset = pc_offset();
- const byte* string_start = pc_;
+ uint32_t consume_string(Decoder& decoder, uint32_t* length,
+ bool validate_utf8) {
+ *length = decoder.consume_u32v("string length");
+ uint32_t offset = decoder.pc_offset();
+ const byte* string_start = decoder.pc();
// Consume bytes before validation to guarantee that the string is not oob.
- if (*length > 0) consume_bytes(*length, "string");
- if (ok() && validate_utf8 &&
+ if (*length > 0) decoder.consume_bytes(*length, "string");
+ if (decoder.ok() && validate_utf8 &&
!unibrow::Utf8::Validate(string_start, *length)) {
- error(string_start, "no valid UTF-8 string");
+ decoder.error(string_start, "no valid UTF-8 string");
}
return offset;
}
- // Skips over a length-prefixed string, but checks that it is within bounds.
- void skip_string() {
- uint32_t length = consume_u32v("string length");
- consume_bytes(length, "string");
- }
-
uint32_t consume_sig_index(WasmModule* module, FunctionSig** sig) {
const byte* pos = pc_;
uint32_t sig_index = consume_u32v("signature index");
@@ -842,6 +849,17 @@ class ModuleDecoder : public Decoder {
return sig_index;
}
+ uint32_t consume_count(const char* name, size_t maximum) {
+ const byte* p = pc_;
+ uint32_t count = consume_u32v(name);
+ if (count > maximum) {
+ error(p, p, "%s of %u exceeds internal limit of %zu", name, count,
+ maximum);
+ return static_cast<uint32_t>(maximum);
+ }
+ return count;
+ }
+
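// [editor's note] Design note: on overflow, consume_count reports the error
// but returns the clamped maximum rather than the raw count, so the reserve()
// calls that follow cannot be asked for an absurd allocation even on the
// error path.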
uint32_t consume_func_index(WasmModule* module, WasmFunction** func) {
return consume_index("function index", module->functions, func);
}
@@ -912,7 +930,7 @@ class ModuleDecoder : public Decoder {
return true;
}
- WasmInitExpr consume_init_expr(WasmModule* module, LocalType expected) {
+ WasmInitExpr consume_init_expr(WasmModule* module, ValueType expected) {
const byte* pos = pc();
uint8_t opcode = consume_u8("opcode");
WasmInitExpr expr;
@@ -978,7 +996,7 @@ class ModuleDecoder : public Decoder {
if (!expect_u8("end opcode", kExprEnd)) {
expr.kind = WasmInitExpr::kNone;
}
- if (expected != kAstStmt && TypeOf(module, expr) != kAstI32) {
+ if (expected != kWasmStmt && TypeOf(module, expr) != kWasmI32) {
error(pos, pos, "type error in init expression, expected %s, got %s",
WasmOpcodes::TypeName(expected),
WasmOpcodes::TypeName(TypeOf(module, expr)));
@@ -986,29 +1004,36 @@ class ModuleDecoder : public Decoder {
return expr;
}
+ // Reads a mutability flag.
+ bool consume_mutability() {
+ byte val = consume_u8("mutability");
+ if (val > 1) error(pc_ - 1, "invalid mutability");
+ return val != 0;
+ }
+
// Reads a single 8-bit integer, interpreting it as a local type.
- LocalType consume_value_type() {
+ ValueType consume_value_type() {
byte val = consume_u8("value type");
- LocalTypeCode t = static_cast<LocalTypeCode>(val);
+ ValueTypeCode t = static_cast<ValueTypeCode>(val);
switch (t) {
case kLocalI32:
- return kAstI32;
+ return kWasmI32;
case kLocalI64:
- return kAstI64;
+ return kWasmI64;
case kLocalF32:
- return kAstF32;
+ return kWasmF32;
case kLocalF64:
- return kAstF64;
+ return kWasmF64;
case kLocalS128:
if (origin_ != kAsmJsOrigin && FLAG_wasm_simd_prototype) {
- return kAstS128;
+ return kWasmS128;
} else {
error(pc_ - 1, "invalid local type");
- return kAstStmt;
+ return kWasmStmt;
}
default:
error(pc_ - 1, "invalid local type");
- return kAstStmt;
+ return kWasmStmt;
}
}
@@ -1016,35 +1041,32 @@ class ModuleDecoder : public Decoder {
FunctionSig* consume_sig() {
if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
// parse parameter types
- uint32_t param_count = consume_u32v("param count");
- std::vector<LocalType> params;
+ uint32_t param_count =
+ consume_count("param count", kV8MaxWasmFunctionParams);
+ if (failed()) return nullptr;
+ std::vector<ValueType> params;
for (uint32_t i = 0; ok() && i < param_count; ++i) {
- LocalType param = consume_value_type();
+ ValueType param = consume_value_type();
params.push_back(param);
}
// parse return types
- const byte* pt = pc_;
- uint32_t return_count = consume_u32v("return count");
- if (return_count > kMaxReturnCount) {
- error(pt, pt, "return count of %u exceeds maximum of %u", return_count,
- kMaxReturnCount);
- return nullptr;
- }
- std::vector<LocalType> returns;
+ const size_t max_return_count = FLAG_wasm_mv_prototype
+ ? kV8MaxWasmFunctionMultiReturns
+ : kV8MaxWasmFunctionReturns;
+ uint32_t return_count = consume_count("return count", max_return_count);
+ if (failed()) return nullptr;
+ std::vector<ValueType> returns;
for (uint32_t i = 0; ok() && i < return_count; ++i) {
- LocalType ret = consume_value_type();
+ ValueType ret = consume_value_type();
returns.push_back(ret);
}
- if (failed()) {
- // Decoding failed, return void -> void
- return new (module_zone) FunctionSig(0, 0, nullptr);
- }
+ if (failed()) return nullptr;
// FunctionSig stores the return types first.
- LocalType* buffer =
- module_zone->NewArray<LocalType>(param_count + return_count);
+ ValueType* buffer =
+ module_zone->NewArray<ValueType>(param_count + return_count);
uint32_t b = 0;
for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
@@ -1113,16 +1135,16 @@ ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
isolate->counters()->wasm_decode_module_time());
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleError("start > end");
- if (size >= kMaxModuleSize) return ModuleError("size > maximum module size");
+ if (size >= kV8MaxWasmModuleSize)
+ return ModuleError("size > maximum module size");
// TODO(bradnelson): Improve histogram handling of size_t.
isolate->counters()->wasm_module_size_bytes()->AddSample(
static_cast<int>(size));
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
- WasmModule* module = new WasmModule(zone, module_start);
ModuleDecoder decoder(zone, module_start, module_end, origin);
- ModuleResult result = decoder.DecodeModule(module, verify_functions);
+ ModuleResult result = decoder.DecodeModule(verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(titzer): this isn't accurate, since it doesn't count the data
// allocated on the C++ heap.
@@ -1146,14 +1168,14 @@ WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
}
FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
- ModuleEnv* module_env,
+ ModuleBytesEnv* module_env,
const byte* function_start,
const byte* function_end) {
HistogramTimerScope wasm_decode_function_time_scope(
isolate->counters()->wasm_decode_function_time());
size_t size = function_end - function_start;
if (function_start > function_end) return FunctionError("start > end");
- if (size > kMaxFunctionSize)
+ if (size > kV8MaxWasmFunctionSize)
return FunctionError("size > maximum function size");
isolate->counters()->wasm_function_size_bytes()->AddSample(
static_cast<int>(size));
@@ -1208,22 +1230,31 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
for (uint32_t i = 0; i < functions_count && decoder.ok(); ++i) {
uint32_t size = decoder.consume_u32v("table size");
if (size == 0) {
- table.push_back(std::vector<std::pair<int, int>>());
+ table.push_back(std::vector<AsmJsOffsetEntry>());
continue;
}
if (!decoder.checkAvailable(size)) {
decoder.error("illegal asm function offset table size");
}
const byte* table_end = decoder.pc() + size;
- uint32_t locals_size = decoder.consume_u32("locals size");
+ uint32_t locals_size = decoder.consume_u32v("locals size");
+ int function_start_position = decoder.consume_u32v("function start pos");
int last_byte_offset = locals_size;
- int last_asm_position = 0;
- std::vector<std::pair<int, int>> func_asm_offsets;
+ int last_asm_position = function_start_position;
+ std::vector<AsmJsOffsetEntry> func_asm_offsets;
func_asm_offsets.reserve(size / 4);  // conservative estimate
+ // Add an entry for the stack check, associated with position 0.
+ func_asm_offsets.push_back(
+ {0, function_start_position, function_start_position});
while (decoder.ok() && decoder.pc() < table_end) {
last_byte_offset += decoder.consume_u32v("byte offset delta");
- last_asm_position += decoder.consume_i32v("asm position delta");
- func_asm_offsets.push_back({last_byte_offset, last_asm_position});
+ int call_position =
+ last_asm_position + decoder.consume_i32v("call position delta");
+ int to_number_position =
+ call_position + decoder.consume_i32v("to_number position delta");
+ last_asm_position = to_number_position;
+ func_asm_offsets.push_back(
+ {last_byte_offset, call_position, to_number_position});
}
if (decoder.pc() != table_end) {
decoder.error("broken asm offset table");
@@ -1235,6 +1266,36 @@ AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
return decoder.toResult(std::move(table));
}
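A standalone sketch of the delta scheme decoded above, in plain C++ with illustrative values ({DecodeDeltas} is hypothetical; assumes <vector> and <array>):

  struct Entry { int byte_offset, call_pos, to_number_pos; };
  // Example: locals_size = 5 and function_start_position = 10 give the
  // implicit stack-check entry {0, 10, 10}; a first delta triple (3, 2, 1)
  // then yields {5 + 3, 10 + 2, 12 + 1} = {8, 12, 13}, and the next triple
  // continues from position 13.
  std::vector<Entry> DecodeDeltas(const std::vector<std::array<int, 3>>& ds,
                                  int locals_size, int start_pos) {
    std::vector<Entry> out{{0, start_pos, start_pos}};  // stack-check entry
    int byte_offset = locals_size;
    int last_pos = start_pos;
    for (const auto& d : ds) {
      byte_offset += d[0];                  // byte offset delta
      int call_pos = last_pos + d[1];       // call position delta
      int to_number_pos = call_pos + d[2];  // to_number position delta
      last_pos = to_number_pos;
      out.push_back({byte_offset, call_pos, to_number_pos});
    }
    return out;
  }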
+std::vector<CustomSectionOffset> DecodeCustomSections(const byte* start,
+ const byte* end) {
+ Decoder decoder(start, end);
+ decoder.consume_bytes(4, "wasm magic");
+ decoder.consume_bytes(4, "wasm version");
+
+ std::vector<CustomSectionOffset> result;
+
+ while (decoder.more()) {
+ byte section_code = decoder.consume_u8("section code");
+ uint32_t section_length = decoder.consume_u32v("section length");
+ uint32_t section_start = decoder.pc_offset();
+ if (section_code != 0) {
+ // Skip known sections.
+ decoder.consume_bytes(section_length, "section bytes");
+ continue;
+ }
+ uint32_t name_length = decoder.consume_u32v("name length");
+ uint32_t name_offset = decoder.pc_offset();
+ decoder.consume_bytes(name_length, "section name");
+ uint32_t payload_offset = decoder.pc_offset();
+ uint32_t payload_length = section_length - (payload_offset - section_start);
+ decoder.consume_bytes(payload_length);
+ result.push_back({section_start, name_offset, name_length, payload_offset,
+ payload_length, section_length});
+ }
+
+ return result;
+}
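The wire layout consumed by this loop, shown on an illustrative 11-byte custom section (sizes are LEB128-encoded; the bytes are hypothetical):

  // 00                     section code 0 (custom)
  // 0B                     section length = 11
  //    04 6E 61 6D 65      name length = 4, name = "name"
  //    p0 p1 p2 p3 p4 p5   6 payload bytes
  // {section_start} is the offset just past the length byte, so
  // payload_length = 11 - (payload_offset - section_start) = 11 - 5 = 6.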
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index 7cf5cfe3c1..982fbc9189 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -6,7 +6,7 @@
#define V8_WASM_MODULE_DECODER_H_
#include "src/globals.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-result.h"
@@ -18,7 +18,12 @@ typedef Result<const WasmModule*> ModuleResult;
typedef Result<WasmFunction*> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
typedef Result<FunctionOffsets> FunctionOffsetsResult;
-typedef std::vector<std::vector<std::pair<int, int>>> AsmJsOffsets;
+struct AsmJsOffsetEntry {
+ int byte_offset;
+ int source_position_call;
+ int source_position_number_conversion;
+};
+typedef std::vector<std::vector<AsmJsOffsetEntry>> AsmJsOffsets;
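Each entry thus pairs one wasm byte offset with two asm.js source positions: the call site itself ({source_position_call}) and the implicit ToNumber conversion of the call's result ({source_position_number_conversion}), matching the decoder in module-decoder.cc.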
typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
@@ -37,7 +42,8 @@ V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
// Decodes the bytes of a WASM function between
// {function_start} and {function_end}.
V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
- Zone* zone, ModuleEnv* env,
+ Zone* zone,
+ ModuleBytesEnv* env,
const byte* function_start,
const byte* function_end);
@@ -50,6 +56,18 @@ FunctionOffsetsResult DecodeWasmFunctionOffsets(const byte* module_start,
V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
const byte* end);
+struct CustomSectionOffset {
+ uint32_t section_start;
+ uint32_t name_offset;
+ uint32_t name_length;
+ uint32_t payload_offset;
+ uint32_t payload_length;
+ uint32_t section_length;
+};
+
+V8_EXPORT_PRIVATE std::vector<CustomSectionOffset> DecodeCustomSections(
+ const byte* start, const byte* end);
+
// Extracts the mapping from wasm byte offset to asm.js source positions per
// function.
// Returns a vector of vectors with {byte offset, call position, to_number
// position} entries, or
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index 11c2ef8aa5..6cb865d59c 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -2,144 +2,273 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/assembler-inl.h"
#include "src/assert-scope.h"
+#include "src/compiler/wasm-compiler.h"
#include "src/debug/debug.h"
#include "src/factory.h"
+#include "src/frames-inl.h"
#include "src/isolate.h"
#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
+#include "src/zone/accounting-allocator.h"
using namespace v8::internal;
using namespace v8::internal::wasm;
namespace {
-enum {
- kWasmDebugInfoWasmObj,
- kWasmDebugInfoWasmBytesHash,
- kWasmDebugInfoAsmJsOffsets,
- kWasmDebugInfoNumEntries
-};
+class InterpreterHandle {
+ AccountingAllocator allocator_;
+ WasmInstance instance_;
+ WasmInterpreter interpreter_;
-// TODO(clemensh): Move asm.js offset tables to the compiled module.
-FixedArray *GetAsmJsOffsetTables(Handle<WasmDebugInfo> debug_info,
- Isolate *isolate) {
- Object *offset_tables = debug_info->get(kWasmDebugInfoAsmJsOffsets);
- if (!offset_tables->IsUndefined(isolate)) {
- return FixedArray::cast(offset_tables);
+ public:
+ // Initialize in the right order, using helper methods to make this possible.
+ // WasmInterpreter has to be allocated in place, since it is not movable.
+ InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
+ : instance_(debug_info->wasm_instance()->compiled_module()->module()),
+ interpreter_(GetBytesEnv(&instance_, debug_info), &allocator_) {
+ Handle<JSArrayBuffer> mem_buffer =
+ handle(debug_info->wasm_instance()->memory_buffer(), isolate);
+ if (mem_buffer->IsUndefined(isolate)) {
+ DCHECK_EQ(0, instance_.module->min_mem_pages);
+ instance_.mem_start = nullptr;
+ instance_.mem_size = 0;
+ } else {
+ instance_.mem_start =
+ reinterpret_cast<byte*>(mem_buffer->backing_store());
+ CHECK(mem_buffer->byte_length()->ToUint32(&instance_.mem_size));
+ }
}
- Handle<JSObject> wasm_instance(debug_info->wasm_instance(), isolate);
- Handle<WasmCompiledModule> compiled_module(GetCompiledModule(*wasm_instance),
- isolate);
- DCHECK(compiled_module->has_asm_js_offset_tables());
-
- AsmJsOffsetsResult asm_offsets;
- {
- Handle<ByteArray> asm_offset_tables =
- compiled_module->asm_js_offset_tables();
- DisallowHeapAllocation no_gc;
- const byte *bytes_start = asm_offset_tables->GetDataStartAddress();
- const byte *bytes_end = bytes_start + asm_offset_tables->length();
- asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ static ModuleBytesEnv GetBytesEnv(WasmInstance* instance,
+ WasmDebugInfo* debug_info) {
+ // Return a raw pointer into the heap. The WasmInterpreter will make its own
+ // copy of this data anyway, and there is no heap allocation in between.
+ SeqOneByteString* bytes_str =
+ debug_info->wasm_instance()->compiled_module()->module_bytes();
+ Vector<const byte> bytes(bytes_str->GetChars(), bytes_str->length());
+ return ModuleBytesEnv(instance->module, instance, bytes);
}
- // Wasm bytes must be valid and must contain asm.js offset table.
- DCHECK(asm_offsets.ok());
- DCHECK_GE(static_cast<size_t>(kMaxInt), asm_offsets.val.size());
- int num_functions = static_cast<int>(asm_offsets.val.size());
- DCHECK_EQ(
- wasm::GetNumberOfFunctions(handle(debug_info->wasm_instance())),
- static_cast<int>(num_functions +
- compiled_module->module()->num_imported_functions));
- Handle<FixedArray> all_tables =
- isolate->factory()->NewFixedArray(num_functions);
- debug_info->set(kWasmDebugInfoAsmJsOffsets, *all_tables);
- for (int func = 0; func < num_functions; ++func) {
- std::vector<std::pair<int, int>> &func_asm_offsets = asm_offsets.val[func];
- if (func_asm_offsets.empty()) continue;
- size_t array_size = 2 * kIntSize * func_asm_offsets.size();
- CHECK_LE(array_size, static_cast<size_t>(kMaxInt));
- ByteArray *arr =
- *isolate->factory()->NewByteArray(static_cast<int>(array_size));
- all_tables->set(func, arr);
- int idx = 0;
- for (std::pair<int, int> p : func_asm_offsets) {
- // Byte offsets must be strictly monotonously increasing:
- DCHECK(idx == 0 || p.first > arr->get_int(idx - 2));
- arr->set_int(idx++, p.first);
- arr->set_int(idx++, p.second);
+
+ WasmInterpreter* interpreter() { return &interpreter_; }
+ const WasmModule* module() { return instance_.module; }
+
+ void Execute(uint32_t func_index, uint8_t* arg_buffer) {
+ DCHECK_GE(module()->functions.size(), func_index);
+ FunctionSig* sig = module()->functions[func_index].sig;
+ DCHECK_GE(kMaxInt, sig->parameter_count());
+ int num_params = static_cast<int>(sig->parameter_count());
+ ScopedVector<WasmVal> wasm_args(num_params);
+ uint8_t* arg_buf_ptr = arg_buffer;
+ for (int i = 0; i < num_params; ++i) {
+ uint32_t param_size = 1 << ElementSizeLog2Of(sig->GetParam(i));
+#define CASE_ARG_TYPE(type, ctype) \
+ case type: \
+ DCHECK_EQ(param_size, sizeof(ctype)); \
+ wasm_args[i] = WasmVal(*reinterpret_cast<ctype*>(arg_buf_ptr)); \
+ break;
+ switch (sig->GetParam(i)) {
+ CASE_ARG_TYPE(kWasmI32, uint32_t)
+ CASE_ARG_TYPE(kWasmI64, uint64_t)
+ CASE_ARG_TYPE(kWasmF32, float)
+ CASE_ARG_TYPE(kWasmF64, double)
+#undef CASE_ARG_TYPE
+ default:
+ UNREACHABLE();
+ }
+ arg_buf_ptr += param_size;
}
- DCHECK_EQ(arr->length(), idx * kIntSize);
+
+ WasmInterpreter::Thread* thread = interpreter_.GetThread(0);
+ // We do not support reentering an already running interpreter at the moment
+ // (like INTERPRETER -> JS -> WASM -> INTERPRETER).
+ DCHECK(thread->state() == WasmInterpreter::STOPPED ||
+ thread->state() == WasmInterpreter::FINISHED);
+ thread->Reset();
+ thread->PushFrame(&module()->functions[func_index], wasm_args.start());
+ WasmInterpreter::State state;
+ do {
+ state = thread->Run();
+ switch (state) {
+ case WasmInterpreter::State::PAUSED: {
+ // We hit a breakpoint.
+ // TODO(clemensh): Handle this.
+ } break;
+ case WasmInterpreter::State::FINISHED:
+ // Perfect, just break the switch and exit the loop.
+ break;
+ case WasmInterpreter::State::TRAPPED:
+ // TODO(clemensh): Generate appropriate JS exception.
+ UNIMPLEMENTED();
+ break;
+ // STOPPED and RUNNING should never occur here.
+ case WasmInterpreter::State::STOPPED:
+ case WasmInterpreter::State::RUNNING:
+ default:
+ UNREACHABLE();
+ }
+ } while (state != WasmInterpreter::State::FINISHED);
+
+ // Copy the return value back into {arg_buffer}.
+ DCHECK_GE(kV8MaxWasmFunctionReturns, sig->return_count());
+ // TODO(wasm): Handle multi-value returns.
+ DCHECK_EQ(1, kV8MaxWasmFunctionReturns);
+ if (sig->return_count()) {
+ WasmVal ret_val = thread->GetReturnValue(0);
+#define CASE_RET_TYPE(type, ctype) \
+ case type: \
+ DCHECK_EQ(1 << ElementSizeLog2Of(sig->GetReturn(0)), sizeof(ctype)); \
+ *reinterpret_cast<ctype*>(arg_buffer) = ret_val.to<ctype>(); \
+ break;
+ switch (sig->GetReturn(0)) {
+ CASE_RET_TYPE(kWasmI32, uint32_t)
+ CASE_RET_TYPE(kWasmI64, uint64_t)
+ CASE_RET_TYPE(kWasmF32, float)
+ CASE_RET_TYPE(kWasmF64, double)
+#undef CASE_RET_TYPE
+ default:
+ UNREACHABLE();
+ }
+ }
+ }
+};
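The packed convention {Execute} assumes for {arg_buffer}, sketched for an illustrative signature (i32, f64) -> f64:

  // bytes [0..4)   i32 param 0   (1 << ElementSizeLog2Of(kWasmI32) = 4)
  // bytes [4..12)  f64 param 1   (1 << ElementSizeLog2Of(kWasmF64) = 8)
  // on return, bytes [0..8) are overwritten with the f64 result.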
+
+InterpreterHandle* GetOrCreateInterpreterHandle(
+ Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
+ Handle<Object> handle(debug_info->get(WasmDebugInfo::kInterpreterHandle),
+ isolate);
+ if (handle->IsUndefined(isolate)) {
+ InterpreterHandle* cpp_handle = new InterpreterHandle(isolate, *debug_info);
+ handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
+ debug_info->set(WasmDebugInfo::kInterpreterHandle, *handle);
}
- return *all_tables;
+
+ return Handle<Managed<InterpreterHandle>>::cast(handle)->get();
+}
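The same get-or-create idiom, generalized as a sketch; {GetOrCreateManaged} is hypothetical (not a V8 helper) and assumes only the Managed<T>::New factory used above:

  template <typename T, typename Factory>
  T* GetOrCreateManaged(Isolate* isolate, Handle<FixedArray> holder, int slot,
                        Factory create) {
    Handle<Object> obj(holder->get(slot), isolate);
    if (obj->IsUndefined(isolate)) {
      obj = Managed<T>::New(isolate, create());  // wrap the C++ object
      holder->set(slot, *obj);
    }
    return Handle<Managed<T>>::cast(obj)->get();
  }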
+
+int GetNumFunctions(WasmInstanceObject* instance) {
+ size_t num_functions =
+ instance->compiled_module()->module()->functions.size();
+ DCHECK_GE(kMaxInt, num_functions);
+ return static_cast<int>(num_functions);
+}
+
+Handle<FixedArray> GetOrCreateInterpretedFunctions(
+ Isolate* isolate, Handle<WasmDebugInfo> debug_info) {
+ Handle<Object> obj(debug_info->get(WasmDebugInfo::kInterpretedFunctions),
+ isolate);
+ if (!obj->IsUndefined(isolate)) return Handle<FixedArray>::cast(obj);
+
+ Handle<FixedArray> new_arr = isolate->factory()->NewFixedArray(
+ GetNumFunctions(debug_info->wasm_instance()));
+ debug_info->set(WasmDebugInfo::kInterpretedFunctions, *new_arr);
+ return new_arr;
}
-} // namespace
-Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<JSObject> wasm) {
- Isolate *isolate = wasm->GetIsolate();
- Factory *factory = isolate->factory();
- Handle<FixedArray> arr =
- factory->NewFixedArray(kWasmDebugInfoNumEntries, TENURED);
- arr->set(kWasmDebugInfoWasmObj, *wasm);
- int hash = 0;
- Handle<SeqOneByteString> wasm_bytes = GetWasmBytes(wasm);
- {
- DisallowHeapAllocation no_gc;
- hash = StringHasher::HashSequentialString(
- wasm_bytes->GetChars(), wasm_bytes->length(), kZeroHashSeed);
+void RedirectCallsitesInCode(Code* code, Code* old_target, Code* new_target) {
+ DisallowHeapAllocation no_gc;
+ for (RelocIterator it(code, RelocInfo::kCodeTargetMask); !it.done();
+ it.next()) {
+ DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
+ if (target != old_target) continue;
+ it.rinfo()->set_target_address(new_target->instruction_start());
}
- Handle<Object> hash_obj = factory->NewNumberFromInt(hash, TENURED);
- arr->set(kWasmDebugInfoWasmBytesHash, *hash_obj);
+}
+
+void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
+ Code* old_target, Code* new_target) {
+ DisallowHeapAllocation no_gc;
+ // Redirect all calls in wasm functions.
+ FixedArray* code_table = instance->compiled_module()->ptr_to_code_table();
+ for (int i = 0, e = GetNumFunctions(instance); i < e; ++i) {
+ RedirectCallsitesInCode(Code::cast(code_table->get(i)), old_target,
+ new_target);
+ }
+
+ // Redirect all calls in exported functions.
+ FixedArray* weak_exported_functions =
+ instance->compiled_module()->ptr_to_weak_exported_functions();
+ for (int i = 0, e = weak_exported_functions->length(); i != e; ++i) {
+ WeakCell* weak_function = WeakCell::cast(weak_exported_functions->get(i));
+ if (weak_function->cleared()) continue;
+ Code* code = JSFunction::cast(weak_function->value())->code();
+ RedirectCallsitesInCode(code, old_target, new_target);
+ }
+}
+
+void EnsureRedirectToInterpreter(Isolate* isolate,
+ Handle<WasmDebugInfo> debug_info,
+ int func_index) {
+ Handle<FixedArray> interpreted_functions =
+ GetOrCreateInterpretedFunctions(isolate, debug_info);
+ if (!interpreted_functions->get(func_index)->IsUndefined(isolate)) return;
+
+ Handle<WasmInstanceObject> instance(debug_info->wasm_instance(), isolate);
+ Handle<Code> new_code = compiler::CompileWasmInterpreterEntry(
+ isolate, func_index,
+ instance->compiled_module()->module()->functions[func_index].sig,
+ instance);
+
+ Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+ Handle<Code> old_code(Code::cast(code_table->get(func_index)), isolate);
+ interpreted_functions->set(func_index, *new_code);
+ RedirectCallsitesInInstance(isolate, *instance, *old_code, *new_code);
+}
+
+} // namespace
+
+Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
+ Isolate* isolate = instance->GetIsolate();
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
+ arr->set(kInstance, *instance);
return Handle<WasmDebugInfo>::cast(arr);
}
-bool WasmDebugInfo::IsDebugInfo(Object *object) {
+bool WasmDebugInfo::IsDebugInfo(Object* object) {
if (!object->IsFixedArray()) return false;
- FixedArray *arr = FixedArray::cast(object);
- return arr->length() == kWasmDebugInfoNumEntries &&
- IsWasmInstance(arr->get(kWasmDebugInfoWasmObj)) &&
- arr->get(kWasmDebugInfoWasmBytesHash)->IsNumber();
+ FixedArray* arr = FixedArray::cast(object);
+ if (arr->length() != kFieldCount) return false;
+ if (!IsWasmInstance(arr->get(kInstance))) return false;
+ Isolate* isolate = arr->GetIsolate();
+ if (!arr->get(kInterpreterHandle)->IsUndefined(isolate) &&
+ !arr->get(kInterpreterHandle)->IsForeign())
+ return false;
+ return true;
}
-WasmDebugInfo *WasmDebugInfo::cast(Object *object) {
+WasmDebugInfo* WasmDebugInfo::cast(Object* object) {
DCHECK(IsDebugInfo(object));
- return reinterpret_cast<WasmDebugInfo *>(object);
+ return reinterpret_cast<WasmDebugInfo*>(object);
}
-JSObject *WasmDebugInfo::wasm_instance() {
- return JSObject::cast(get(kWasmDebugInfoWasmObj));
+WasmInstanceObject* WasmDebugInfo::wasm_instance() {
+ return WasmInstanceObject::cast(get(kInstance));
}
-int WasmDebugInfo::GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
- int func_index, int byte_offset) {
- Isolate *isolate = debug_info->GetIsolate();
- Handle<JSObject> instance(debug_info->wasm_instance(), isolate);
- FixedArray *offset_tables = GetAsmJsOffsetTables(debug_info, isolate);
-
- WasmCompiledModule *compiled_module = wasm::GetCompiledModule(*instance);
- int num_imported_functions =
- compiled_module->module()->num_imported_functions;
- DCHECK_LE(num_imported_functions, func_index);
- func_index -= num_imported_functions;
- DCHECK_LT(func_index, offset_tables->length());
- ByteArray *offset_table = ByteArray::cast(offset_tables->get(func_index));
-
- // Binary search for the current byte offset.
- int left = 0; // inclusive
- int right = offset_table->length() / kIntSize / 2; // exclusive
- DCHECK_LT(left, right);
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- if (offset_table->get_int(2 * mid) <= byte_offset) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // There should be an entry for each position that could show up on the stack
- // trace:
- DCHECK_EQ(byte_offset, offset_table->get_int(2 * left));
- return offset_table->get_int(2 * left + 1);
+void WasmDebugInfo::SetBreakpoint(Handle<WasmDebugInfo> debug_info,
+ int func_index, int offset) {
+ Isolate* isolate = debug_info->GetIsolate();
+ InterpreterHandle* handle = GetOrCreateInterpreterHandle(isolate, debug_info);
+ WasmInterpreter* interpreter = handle->interpreter();
+ DCHECK_LE(0, func_index);
+ DCHECK_GT(handle->module()->functions.size(), func_index);
+ const WasmFunction* func = &handle->module()->functions[func_index];
+ interpreter->SetBreakpoint(func, offset, true);
+ EnsureRedirectToInterpreter(isolate, debug_info, func_index);
+}
+
+void WasmDebugInfo::RunInterpreter(Handle<WasmDebugInfo> debug_info,
+ int func_index, uint8_t* arg_buffer) {
+ DCHECK_LE(0, func_index);
+ InterpreterHandle* interp_handle =
+ GetOrCreateInterpreterHandle(debug_info->GetIsolate(), debug_info);
+ interp_handle->Execute(static_cast<uint32_t>(func_index), arg_buffer);
}
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 4c4c91b29c..e982cc7f99 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -208,6 +208,19 @@ void float64_pow_wrapper(double* param0, double* param1) {
double y = ReadDoubleValue(param1);
WriteDoubleValue(param0, Pow(x, y));
}
+
+static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
+
+void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
+ wasm_trap_callback_for_testing = callback;
+}
+
+void call_trap_callback_for_testing() {
+ if (wasm_trap_callback_for_testing) {
+ wasm_trap_callback_for_testing();
+ }
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index d9539ce71a..04337b99ec 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -61,6 +61,12 @@ uint32_t word64_popcnt_wrapper(uint64_t* input);
void float64_pow_wrapper(double* param0, double* param1);
+typedef void (*WasmTrapCallbackForTesting)();
+
+void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback);
+
+void call_trap_callback_for_testing();
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 6e049ffd25..ac125caf7e 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -5,9 +5,10 @@
#include "src/wasm/wasm-interpreter.h"
#include "src/utils.h"
-#include "src/wasm/ast-decoder.h"
#include "src/wasm/decoder.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/wasm-external-refs.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/zone/accounting-allocator.h"
@@ -62,6 +63,7 @@ namespace wasm {
V(I64GtS, int64_t, >) \
V(I64GeS, int64_t, >=) \
V(F32Add, float, +) \
+ V(F32Sub, float, -) \
V(F32Eq, float, ==) \
V(F32Ne, float, !=) \
V(F32Lt, float, <) \
@@ -69,6 +71,7 @@ namespace wasm {
V(F32Gt, float, >) \
V(F32Ge, float, >=) \
V(F64Add, double, +) \
+ V(F64Sub, double, -) \
V(F64Eq, double, ==) \
V(F64Ne, double, !=) \
V(F64Lt, double, <) \
@@ -101,13 +104,11 @@ namespace wasm {
V(I32Rol, int32_t) \
V(I64Ror, int64_t) \
V(I64Rol, int64_t) \
- V(F32Sub, float) \
V(F32Min, float) \
V(F32Max, float) \
V(F32CopySign, float) \
V(F64Min, double) \
V(F64Max, double) \
- V(F64Sub, double) \
V(F64CopySign, double) \
V(I32AsmjsDivS, int32_t) \
V(I32AsmjsDivU, uint32_t) \
@@ -158,8 +159,6 @@ namespace wasm {
V(F64UConvertI64, uint64_t) \
V(F64ConvertF32, float) \
V(F64ReinterpretI64, int64_t) \
- V(I32ReinterpretF32, float) \
- V(I64ReinterpretF64, double) \
V(I32AsmjsSConvertF32, float) \
V(I32AsmjsUConvertF32, float) \
V(I32AsmjsSConvertF64, double) \
@@ -293,41 +292,6 @@ static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
return (a << shift) | (a >> (64 - shift));
}
-static float quiet(float a) {
- static const uint32_t kSignalingBit = 1 << 22;
- uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
- if ((q & kSignalingBit) != 0) {
- // On some machines, the signaling bit set indicates it's a quiet NaN.
- return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
- } else {
- // On others, the signaling bit set indicates it's a signaling NaN.
- return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
- }
-}
-
-static double quiet(double a) {
- static const uint64_t kSignalingBit = 1ULL << 51;
- uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
- if ((q & kSignalingBit) != 0) {
- // On some machines, the signaling bit set indicates it's a quiet NaN.
- return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
- } else {
- // On others, the signaling bit set indicates it's a signaling NaN.
- return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
- }
-}
-
-static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
- float result = a - b;
- // Some architectures (e.g. MIPS) need extra checking to preserve the payload
- // of a NaN operand.
- if (result - result != 0) {
- if (std::isnan(a)) return quiet(a);
- if (std::isnan(b)) return quiet(b);
- }
- return result;
-}
-
static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
return JSMin(a, b);
}
@@ -340,17 +304,6 @@ static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
return copysignf(a, b);
}
-static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
- double result = a - b;
- // Some architectures (e.g. MIPS) need extra checking to preserve the payload
- // of a NaN operand.
- if (result - result != 0) {
- if (std::isnan(a)) return quiet(a);
- if (std::isnan(b)) return quiet(b);
- }
- return result;
-}
-
static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
return JSMin(a, b);
}
@@ -651,19 +604,20 @@ static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
return bit_cast<double>(a);
}
-static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
- return bit_cast<int32_t>(a);
+static inline int32_t ExecuteI32ReinterpretF32(WasmVal a) {
+ return a.to_unchecked<int32_t>();
}
-static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
- return bit_cast<int64_t>(a);
+static inline int64_t ExecuteI64ReinterpretF64(WasmVal a) {
+ return a.to_unchecked<int64_t>();
}
static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
WasmInstance* instance) {
// TODO(ahaas): Move memory allocation to wasm-module.cc for better
// encapsulation.
- if (delta_pages > wasm::WasmModule::kV8MaxPages) {
+ if (delta_pages > wasm::kV8MaxWasmMemoryPages ||
+ delta_pages > instance->module->max_mem_pages) {
return -1;
}
uint32_t old_size = instance->mem_size;
@@ -679,8 +633,9 @@ static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
} else {
DCHECK_NOT_NULL(instance->mem_start);
new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
- if (new_size >
- wasm::WasmModule::kV8MaxPages * wasm::WasmModule::kPageSize) {
+ if (new_size / wasm::WasmModule::kPageSize > wasm::kV8MaxWasmMemoryPages ||
+ new_size / wasm::WasmModule::kPageSize >
+ instance->module->max_mem_pages) {
return -1;
}
new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
@@ -721,8 +676,8 @@ class ControlTransfers : public ZoneObject {
public:
ControlTransferMap map_;
- ControlTransfers(Zone* zone, ModuleEnv* env, AstLocalDecls* locals,
- const byte* start, const byte* end)
+ ControlTransfers(Zone* zone, BodyLocalDecls* locals, const byte* start,
+ const byte* end)
: map_(zone) {
// Represents a control flow label.
struct CLabel : public ZoneObject {
@@ -872,7 +827,7 @@ class ControlTransfers : public ZoneObject {
// Code and metadata needed to execute a function.
struct InterpreterCode {
const WasmFunction* function; // wasm function
- AstLocalDecls locals; // local declarations
+ BodyLocalDecls locals; // local declarations
const byte* orig_start; // start of original code
const byte* orig_end; // end of original code
byte* start; // start of (maybe altered) code
@@ -890,14 +845,13 @@ class CodeMap {
const WasmModule* module_;
ZoneVector<InterpreterCode> interpreter_code_;
- CodeMap(const WasmModule* module, Zone* zone)
+ CodeMap(const WasmModule* module, const uint8_t* module_start, Zone* zone)
: zone_(zone), module_(module), interpreter_code_(zone) {
if (module == nullptr) return;
for (size_t i = 0; i < module->functions.size(); ++i) {
const WasmFunction* function = &module->functions[i];
- const byte* code_start =
- module->module_start + function->code_start_offset;
- const byte* code_end = module->module_start + function->code_end_offset;
+ const byte* code_start = module_start + function->code_start_offset;
+ const byte* code_end = module_start + function->code_end_offset;
AddFunction(function, code_start, code_end);
}
}
@@ -929,10 +883,9 @@ class CodeMap {
InterpreterCode* Preprocess(InterpreterCode* code) {
if (code->targets == nullptr && code->start) {
// Compute the control targets map and the local declarations.
- CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
- ModuleEnv env = {module_, nullptr, kWasmOrigin};
+ CHECK(DecodeLocalDecls(&code->locals, code->start, code->end));
code->targets = new (zone_) ControlTransfers(
- zone_, &env, &code->locals, code->orig_start, code->orig_end);
+ zone_, &code->locals, code->orig_start, code->orig_end);
}
return code;
}
@@ -940,7 +893,7 @@ class CodeMap {
int AddFunction(const WasmFunction* function, const byte* code_start,
const byte* code_end) {
InterpreterCode code = {
- function, AstLocalDecls(zone_), code_start,
+ function, BodyLocalDecls(zone_), code_start,
code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
nullptr};
@@ -1072,7 +1025,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
// Limit of parameters.
sp_t plimit() { return sp + code->function->sig->parameter_count(); }
// Limit of locals.
- sp_t llimit() { return plimit() + code->locals.total_local_count; }
+ sp_t llimit() { return plimit() + code->locals.type_list.size(); }
};
struct Block {
@@ -1121,28 +1074,28 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
pc_t InitLocals(InterpreterCode* code) {
- for (auto p : code->locals.local_types) {
+ for (auto p : code->locals.type_list) {
WasmVal val;
- switch (p.first) {
- case kAstI32:
+ switch (p) {
+ case kWasmI32:
val = WasmVal(static_cast<int32_t>(0));
break;
- case kAstI64:
+ case kWasmI64:
val = WasmVal(static_cast<int64_t>(0));
break;
- case kAstF32:
+ case kWasmF32:
val = WasmVal(static_cast<float>(0));
break;
- case kAstF64:
+ case kWasmF64:
val = WasmVal(static_cast<double>(0));
break;
default:
UNREACHABLE();
break;
}
- stack_.insert(stack_.end(), p.second, val);
+ stack_.push_back(val);
}
- return code->locals.decls_encoded_size;
+ return code->locals.encoded_size;
}
void CommitPc(pc_t pc) {
@@ -1173,7 +1126,7 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
- DCHECK_GT(frames_.size(), 0u);
+ DCHECK_GT(frames_.size(), 0);
// Pop all blocks for this frame.
while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
blocks_.pop_back();
@@ -1357,12 +1310,6 @@ class ThreadImpl : public WasmInterpreter::Thread {
blocks_.pop_back();
break;
}
- case kExprI8Const: {
- ImmI8Operand operand(&decoder, code->at(pc));
- Push(pc, WasmVal(operand.value));
- len = 1 + operand.length;
- break;
- }
case kExprI32Const: {
ImmI32Operand operand(&decoder, code->at(pc));
Push(pc, WasmVal(operand.value));
@@ -1450,15 +1397,15 @@ class ThreadImpl : public WasmInterpreter::Thread {
GlobalIndexOperand operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- LocalType type = global->type;
+ ValueType type = global->type;
WasmVal val;
- if (type == kAstI32) {
+ if (type == kWasmI32) {
val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
- } else if (type == kAstI64) {
+ } else if (type == kWasmI64) {
val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
- } else if (type == kAstF32) {
+ } else if (type == kWasmF32) {
val = WasmVal(*reinterpret_cast<float*>(ptr));
- } else if (type == kAstF64) {
+ } else if (type == kWasmF64) {
val = WasmVal(*reinterpret_cast<double*>(ptr));
} else {
UNREACHABLE();
@@ -1471,15 +1418,15 @@ class ThreadImpl : public WasmInterpreter::Thread {
GlobalIndexOperand operand(&decoder, code->at(pc));
const WasmGlobal* global = &module()->globals[operand.index];
byte* ptr = instance()->globals_start + global->offset;
- LocalType type = global->type;
+ ValueType type = global->type;
WasmVal val = Pop();
- if (type == kAstI32) {
+ if (type == kWasmI32) {
*reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
- } else if (type == kAstI64) {
+ } else if (type == kWasmI64) {
*reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
- } else if (type == kAstF32) {
+ } else if (type == kWasmF32) {
*reinterpret_cast<float*>(ptr) = val.to<float>();
- } else if (type == kAstF64) {
+ } else if (type == kWasmF64) {
*reinterpret_cast<double*>(ptr) = val.to<double>();
} else {
UNREACHABLE();
@@ -1605,6 +1552,19 @@ class ThreadImpl : public WasmInterpreter::Thread {
len = 1 + operand.length;
break;
}
+ // We need to treat kExprI32ReinterpretF32 and kExprI64ReinterpretF64
+ // specially to guarantee that the quiet bit of a NaN is preserved on
+ // ia32 by the reinterpret casts.
+ case kExprI32ReinterpretF32: {
+ WasmVal result(ExecuteI32ReinterpretF32(Pop()));
+ Push(pc, result);
+ break;
+ }
+ case kExprI64ReinterpretF64: {
+ WasmVal result(ExecuteI64ReinterpretF64(Pop()));
+ Push(pc, result);
+ break;
+ }
#define EXECUTE_SIMPLE_BINOP(name, ctype, op) \
case kExpr##name: { \
WasmVal rval = Pop(); \
@@ -1680,8 +1640,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
WasmVal Pop() {
- DCHECK_GT(stack_.size(), 0u);
- DCHECK_GT(frames_.size(), 0u);
+ DCHECK_GT(stack_.size(), 0);
+ DCHECK_GT(frames_.size(), 0);
DCHECK_GT(stack_.size(), frames_.back().llimit()); // can't pop into locals
WasmVal val = stack_.back();
stack_.pop_back();
@@ -1689,8 +1649,8 @@ class ThreadImpl : public WasmInterpreter::Thread {
}
void PopN(int n) {
- DCHECK_GE(stack_.size(), static_cast<size_t>(n));
- DCHECK_GT(frames_.size(), 0u);
+ DCHECK_GE(stack_.size(), n);
+ DCHECK_GT(frames_.size(), 0);
size_t nsize = stack_.size() - n;
DCHECK_GE(nsize, frames_.back().llimit()); // can't pop into locals
stack_.resize(nsize);
@@ -1698,13 +1658,13 @@ class ThreadImpl : public WasmInterpreter::Thread {
WasmVal PopArity(size_t arity) {
if (arity == 0) return WasmVal();
- CHECK_EQ(1u, arity);
+ CHECK_EQ(1, arity);
return Pop();
}
void Push(pc_t pc, WasmVal val) {
// TODO(titzer): store PC as well?
- if (val.type != kAstStmt) stack_.push_back(val);
+ if (val.type != kWasmStmt) stack_.push_back(val);
}
void TraceStack(const char* phase, pc_t pc) {
@@ -1730,19 +1690,19 @@ class ThreadImpl : public WasmInterpreter::Thread {
PrintF(" s%zu:", i);
WasmVal val = stack_[i];
switch (val.type) {
- case kAstI32:
+ case kWasmI32:
PrintF("i32:%d", val.to<int32_t>());
break;
- case kAstI64:
+ case kWasmI64:
PrintF("i64:%" PRId64 "", val.to<int64_t>());
break;
- case kAstF32:
+ case kWasmF32:
PrintF("f32:%f", val.to<float>());
break;
- case kAstF64:
+ case kWasmF64:
PrintF("f64:%lf", val.to<double>());
break;
- case kAstStmt:
+ case kWasmStmt:
PrintF("void");
break;
default:
@@ -1760,14 +1720,19 @@ class ThreadImpl : public WasmInterpreter::Thread {
class WasmInterpreterInternals : public ZoneObject {
public:
WasmInstance* instance_;
+ // Create a copy of the module bytes for the interpreter, since the passed
+ // pointer might be invalidated after constructing the interpreter.
+ const ZoneVector<uint8_t> module_bytes_;
CodeMap codemap_;
ZoneVector<ThreadImpl*> threads_;
- WasmInterpreterInternals(Zone* zone, WasmInstance* instance)
- : instance_(instance),
- codemap_(instance_ ? instance_->module : nullptr, zone),
+ WasmInterpreterInternals(Zone* zone, const ModuleBytesEnv& env)
+ : instance_(env.instance),
+ module_bytes_(env.module_bytes.start(), env.module_bytes.end(), zone),
+ codemap_(env.instance ? env.instance->module : nullptr,
+ module_bytes_.data(), zone),
threads_(zone) {
- threads_.push_back(new ThreadImpl(zone, &codemap_, instance));
+ threads_.push_back(new ThreadImpl(zone, &codemap_, env.instance));
}
void Delete() {
@@ -1780,10 +1745,10 @@ class WasmInterpreterInternals : public ZoneObject {
//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
-WasmInterpreter::WasmInterpreter(WasmInstance* instance,
+WasmInterpreter::WasmInterpreter(const ModuleBytesEnv& env,
AccountingAllocator* allocator)
: zone_(allocator, ZONE_NAME),
- internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}
+ internals_(new (&zone_) WasmInterpreterInternals(&zone_, env)) {}
WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
@@ -1797,7 +1762,7 @@ bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
if (!code) return false;
size_t size = static_cast<size_t>(code->end - code->start);
// Check bounds for {pc}.
- if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+ if (pc < code->locals.encoded_size || pc >= size) return false;
// Make a copy of the code before enabling a breakpoint.
if (enabled && code->orig_start == code->start) {
code->start = reinterpret_cast<byte*>(zone_.New(size));
@@ -1818,7 +1783,7 @@ bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
if (!code) return false;
size_t size = static_cast<size_t>(code->end - code->start);
// Check bounds for {pc}.
- if (pc < code->locals.decls_encoded_size || pc >= size) return false;
+ if (pc < code->locals.encoded_size || pc >= size) return false;
// Check if a breakpoint is present at that place in the code.
return code->start[pc] == kInternalBreakpoint;
}
@@ -1841,14 +1806,14 @@ WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
CHECK_GE(index, 0);
UNIMPLEMENTED();
WasmVal none;
- none.type = kAstStmt;
+ none.type = kWasmStmt;
return none;
}
WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
UNIMPLEMENTED();
WasmVal none;
- none.type = kAstStmt;
+ none.type = kWasmStmt;
return none;
}
@@ -1885,7 +1850,7 @@ bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
Zone* zone, const byte* start, const byte* end) {
- ControlTransfers targets(zone, nullptr, nullptr, start, end);
+ ControlTransfers targets(zone, nullptr, start, end);
return targets.map_;
}
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 360362b994..80e6c4ba79 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -17,8 +17,8 @@ namespace internal {
namespace wasm {
// forward declarations.
+struct ModuleBytesEnv;
struct WasmFunction;
-struct WasmInstance;
class WasmInterpreterInternals;
typedef size_t pc_t;
@@ -32,23 +32,23 @@ typedef ZoneMap<pc_t, pcdiff_t> ControlTransferMap;
// Macro for defining union members.
#define FOREACH_UNION_MEMBER(V) \
- V(i32, kAstI32, int32_t) \
- V(u32, kAstI32, uint32_t) \
- V(i64, kAstI64, int64_t) \
- V(u64, kAstI64, uint64_t) \
- V(f32, kAstF32, float) \
- V(f64, kAstF64, double)
+ V(i32, kWasmI32, int32_t) \
+ V(u32, kWasmI32, uint32_t) \
+ V(i64, kWasmI64, int64_t) \
+ V(u64, kWasmI64, uint64_t) \
+ V(f32, kWasmF32, float) \
+ V(f64, kWasmF64, double)
// Representation of values within the interpreter.
struct WasmVal {
- LocalType type;
+ ValueType type;
union {
#define DECLARE_FIELD(field, localtype, ctype) ctype field;
FOREACH_UNION_MEMBER(DECLARE_FIELD)
#undef DECLARE_FIELD
} val;
- WasmVal() : type(kAstStmt) {}
+ WasmVal() : type(kWasmStmt) {}
#define DECLARE_CONSTRUCTOR(field, localtype, ctype) \
explicit WasmVal(ctype v) : type(localtype) { val.field = v; }
@@ -56,13 +56,22 @@ struct WasmVal {
#undef DECLARE_CONSTRUCTOR
template <typename T>
- T to() {
+ inline T to() {
+ UNREACHABLE();
+ }
+
+ template <typename T>
+ inline T to_unchecked() {
UNREACHABLE();
}
};
#define DECLARE_CAST(field, localtype, ctype) \
template <> \
+ inline ctype WasmVal::to_unchecked() { \
+ return val.field; \
+ } \
+ template <> \
inline ctype WasmVal::to() { \
CHECK_EQ(localtype, type); \
return val.field; \
@@ -70,11 +79,6 @@ struct WasmVal {
FOREACH_UNION_MEMBER(DECLARE_CAST)
#undef DECLARE_CAST
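A usage sketch of the two accessors generated above (values illustrative):

  WasmVal v(1.5f);                          // tagged as kWasmF32
  float f = v.to<float>();                  // checked: CHECK_EQ on the tag
  int32_t raw = v.to_unchecked<int32_t>();  // raw union bits, no tag check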
-template <>
-inline void WasmVal::to() {
- CHECK_EQ(kAstStmt, type);
-}
-
// Representation of frames within the interpreter.
class WasmFrame {
public:
@@ -135,7 +139,7 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
bool GetBreakpoint(const WasmFunction* function, int pc);
};
- WasmInterpreter(WasmInstance* instance, AccountingAllocator* allocator);
+ WasmInterpreter(const ModuleBytesEnv& env, AccountingAllocator* allocator);
~WasmInterpreter();
//==========================================================================
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index 0e030a28c4..b426d5bf3d 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -18,6 +18,7 @@
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
@@ -28,13 +29,12 @@ using v8::internal::wasm::ErrorThrower;
namespace v8 {
-enum WasmMemoryObjectData {
- kWasmMemoryBuffer,
- kWasmMemoryMaximum,
- kWasmMemoryInstanceObject
-};
-
namespace {
+
+#define RANGE_ERROR_MSG \
+ "Wasm compilation exceeds internal limits in this context for the provided " \
+ "arguments"
+
i::Handle<i::String> v8_str(i::Isolate* isolate, const char* str) {
return isolate->factory()->NewStringFromAsciiChecked(str);
}
@@ -48,6 +48,100 @@ struct RawBuffer {
size_t size() { return static_cast<size_t>(end - start); }
};
+bool IsCompilationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
+ v8::Local<v8::Value> source, bool is_async) {
+ // Allow the caller to do one final check on the thrower state, rather than
+ // one at each step. No information is lost; the failure reason is captured
+ // in the thrower state.
+ if (thrower->error()) return false;
+
+ AllowWasmCompileCallback callback = isolate->allow_wasm_compile_callback();
+ if (callback != nullptr &&
+ !callback(reinterpret_cast<v8::Isolate*>(isolate), source, is_async)) {
+ thrower->RangeError(RANGE_ERROR_MSG);
+ return false;
+ }
+ return true;
+}
+
+bool IsInstantiationAllowed(i::Isolate* isolate, ErrorThrower* thrower,
+ v8::Local<v8::Value> module_or_bytes,
+ i::MaybeHandle<i::JSReceiver> ffi, bool is_async) {
+ // Allow the caller to do one final check on the thrower state, rather than
+ // one at each step. No information is lost; the failure reason is captured
+ // in the thrower state.
+ if (thrower->error()) return false;
+ v8::MaybeLocal<v8::Value> v8_ffi;
+ if (!ffi.is_null()) {
+ v8_ffi = v8::Local<v8::Value>::Cast(Utils::ToLocal(ffi.ToHandleChecked()));
+ }
+ AllowWasmInstantiateCallback callback =
+ isolate->allow_wasm_instantiate_callback();
+ if (callback != nullptr &&
+ !callback(reinterpret_cast<v8::Isolate*>(isolate), module_or_bytes,
+ v8_ffi, is_async)) {
+ thrower->RangeError(RANGE_ERROR_MSG);
+ return false;
+ }
+ return true;
+}
+
+i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ if (args.Length() < 1) {
+ thrower->TypeError("Argument 0 must be a buffer source");
+ return i::wasm::ModuleWireBytes(nullptr, nullptr);
+ }
+
+ const byte* start = nullptr;
+ size_t length = 0;
+ v8::Local<v8::Value> source = args[0];
+ if (source->IsArrayBuffer()) {
+ // A raw array buffer was passed.
+ Local<ArrayBuffer> buffer = Local<ArrayBuffer>::Cast(source);
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ start = reinterpret_cast<const byte*>(contents.Data());
+ length = contents.ByteLength();
+ } else if (source->IsTypedArray()) {
+ // A TypedArray was passed.
+ Local<TypedArray> array = Local<TypedArray>::Cast(source);
+ Local<ArrayBuffer> buffer = array->Buffer();
+
+ ArrayBuffer::Contents contents = buffer->GetContents();
+
+ start =
+ reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
+ length = array->ByteLength();
+ } else {
+ thrower->TypeError("Argument 0 must be a buffer source");
+ }
+ DCHECK_IMPLIES(length, start != nullptr);
+ if (length == 0) {
+ thrower->CompileError("BufferSource argument is empty");
+ }
+ if (length > i::wasm::kV8MaxWasmModuleSize) {
+ thrower->RangeError("buffer source exceeds maximum size of %zu (is %zu)",
+ i::wasm::kV8MaxWasmModuleSize, length);
+ }
+ if (thrower->error()) return i::wasm::ModuleWireBytes(nullptr, nullptr);
+ // TODO(titzer): use the handle as well?
+ return i::wasm::ModuleWireBytes(start, start + length);
+}
+
+i::MaybeHandle<i::JSReceiver> GetSecondArgumentAsImports(
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ if (args.Length() < 2) return {};
+ if (args[1]->IsUndefined()) return {};
+
+ if (!args[1]->IsObject()) {
+ thrower->TypeError("Argument 1 must be an object");
+ return {};
+ }
+ Local<Object> obj = Local<Object>::Cast(args[1]);
+ return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
+}
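Call sites are expected to combine these extractors and consult the thrower once afterwards, e.g. (sketch of the pattern used by the WebAssembly builtins below):

  auto bytes = GetFirstArgumentAsBytes(args, &thrower);
  auto imports = GetSecondArgumentAsImports(args, &thrower);
  if (thrower.error()) return;  // single consolidated error check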
+
RawBuffer GetRawBufferSource(
v8::Local<v8::Value> source, ErrorThrower* thrower) {
const byte* start = nullptr;
@@ -61,9 +155,6 @@ RawBuffer GetRawBufferSource(
start = reinterpret_cast<const byte*>(contents.Data());
end = start + contents.ByteLength();
- if (start == nullptr || end == start) {
- thrower->CompileError("ArrayBuffer argument is empty");
- }
} else if (source->IsTypedArray()) {
// A TypedArray was passed.
Local<TypedArray> array = Local<TypedArray>::Cast(source);
@@ -75,13 +166,12 @@ RawBuffer GetRawBufferSource(
reinterpret_cast<const byte*>(contents.Data()) + array->ByteOffset();
end = start + array->ByteLength();
- if (start == nullptr || end == start) {
- thrower->TypeError("ArrayBuffer argument is empty");
- }
} else {
- thrower->TypeError("Argument 0 must be an ArrayBuffer or Uint8Array");
+ thrower->TypeError("Argument 0 must be a buffer source");
+ }
+ if (start == nullptr || end == start) {
+ thrower->CompileError("BufferSource argument is empty");
}
-
return {start, end};
}
@@ -97,7 +187,7 @@ static i::MaybeHandle<i::WasmModuleObject> CreateModuleObject(
DCHECK(source->IsArrayBuffer() || source->IsTypedArray());
return i::wasm::CreateModuleObjectFromBytes(
i_isolate, buffer.start, buffer.end, thrower, i::wasm::kWasmOrigin,
- i::Handle<i::Script>::null(), nullptr, nullptr);
+ i::Handle<i::Script>::null(), i::Vector<const byte>::empty());
}
static bool ValidateModule(v8::Isolate* isolate,
@@ -115,6 +205,17 @@ static bool ValidateModule(v8::Isolate* isolate,
i::wasm::ModuleOrigin::kWasmOrigin);
}
+// TODO(wasm): move brand check to the respective types, and don't throw
+// in it, rather, use a provided ErrorThrower, or let caller handle it.
+static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
+ i::Handle<i::Symbol> sym) {
+ if (!value->IsJSObject()) return false;
+ i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
+ Maybe<bool> has_brand = i::JSObject::HasOwnProperty(object, sym);
+ if (has_brand.IsNothing()) return false;
+ return has_brand.ToChecked();
+}
+
static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
i::Handle<i::Symbol> sym, const char* msg) {
if (value->IsJSObject()) {
@@ -130,27 +231,36 @@ static bool BrandCheck(Isolate* isolate, i::Handle<i::Object> value,
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
"WebAssembly.compile()");
+ Local<Context> context = isolate->GetCurrentContext();
+ v8::Local<v8::Promise::Resolver> resolver;
+ if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(resolver->GetPromise());
+
if (args.Length() < 1) {
thrower.TypeError("Argument 0 must be a buffer source");
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ USE(bytes);
+ if (!IsCompilationAllowed(i_isolate, &thrower, args[0], true)) {
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
return;
}
i::MaybeHandle<i::JSObject> module_obj =
CreateModuleObject(isolate, args[0], &thrower);
- Local<Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Promise::Resolver> resolver;
- if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
if (thrower.error()) {
resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
} else {
resolver->Resolve(context, Utils::ToLocal(module_obj.ToHandleChecked()));
}
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(resolver->GetPromise());
}
void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -168,12 +278,14 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (ValidateModule(isolate, args[0], &thrower)) {
return_value.Set(v8::True(isolate));
} else {
+ if (thrower.wasm_error()) thrower.Reify(); // Clear error.
return_value.Set(v8::False(isolate));
}
}
void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
HandleScope scope(isolate);
ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
"WebAssembly.Module()");
@@ -182,6 +294,10 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
thrower.TypeError("Argument 0 must be a buffer source");
return;
}
+ auto bytes = GetFirstArgumentAsBytes(args, &thrower);
+ USE(bytes);
+ if (!IsCompilationAllowed(i_isolate, &thrower, args[0], false)) return;
+
i::MaybeHandle<i::JSObject> module_obj =
CreateModuleObject(isolate, args[0], &thrower);
if (module_obj.is_null()) return;
@@ -190,16 +306,49 @@ void WebAssemblyModule(const v8::FunctionCallbackInfo<v8::Value>& args) {
return_value.Set(Utils::ToLocal(module_obj.ToHandleChecked()));
}
-void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
- HandleScope scope(args.GetIsolate());
- v8::Isolate* isolate = args.GetIsolate();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+MaybeLocal<Value> InstantiateModuleImpl(
+ i::Isolate* i_isolate, i::Handle<i::WasmModuleObject> i_module_obj,
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
+ // It so happens that in both WebAssembly.instantiate and the
+ // WebAssembly.Instance ctor, the ffi object and memory occupy the same
+ // argument positions. If that changes later, we will refactor these
+ // consts into parameters.
+ static const int kFfiOffset = 1;
- ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+ MaybeLocal<Value> nothing;
+ i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
+ // This is a first-level validation of the argument: if present, we only
+ // check its type here. {Instantiate} will further check that the argument
+ // is present whenever the module has imports, and will validate each
+ // import individually.
+ if (args.Length() > kFfiOffset && !args[kFfiOffset]->IsUndefined()) {
+ if (!args[kFfiOffset]->IsObject()) {
+ thrower->TypeError("Argument %d must be an object", kFfiOffset);
+ return nothing;
+ }
+ Local<Object> obj = Local<Object>::Cast(args[kFfiOffset]);
+ ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
+ }
+
+ i::MaybeHandle<i::JSObject> instance =
+ i::wasm::WasmModule::Instantiate(i_isolate, thrower, i_module_obj, ffi);
+ if (instance.is_null()) {
+ if (!thrower->error())
+ thrower->RuntimeError("Could not instantiate module");
+ return nothing;
+ }
+ DCHECK(!i_isolate->has_pending_exception());
+ return Utils::ToLocal(instance.ToHandleChecked());
+}
+namespace {
+i::MaybeHandle<i::WasmModuleObject> GetFirstArgumentAsModule(
+ const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower& thrower) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::MaybeHandle<i::WasmModuleObject> nothing;
if (args.Length() < 1) {
thrower.TypeError("Argument 0 must be a WebAssembly.Module");
- return;
+ return nothing;
}
Local<Context> context = isolate->GetCurrentContext();
@@ -207,60 +356,210 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (!BrandCheck(isolate, Utils::OpenHandle(*args[0]),
i::Handle<i::Symbol>(i_context->wasm_module_sym()),
"Argument 0 must be a WebAssembly.Module")) {
- return;
+ return nothing;
}
- Local<Object> obj = Local<Object>::Cast(args[0]);
- i::Handle<i::JSObject> i_obj =
- i::Handle<i::JSObject>::cast(v8::Utils::OpenHandle(*obj));
+ Local<Object> module_obj = Local<Object>::Cast(args[0]);
+ return i::Handle<i::WasmModuleObject>::cast(
+ v8::Utils::OpenHandle(*module_obj));
+}
+} // namespace
- i::Handle<i::JSReceiver> ffi = i::Handle<i::JSObject>::null();
- if (args.Length() > 1 && args[1]->IsObject()) {
- Local<Object> obj = Local<Object>::Cast(args[1]);
- ffi = i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
+void WebAssemblyModuleImports(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.Module.imports()");
+
+ auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+
+ if (!maybe_module.is_null()) {
+ auto imports =
+ i::wasm::GetImports(i_isolate, maybe_module.ToHandleChecked());
+ args.GetReturnValue().Set(Utils::ToLocal(imports));
}
+}
- i::Handle<i::JSArrayBuffer> memory = i::Handle<i::JSArrayBuffer>::null();
- if (args.Length() > 2 && args[2]->IsObject()) {
- Local<Object> obj = Local<Object>::Cast(args[2]);
- i::Handle<i::Object> mem_obj = v8::Utils::OpenHandle(*obj);
- if (i::WasmJs::IsWasmMemoryObject(i_isolate, mem_obj)) {
- memory = i::Handle<i::JSArrayBuffer>(
- i::Handle<i::WasmMemoryObject>::cast(mem_obj)->get_buffer(),
- i_isolate);
- } else {
- thrower.TypeError("Argument 2 must be a WebAssembly.Memory");
+void WebAssemblyModuleExports(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ ErrorThrower thrower(i_isolate, "WebAssembly.Module.exports()");
+
+ auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+
+ if (!maybe_module.is_null()) {
+ auto exports =
+ i::wasm::GetExports(i_isolate, maybe_module.ToHandleChecked());
+ args.GetReturnValue().Set(Utils::ToLocal(exports));
+ }
+}
+
+void WebAssemblyModuleCustomSections(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ ErrorThrower thrower(i_isolate, "WebAssembly.Module.customSections()");
+
+ auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+
+ if (args.Length() < 2) {
+ thrower.TypeError("Argument 1 must be a string");
+ return;
+ }
+
+ i::Handle<i::Object> name = Utils::OpenHandle(*args[1]);
+ if (!name->IsString()) {
+ thrower.TypeError("Argument 1 must be a string");
+ return;
+ }
+
+ if (!maybe_module.is_null()) {
+ auto custom_sections =
+ i::wasm::GetCustomSections(i_isolate, maybe_module.ToHandleChecked(),
+ i::Handle<i::String>::cast(name), &thrower);
+ if (!thrower.error()) {
+ args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
}
- i::MaybeHandle<i::JSObject> instance =
- i::wasm::WasmModule::Instantiate(i_isolate, &thrower, i_obj, ffi, memory);
- if (instance.is_null()) {
- if (!thrower.error()) thrower.RuntimeError("Could not instantiate module");
+}
+
+void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ HandleScope scope(args.GetIsolate());
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ ErrorThrower thrower(i_isolate, "WebAssembly.Instance()");
+
+ auto maybe_module = GetFirstArgumentAsModule(args, thrower);
+ if (thrower.error()) return;
+ auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
+ if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
+ false)) {
return;
}
- DCHECK(!i_isolate->has_pending_exception());
+ DCHECK(!thrower.error());
+
+ if (!maybe_module.is_null()) {
+ MaybeLocal<Value> instance = InstantiateModuleImpl(
+ i_isolate, maybe_module.ToHandleChecked(), args, &thrower);
+ if (instance.IsEmpty()) {
+ DCHECK(thrower.error());
+ return;
+ }
+ args.GetReturnValue().Set(instance.ToLocalChecked());
+ }
+}
+
+void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ HandleScope scope(isolate);
+ ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
+
+ Local<Context> context = isolate->GetCurrentContext();
+ i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
+
+ v8::Local<v8::Promise::Resolver> resolver;
+ if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(instance.ToHandleChecked()));
+ return_value.Set(resolver->GetPromise());
+
+ if (args.Length() < 1) {
+ thrower.TypeError(
+ "Argument 0 must be provided and must be either a buffer source or a "
+ "WebAssembly.Module object");
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+
+ i::Handle<i::Object> first_arg = Utils::OpenHandle(*args[0]);
+ if (!first_arg->IsJSObject()) {
+ thrower.TypeError(
+ "Argument 0 must be a buffer source or a WebAssembly.Module object");
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+
+ bool want_pair = !BrandCheck(
+ isolate, first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()));
+ auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
+ if (thrower.error()) {
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+ if (!IsInstantiationAllowed(i_isolate, &thrower, args[0], maybe_imports,
+ true)) {
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+ i::Handle<i::WasmModuleObject> module_obj;
+ if (want_pair) {
+ i::MaybeHandle<i::WasmModuleObject> maybe_module_obj =
+ CreateModuleObject(isolate, args[0], &thrower);
+ if (!maybe_module_obj.ToHandle(&module_obj)) {
+ DCHECK(thrower.error());
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ return;
+ }
+ } else {
+ module_obj = i::Handle<i::WasmModuleObject>::cast(first_arg);
+ }
+ DCHECK(!module_obj.is_null());
+ MaybeLocal<Value> instance =
+ InstantiateModuleImpl(i_isolate, module_obj, args, &thrower);
+ if (instance.IsEmpty()) {
+ DCHECK(thrower.error());
+ resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
+ } else {
+ DCHECK(!thrower.error());
+ Local<Value> retval;
+ if (want_pair) {
+ i::Handle<i::JSFunction> object_function = i::Handle<i::JSFunction>(
+ i_isolate->native_context()->object_function(), i_isolate);
+
+ i::Handle<i::JSObject> i_retval =
+ i_isolate->factory()->NewJSObject(object_function, i::TENURED);
+ i::Handle<i::String> module_property_name =
+ i_isolate->factory()->InternalizeUtf8String("module");
+ i::Handle<i::String> instance_property_name =
+ i_isolate->factory()->InternalizeUtf8String("instance");
+ i::JSObject::AddProperty(i_retval, module_property_name, module_obj,
+ i::NONE);
+ i::JSObject::AddProperty(i_retval, instance_property_name,
+ Utils::OpenHandle(*instance.ToLocalChecked()),
+ i::NONE);
+ retval = Utils::ToLocal(i_retval);
+ } else {
+ retval = instance.ToLocalChecked();
+ }
+ DCHECK(!retval.IsEmpty());
+ resolver->Resolve(context, retval);
+ }
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
Local<Context> context, Local<v8::Object> object,
- Local<String> property, int* result, int lower_bound,
- int upper_bound) {
+ Local<String> property, int* result,
+ int64_t lower_bound, uint64_t upper_bound) {
v8::MaybeLocal<v8::Value> maybe = object->Get(context, property);
v8::Local<v8::Value> value;
if (maybe.ToLocal(&value)) {
int64_t number;
if (!value->IntegerValue(context).To(&number)) return false;
- if (number < static_cast<int64_t>(lower_bound)) {
+ if (number < lower_bound) {
thrower->RangeError("Property value %" PRId64
- " is below the lower bound %d",
+ " is below the lower bound %" PRIx64,
number, lower_bound);
return false;
}
if (number > static_cast<int64_t>(upper_bound)) {
thrower->RangeError("Property value %" PRId64
- " is above the upper bound %d",
+ " is above the upper bound %" PRIu64,
number, upper_bound);
return false;
}
@@ -270,8 +569,6 @@ bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
return false;
}
-const int max_table_size = 1 << 26;
-
void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
@@ -299,28 +596,23 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
}
// The descriptor's 'initial'.
- int initial;
+ int initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
v8_str(isolate, "initial"), &initial, 0,
- max_table_size)) {
+ i::wasm::kV8MaxWasmTableSize)) {
return;
}
// The descriptor's 'maximum'.
- int maximum = 0;
+ int maximum = -1;
Local<String> maximum_key = v8_str(isolate, "maximum");
Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
- if (has_maximum.IsNothing()) {
- // There has been an exception, just return.
- return;
- }
- if (has_maximum.FromJust()) {
+ if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial, max_table_size)) {
+ &maximum, initial,
+ i::wasm::kSpecMaxWasmTableSize)) {
return;
}
- } else {
- maximum = static_cast<int>(i::wasm::WasmModule::kV8MaxTableSize);
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@@ -335,7 +627,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
ErrorThrower thrower(reinterpret_cast<i::Isolate*>(isolate),
- "WebAssembly.Module()");
+ "WebAssembly.Memory()");
if (args.Length() < 1 || !args[0]->IsObject()) {
thrower.TypeError("Argument 0 must be a memory descriptor");
return;
@@ -343,35 +635,35 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
Local<v8::Object> descriptor = args[0]->ToObject(context).ToLocalChecked();
// The descriptor's 'initial'.
- int initial;
+ int initial = 0;
if (!GetIntegerProperty(isolate, &thrower, context, descriptor,
- v8_str(isolate, "initial"), &initial, 0, 65536)) {
+ v8_str(isolate, "initial"), &initial, 0,
+ i::wasm::kV8MaxWasmMemoryPages)) {
return;
}
// The descriptor's 'maximum'.
- int maximum = 0;
+ int maximum = -1;
Local<String> maximum_key = v8_str(isolate, "maximum");
Maybe<bool> has_maximum = descriptor->Has(context, maximum_key);
- if (has_maximum.IsNothing()) {
- // There has been an exception, just return.
- return;
- }
- if (has_maximum.FromJust()) {
+ if (!has_maximum.IsNothing() && has_maximum.FromJust()) {
if (!GetIntegerProperty(isolate, &thrower, context, descriptor, maximum_key,
- &maximum, initial, 65536)) {
+ &maximum, initial,
+ i::wasm::kSpecMaxWasmMemoryPages)) {
return;
}
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::JSArrayBuffer> buffer =
- i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
size_t size = static_cast<size_t>(i::wasm::WasmModule::kPageSize) *
static_cast<size_t>(initial);
- i::JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, size);
-
- i::Handle<i::JSObject> memory_obj = i::WasmMemoryObject::New(
- i_isolate, buffer, has_maximum.FromJust() ? maximum : -1);
+ i::Handle<i::JSArrayBuffer> buffer =
+ i::wasm::NewArrayBuffer(i_isolate, size, i::FLAG_wasm_guard_pages);
+ if (buffer.is_null()) {
+ thrower.RangeError("could not allocate memory");
+ return;
+ }
+ i::Handle<i::JSObject> memory_obj =
+ i::WasmMemoryObject::New(i_isolate, buffer, maximum);
args.GetReturnValue().Set(Utils::ToLocal(memory_obj));
}
@@ -404,7 +696,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
auto receiver =
i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::FixedArray> old_array(receiver->get_functions(), i_isolate);
+ i::Handle<i::FixedArray> old_array(receiver->functions(), i_isolate);
int old_size = old_array->length();
int64_t new_size64 = 0;
if (args.Length() > 0 && !args[0]->IntegerValue(context).To(&new_size64)) {
@@ -412,14 +704,23 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
new_size64 += old_size;
- if (new_size64 < old_size || new_size64 > receiver->maximum_length()) {
+ int64_t max_size64 = receiver->maximum_length();
+ if (max_size64 < 0 ||
+ max_size64 > static_cast<int64_t>(i::wasm::kV8MaxWasmTableSize)) {
+ max_size64 = i::wasm::kV8MaxWasmTableSize;
+ }
+
+ if (new_size64 < old_size || new_size64 > max_size64) {
v8::Local<v8::Value> e = v8::Exception::RangeError(
v8_str(isolate, new_size64 < old_size ? "trying to shrink table"
: "maximum table size exceeded"));
isolate->ThrowException(e);
return;
}
+
int new_size = static_cast<int>(new_size64);
+ i::WasmTableObject::Grow(i_isolate, receiver,
+ static_cast<uint32_t>(new_size - old_size));
if (new_size != old_size) {
i::Handle<i::FixedArray> new_array =
@@ -430,7 +731,9 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
receiver->set_functions(*new_array);
}
- // TODO(titzer): update relevant instances.
+ // TODO(gdeepti): use weak links for instances
+ v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
+ return_value.Set(old_size);
}
void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -446,7 +749,7 @@ void WebAssemblyTableGet(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
auto receiver =
i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+ i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
int i = 0;
if (args.Length() > 0 && !args[0]->Int32Value(context).To(&i)) return;
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
@@ -490,7 +793,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
auto receiver =
i::Handle<i::WasmTableObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::FixedArray> array(receiver->get_functions(), i_isolate);
+ i::Handle<i::FixedArray> array(receiver->functions(), i_isolate);
int i;
if (!args[0]->Int32Value(context).To(&i)) return;
if (i < 0 || i >= array->length()) {
@@ -500,7 +803,7 @@ void WebAssemblyTableSet(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::FixedArray> dispatch_tables(receiver->get_dispatch_tables(),
+ i::Handle<i::FixedArray> dispatch_tables(receiver->dispatch_tables(),
i_isolate);
if (value->IsNull(i_isolate)) {
i::wasm::UpdateDispatchTables(i_isolate, dispatch_tables, i,
@@ -522,40 +825,40 @@ void WebAssemblyMemoryGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
"Receiver is not a WebAssembly.Memory")) {
return;
}
- if (args.Length() < 1) {
+ int64_t delta_size = 0;
+ if (args.Length() < 1 || !args[0]->IntegerValue(context).To(&delta_size)) {
v8::Local<v8::Value> e = v8::Exception::TypeError(
v8_str(isolate, "Argument 0 required, must be numeric value of pages"));
isolate->ThrowException(e);
return;
}
-
- uint32_t delta = args[0]->Uint32Value(context).FromJust();
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::JSObject> receiver =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::Object> instance_object(
- receiver->GetInternalField(kWasmMemoryInstanceObject), i_isolate);
- i::Handle<i::JSObject> instance(
- i::Handle<i::JSObject>::cast(instance_object));
-
- // TODO(gdeepti) Implement growing memory when shared by different
- // instances.
- int32_t ret = internal::wasm::GrowInstanceMemory(i_isolate, instance, delta);
- if (ret == -1) {
- v8::Local<v8::Value> e = v8::Exception::Error(
- v8_str(isolate, "Unable to grow instance memory."));
+ i::Handle<i::WasmMemoryObject> receiver =
+ i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
+ int64_t max_size64 = receiver->maximum_pages();
+ if (max_size64 < 0 ||
+ max_size64 > static_cast<int64_t>(i::wasm::kV8MaxWasmMemoryPages)) {
+ max_size64 = i::wasm::kV8MaxWasmMemoryPages;
+ }
+ i::Handle<i::JSArrayBuffer> old_buffer(receiver->buffer());
+ uint32_t old_size =
+ old_buffer->byte_length()->Number() / i::wasm::WasmModule::kPageSize;
+ int64_t new_size64 = old_size + delta_size;
+ if (delta_size < 0 || max_size64 < new_size64 || new_size64 < old_size) {
+ v8::Local<v8::Value> e = v8::Exception::RangeError(v8_str(
+ isolate, new_size64 < old_size ? "trying to shrink memory"
+ : "maximum memory size exceeded"));
isolate->ThrowException(e);
return;
}
- i::MaybeHandle<i::JSArrayBuffer> buffer =
- internal::wasm::GetInstanceMemory(i_isolate, instance);
- if (buffer.is_null()) {
- v8::Local<v8::Value> e = v8::Exception::Error(
- v8_str(isolate, "WebAssembly.Memory buffer object not set."));
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ int32_t ret = i::wasm::GrowWebAssemblyMemory(
+ i_isolate, receiver, static_cast<uint32_t>(delta_size));
+ if (ret == -1) {
+ v8::Local<v8::Value> e = v8::Exception::RangeError(
+ v8_str(isolate, "Unable to grow instance memory."));
isolate->ThrowException(e);
return;
}
- receiver->SetInternalField(kWasmMemoryBuffer, *buffer.ToHandleChecked());
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(ret);
}
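
[Editor's note: the grow path above widens everything to int64_t before doing arithmetic, so an oversized delta cannot wrap the page count. A minimal standalone sketch of the same clamp-and-range-check logic, with hypothetical names; the real work happens in i::wasm::GrowWebAssemblyMemory:]

#include <cstdint>

// kPageSize / kMaxPages mirror the constants in wasm-limits.h; CheckedGrow
// is a hypothetical stand-in for i::wasm::GrowWebAssemblyMemory.
constexpr uint64_t kPageSize = 64 * 1024;  // one wasm page = 64 KiB
constexpr int64_t kMaxPages = 16384;       // kV8MaxWasmMemoryPages

// Returns the old size in pages (what grow() hands back to JS), or -1 if
// the request is out of range and the caller should throw a RangeError.
int64_t CheckedGrow(uint64_t byte_length, int64_t delta_pages,
                    int64_t maximum_pages) {
  if (maximum_pages < 0 || maximum_pages > kMaxPages) {
    maximum_pages = kMaxPages;  // clamp absent or oversized maxima, as above
  }
  int64_t old_pages = static_cast<int64_t>(byte_length / kPageSize);
  int64_t new_pages = old_pages + delta_pages;
  if (delta_pages < 0 || new_pages < old_pages || new_pages > maximum_pages) {
    return -1;
  }
  return old_pages;
}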
@@ -571,10 +874,9 @@ void WebAssemblyMemoryGetBuffer(
return;
}
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- i::Handle<i::JSObject> receiver =
- i::Handle<i::JSObject>::cast(Utils::OpenHandle(*args.This()));
- i::Handle<i::Object> buffer(receiver->GetInternalField(kWasmMemoryBuffer),
- i_isolate);
+ i::Handle<i::WasmMemoryObject> receiver =
+ i::Handle<i::WasmMemoryObject>::cast(Utils::OpenHandle(*args.This()));
+ i::Handle<i::Object> buffer(receiver->buffer(), i_isolate);
DCHECK(buffer->IsJSArrayBuffer());
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(Utils::ToLocal(buffer));
@@ -586,20 +888,23 @@ void WebAssemblyMemoryGetBuffer(
static i::Handle<i::FunctionTemplateInfo> NewTemplate(i::Isolate* i_isolate,
FunctionCallback func) {
Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
- Local<FunctionTemplate> local = FunctionTemplate::New(isolate, func);
- return v8::Utils::OpenHandle(*local);
+ Local<FunctionTemplate> templ = FunctionTemplate::New(isolate, func);
+ templ->ReadOnlyPrototype();
+ return v8::Utils::OpenHandle(*templ);
}
namespace internal {
Handle<JSFunction> InstallFunc(Isolate* isolate, Handle<JSObject> object,
- const char* str, FunctionCallback func) {
+ const char* str, FunctionCallback func,
+ int length = 0) {
Handle<String> name = v8_str(isolate, str);
Handle<FunctionTemplateInfo> temp = NewTemplate(isolate, func);
Handle<JSFunction> function =
ApiNatives::InstantiateFunction(temp).ToHandleChecked();
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ JSFunction::SetName(function, name, isolate->factory()->empty_string());
+ function->shared()->set_length(length);
+ PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(object, name, function, attributes);
return function;
}
@@ -611,27 +916,46 @@ Handle<JSFunction> InstallGetter(Isolate* isolate, Handle<JSObject> object,
Handle<JSFunction> function =
ApiNatives::InstantiateFunction(temp).ToHandleChecked();
v8::PropertyAttribute attributes =
- static_cast<v8::PropertyAttribute>(v8::DontDelete | v8::ReadOnly);
+ static_cast<v8::PropertyAttribute>(v8::DontEnum);
Utils::ToLocal(object)->SetAccessorProperty(Utils::ToLocal(name),
Utils::ToLocal(function),
Local<Function>(), attributes);
return function;
}
-void WasmJs::InstallWasmModuleSymbolIfNeeded(Isolate* isolate,
- Handle<JSGlobalObject> global,
- Handle<Context> context) {
- if (!context->get(Context::WASM_MODULE_SYM_INDEX)->IsSymbol() ||
- !context->get(Context::WASM_INSTANCE_SYM_INDEX)->IsSymbol()) {
- InstallWasmMapsIfNeeded(isolate, isolate->native_context());
- InstallWasmConstructors(isolate, isolate->global_object(),
- isolate->native_context());
- }
-}
+void WasmJs::Install(Isolate* isolate) {
+ Handle<JSGlobalObject> global = isolate->global_object();
+ Handle<Context> context(global->native_context(), isolate);
+ // TODO(titzer): once FLAG_expose_wasm is gone, this should become a DCHECK.
+ if (context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) return;
+
+ // Install Maps.
+
+ // TODO(titzer): Also make one for strict mode functions?
+ Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
+
+ InstanceType instance_type = prev_map->instance_type();
+ int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
+ CHECK_EQ(0, internal_fields);
+ int pre_allocated =
+ prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
+ int instance_size = 0;
+ int in_object_properties = 0;
+ int wasm_internal_fields = internal_fields + 1 // module instance object
+ + 1 // function arity
+ + 1; // function signature
+ JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
+ 0, &instance_size,
+ &in_object_properties);
+
+ int unused_property_fields = in_object_properties - pre_allocated;
+ Handle<Map> map = Map::CopyInitialMap(
+ prev_map, instance_size, in_object_properties, unused_property_fields);
+
+ context->set_wasm_function_map(*map);
+
+ // Install symbols.
-void WasmJs::InstallWasmConstructors(Isolate* isolate,
- Handle<JSGlobalObject> global,
- Handle<Context> context) {
Factory* factory = isolate->factory();
// Create private symbols.
Handle<Symbol> module_sym = factory->NewPrivateSymbol();
@@ -646,7 +970,9 @@ void WasmJs::InstallWasmConstructors(Isolate* isolate,
Handle<Symbol> memory_sym = factory->NewPrivateSymbol();
context->set_wasm_memory_sym(*memory_sym);
- // Bind the WebAssembly object.
+ // Install the JS API.
+
+ // Setup WebAssembly
Handle<String> name = v8_str(isolate, "WebAssembly");
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(
@@ -655,118 +981,103 @@ void WasmJs::InstallWasmConstructors(Isolate* isolate,
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
JSObject::AddProperty(global, name, webassembly, attributes);
-
- // Setup compile
- InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile);
-
- // Setup compile
- InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate);
+ PropertyAttributes ro_attributes =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
+ JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly"), ro_attributes);
+ InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
+ InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
+ InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
// Setup Module
Handle<JSFunction> module_constructor =
- InstallFunc(isolate, webassembly, "Module", WebAssemblyModule);
+ InstallFunc(isolate, webassembly, "Module", WebAssemblyModule, 1);
context->set_wasm_module_constructor(*module_constructor);
Handle<JSObject> module_proto =
factory->NewJSObject(module_constructor, TENURED);
- i::Handle<i::Map> map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ i::Handle<i::Map> module_map = isolate->factory()->NewMap(
+ i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
WasmModuleObject::kFieldCount * i::kPointerSize);
- JSFunction::SetInitialMap(module_constructor, map, module_proto);
+ JSFunction::SetInitialMap(module_constructor, module_map, module_proto);
+ InstallFunc(isolate, module_constructor, "imports", WebAssemblyModuleImports,
+ 1);
+ InstallFunc(isolate, module_constructor, "exports", WebAssemblyModuleExports,
+ 1);
+ InstallFunc(isolate, module_constructor, "customSections",
+ WebAssemblyModuleCustomSections, 2);
JSObject::AddProperty(module_proto, isolate->factory()->constructor_string(),
module_constructor, DONT_ENUM);
+ JSObject::AddProperty(module_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Module"), ro_attributes);
// Setup Instance
Handle<JSFunction> instance_constructor =
- InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance);
+ InstallFunc(isolate, webassembly, "Instance", WebAssemblyInstance, 1);
context->set_wasm_instance_constructor(*instance_constructor);
+ Handle<JSObject> instance_proto =
+ factory->NewJSObject(instance_constructor, TENURED);
+ i::Handle<i::Map> instance_map = isolate->factory()->NewMap(
+ i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ WasmInstanceObject::kFieldCount * i::kPointerSize);
+ JSFunction::SetInitialMap(instance_constructor, instance_map, instance_proto);
+ JSObject::AddProperty(instance_proto,
+ isolate->factory()->constructor_string(),
+ instance_constructor, DONT_ENUM);
+ JSObject::AddProperty(instance_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Instance"), ro_attributes);
// Setup Table
Handle<JSFunction> table_constructor =
- InstallFunc(isolate, webassembly, "Table", WebAssemblyTable);
+ InstallFunc(isolate, webassembly, "Table", WebAssemblyTable, 1);
context->set_wasm_table_constructor(*table_constructor);
Handle<JSObject> table_proto =
factory->NewJSObject(table_constructor, TENURED);
- map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ i::Handle<i::Map> table_map = isolate->factory()->NewMap(
+ i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
WasmTableObject::kFieldCount * i::kPointerSize);
- JSFunction::SetInitialMap(table_constructor, map, table_proto);
+ JSFunction::SetInitialMap(table_constructor, table_map, table_proto);
JSObject::AddProperty(table_proto, isolate->factory()->constructor_string(),
table_constructor, DONT_ENUM);
InstallGetter(isolate, table_proto, "length", WebAssemblyTableGetLength);
- InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow);
- InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet);
- InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet);
+ InstallFunc(isolate, table_proto, "grow", WebAssemblyTableGrow, 1);
+ InstallFunc(isolate, table_proto, "get", WebAssemblyTableGet, 1);
+ InstallFunc(isolate, table_proto, "set", WebAssemblyTableSet, 2);
+ JSObject::AddProperty(table_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Table"), ro_attributes);
// Setup Memory
Handle<JSFunction> memory_constructor =
- InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory);
+ InstallFunc(isolate, webassembly, "Memory", WebAssemblyMemory, 1);
context->set_wasm_memory_constructor(*memory_constructor);
Handle<JSObject> memory_proto =
factory->NewJSObject(memory_constructor, TENURED);
- map = isolate->factory()->NewMap(
- i::JS_OBJECT_TYPE, i::JSObject::kHeaderSize +
+ i::Handle<i::Map> memory_map = isolate->factory()->NewMap(
+ i::JS_API_OBJECT_TYPE, i::JSObject::kHeaderSize +
WasmMemoryObject::kFieldCount * i::kPointerSize);
- JSFunction::SetInitialMap(memory_constructor, map, memory_proto);
+ JSFunction::SetInitialMap(memory_constructor, memory_map, memory_proto);
JSObject::AddProperty(memory_proto, isolate->factory()->constructor_string(),
memory_constructor, DONT_ENUM);
- InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow);
+ InstallFunc(isolate, memory_proto, "grow", WebAssemblyMemoryGrow, 1);
InstallGetter(isolate, memory_proto, "buffer", WebAssemblyMemoryGetBuffer);
+ JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
+ v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
// Setup errors
- attributes = static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+ attributes = static_cast<PropertyAttributes>(DONT_ENUM);
Handle<JSFunction> compile_error(
isolate->native_context()->wasm_compile_error_function());
JSObject::AddProperty(webassembly, isolate->factory()->CompileError_string(),
compile_error, attributes);
+ Handle<JSFunction> link_error(
+ isolate->native_context()->wasm_link_error_function());
+ JSObject::AddProperty(webassembly, isolate->factory()->LinkError_string(),
+ link_error, attributes);
Handle<JSFunction> runtime_error(
isolate->native_context()->wasm_runtime_error_function());
JSObject::AddProperty(webassembly, isolate->factory()->RuntimeError_string(),
runtime_error, attributes);
}
-void WasmJs::Install(Isolate* isolate, Handle<JSGlobalObject> global) {
- if (!FLAG_expose_wasm && !FLAG_validate_asm) {
- return;
- }
-
- // Setup wasm function map.
- Handle<Context> context(global->native_context(), isolate);
- InstallWasmMapsIfNeeded(isolate, context);
-
- if (FLAG_expose_wasm) {
- InstallWasmConstructors(isolate, global, context);
- }
-}
-
-void WasmJs::InstallWasmMapsIfNeeded(Isolate* isolate,
- Handle<Context> context) {
- if (!context->get(Context::WASM_FUNCTION_MAP_INDEX)->IsMap()) {
- // TODO(titzer): Move this to bootstrapper.cc??
- // TODO(titzer): Also make one for strict mode functions?
- Handle<Map> prev_map = Handle<Map>(context->sloppy_function_map(), isolate);
-
- InstanceType instance_type = prev_map->instance_type();
- int internal_fields = JSObject::GetInternalFieldCount(*prev_map);
- CHECK_EQ(0, internal_fields);
- int pre_allocated =
- prev_map->GetInObjectProperties() - prev_map->unused_property_fields();
- int instance_size = 0;
- int in_object_properties = 0;
- int wasm_internal_fields = internal_fields + 1 // module instance object
- + 1 // function arity
- + 1; // function signature
- JSFunction::CalculateInstanceSizeHelper(instance_type, wasm_internal_fields,
- 0, &instance_size,
- &in_object_properties);
-
- int unused_property_fields = in_object_properties - pre_allocated;
- Handle<Map> map = Map::CopyInitialMap(
- prev_map, instance_size, in_object_properties, unused_property_fields);
-
- context->set_wasm_function_map(*map);
- }
-}
-
static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> symbol) {
if (value->IsJSObject()) {
i::Handle<i::JSObject> object = i::Handle<i::JSObject>::cast(value);
diff --git a/deps/v8/src/wasm/wasm-js.h b/deps/v8/src/wasm/wasm-js.h
index f5b9596ee2..05d5ea3061 100644
--- a/deps/v8/src/wasm/wasm-js.h
+++ b/deps/v8/src/wasm/wasm-js.h
@@ -13,16 +13,7 @@ namespace internal {
// Exposes a WASM API to JavaScript through the V8 API.
class WasmJs {
public:
- static void Install(Isolate* isolate, Handle<JSGlobalObject> global_object);
-
- V8_EXPORT_PRIVATE static void InstallWasmModuleSymbolIfNeeded(
- Isolate* isolate, Handle<JSGlobalObject> global, Handle<Context> context);
-
- V8_EXPORT_PRIVATE static void InstallWasmMapsIfNeeded(
- Isolate* isolate, Handle<Context> context);
- static void InstallWasmConstructors(Isolate* isolate,
- Handle<JSGlobalObject> global,
- Handle<Context> context);
+ V8_EXPORT_PRIVATE static void Install(Isolate* isolate);
// WebAssembly.Table.
static bool IsWasmTableObject(Isolate* isolate, Handle<Object> value);
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
new file mode 100644
index 0000000000..4c7455adc5
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -0,0 +1,45 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_WASM_LIMITS_H_
+#define V8_WASM_WASM_LIMITS_H_
+
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The following limits are imposed by V8 on WebAssembly modules.
+// The limits are agreed upon with other engines for consistency.
+const size_t kV8MaxWasmTypes = 1000000;
+const size_t kV8MaxWasmFunctions = 1000000;
+const size_t kV8MaxWasmImports = 100000;
+const size_t kV8MaxWasmExports = 100000;
+const size_t kV8MaxWasmGlobals = 1000000;
+const size_t kV8MaxWasmDataSegments = 100000;
+const size_t kV8MaxWasmMemoryPages = 16384; // = 1 GiB
+const size_t kV8MaxWasmStringSize = 100000;
+const size_t kV8MaxWasmModuleSize = 1024 * 1024 * 1024; // = 1 GiB
+const size_t kV8MaxWasmFunctionSize = 128 * 1024;
+const size_t kV8MaxWasmFunctionLocals = 50000;
+const size_t kV8MaxWasmFunctionParams = 1000;
+const size_t kV8MaxWasmFunctionMultiReturns = 1000;
+const size_t kV8MaxWasmFunctionReturns = 1;
+const size_t kV8MaxWasmTableSize = 10000000;
+const size_t kV8MaxWasmTableEntries = 10000000;
+const size_t kV8MaxWasmTables = 1;
+const size_t kV8MaxWasmMemories = 1;
+
+const size_t kSpecMaxWasmMemoryPages = 65536;
+const size_t kSpecMaxWasmTableSize = 0xFFFFFFFFu;
+
+const uint64_t kWasmMaxHeapOffset =
+ static_cast<uint64_t>(
+ std::numeric_limits<uint32_t>::max()) // maximum base value
+ + std::numeric_limits<uint32_t>::max(); // maximum index value
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_WASM_LIMITS_H_
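
[Editor's note: since wasm pages are 64 KiB, the engine limit of 16384 pages works out to exactly the 1 GiB noted in the comment above. A compile-time sketch of that arithmetic, as a standalone illustration rather than part of the header:]

#include <cstdint>

constexpr uint64_t kPageSize = 64 * 1024;    // one wasm page
constexpr uint64_t kMaxMemoryPages = 16384;  // kV8MaxWasmMemoryPages

static_assert(kMaxMemoryPages * kPageSize == uint64_t{1} << 30,
              "the engine-imposed memory ceiling is exactly 1 GiB");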
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/src/wasm/wasm-macro-gen.h
index ce2f843e71..1ec9ee80ff 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/src/wasm/wasm-macro-gen.h
@@ -59,6 +59,7 @@
// Control.
//------------------------------------------------------------------------------
#define WASM_NOP kExprNop
+#define WASM_END kExprEnd
#define ARITY_0 0
#define ARITY_1 1
@@ -71,13 +72,13 @@
#define WASM_BLOCK(...) kExprBlock, kLocalVoid, __VA_ARGS__, kExprEnd
#define WASM_BLOCK_T(t, ...) \
- kExprBlock, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), \
+ kExprBlock, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), \
__VA_ARGS__, kExprEnd
#define WASM_BLOCK_TT(t1, t2, ...) \
kExprBlock, kMultivalBlock, 0, \
- static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)), \
- static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), __VA_ARGS__, \
+ static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)), \
+ static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), __VA_ARGS__, \
kExprEnd
#define WASM_BLOCK_I(...) kExprBlock, kLocalI32, __VA_ARGS__, kExprEnd
@@ -99,13 +100,13 @@
cond, kExprIf, kLocalVoid, tstmt, kExprElse, fstmt, kExprEnd
#define WASM_IF_ELSE_T(t, cond, tstmt, fstmt) \
- cond, kExprIf, static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t)), tstmt, \
+ cond, kExprIf, static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t)), tstmt, \
kExprElse, fstmt, kExprEnd
#define WASM_IF_ELSE_TT(t1, t2, cond, tstmt, fstmt) \
cond, kExprIf, kMultivalBlock, 0, \
- static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t1)), \
- static_cast<byte>(WasmOpcodes::LocalTypeCodeFor(t2)), tstmt, kExprElse, \
+ static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t1)), \
+ static_cast<byte>(WasmOpcodes::ValueTypeCodeFor(t2)), tstmt, kExprElse, \
fstmt, kExprEnd
#define WASM_IF_ELSE_I(cond, tstmt, fstmt) \
@@ -140,9 +141,8 @@
// Misc expressions.
//------------------------------------------------------------------------------
#define WASM_ID(...) __VA_ARGS__
-#define WASM_ZERO kExprI8Const, 0
-#define WASM_ONE kExprI8Const, 1
-#define WASM_I8(val) kExprI8Const, static_cast<byte>(val)
+#define WASM_ZERO kExprI32Const, 0
+#define WASM_ONE kExprI32Const, 1
#define I32V_MIN(length) -(1 << (6 + (7 * ((length) - 1))))
#define I32V_MAX(length) ((1 << (6 + (7 * ((length) - 1)))) - 1)
@@ -195,7 +195,7 @@ class LocalDeclEncoder {
pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
for (size_t i = 0; i < local_decls.size(); ++i) {
pos = WriteUint32v(buffer, pos, local_decls[i].first);
- buffer[pos++] = WasmOpcodes::LocalTypeCodeFor(local_decls[i].second);
+ buffer[pos++] = WasmOpcodes::ValueTypeCodeFor(local_decls[i].second);
}
DCHECK_EQ(Size(), pos);
return pos;
@@ -203,7 +203,7 @@ class LocalDeclEncoder {
// Add locals declarations to this helper. Return the index of the newly added
// local(s), with an optional adjustment for the parameters.
- uint32_t AddLocals(uint32_t count, LocalType type) {
+ uint32_t AddLocals(uint32_t count, ValueType type) {
uint32_t result =
static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
total += count;
@@ -211,7 +211,7 @@ class LocalDeclEncoder {
count += local_decls.back().first;
local_decls.pop_back();
}
- local_decls.push_back(std::pair<uint32_t, LocalType>(count, type));
+ local_decls.push_back(std::pair<uint32_t, ValueType>(count, type));
return result;
}
@@ -227,7 +227,7 @@ class LocalDeclEncoder {
private:
FunctionSig* sig;
- ZoneVector<std::pair<uint32_t, LocalType>> local_decls;
+ ZoneVector<std::pair<uint32_t, ValueType>> local_decls;
size_t total;
size_t SizeofUint32v(uint32_t val) const {
@@ -447,19 +447,22 @@ class LocalDeclEncoder {
#define WASM_WHILE(x, y) \
kExprLoop, kLocalVoid, x, kExprIf, kLocalVoid, y, kExprBr, DEPTH_1, \
kExprEnd, kExprEnd
-#define WASM_INC_LOCAL(index) \
- kExprGetLocal, static_cast<byte>(index), kExprI8Const, 1, kExprI32Add, \
+#define WASM_INC_LOCAL(index) \
+ kExprGetLocal, static_cast<byte>(index), kExprI32Const, 1, kExprI32Add, \
kExprTeeLocal, static_cast<byte>(index)
#define WASM_INC_LOCAL_BYV(index, count) \
- kExprGetLocal, static_cast<byte>(index), kExprI8Const, \
+ kExprGetLocal, static_cast<byte>(index), kExprI32Const, \
static_cast<byte>(count), kExprI32Add, kExprTeeLocal, \
static_cast<byte>(index)
#define WASM_INC_LOCAL_BY(index, count) \
- kExprGetLocal, static_cast<byte>(index), kExprI8Const, \
+ kExprGetLocal, static_cast<byte>(index), kExprI32Const, \
static_cast<byte>(count), kExprI32Add, kExprSetLocal, \
static_cast<byte>(index)
#define WASM_UNOP(opcode, x) x, static_cast<byte>(opcode)
#define WASM_BINOP(opcode, x, y) x, y, static_cast<byte>(opcode)
+#define WASM_SIMD_UNOP(opcode, x) x, kSimdPrefix, static_cast<byte>(opcode)
+#define WASM_SIMD_BINOP(opcode, x, y) \
+ x, y, kSimdPrefix, static_cast<byte>(opcode)
//------------------------------------------------------------------------------
// Int32 operations
@@ -621,14 +624,31 @@ class LocalDeclEncoder {
//------------------------------------------------------------------------------
// Simd Operations.
//------------------------------------------------------------------------------
-#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
-#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
- x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
-#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
#define WASM_SIMD_F32x4_SPLAT(x) x, kSimdPrefix, kExprF32x4Splat & 0xff
#define WASM_SIMD_F32x4_EXTRACT_LANE(lane, x) \
x, kSimdPrefix, kExprF32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_REPLACE_LANE(lane, x, y) \
+ x, y, kSimdPrefix, kExprF32x4ReplaceLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_F32x4_FROM_I32x4(x) \
+ x, kSimdPrefix, kExprF32x4FromInt32x4 & 0xff
+#define WASM_SIMD_F32x4_FROM_U32x4(x) \
+ x, kSimdPrefix, kExprF32x4FromUint32x4 & 0xff
#define WASM_SIMD_F32x4_ADD(x, y) x, y, kSimdPrefix, kExprF32x4Add & 0xff
+#define WASM_SIMD_F32x4_SUB(x, y) x, y, kSimdPrefix, kExprF32x4Sub & 0xff
+
+#define WASM_SIMD_I32x4_SPLAT(x) x, kSimdPrefix, kExprI32x4Splat & 0xff
+#define WASM_SIMD_I32x4_EXTRACT_LANE(lane, x) \
+ x, kSimdPrefix, kExprI32x4ExtractLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_REPLACE_LANE(lane, x, y) \
+ x, y, kSimdPrefix, kExprI32x4ReplaceLane & 0xff, static_cast<byte>(lane)
+#define WASM_SIMD_I32x4_FROM_F32x4(x) \
+ x, kSimdPrefix, kExprI32x4FromFloat32x4 & 0xff
+#define WASM_SIMD_U32x4_FROM_F32x4(x) \
+ x, kSimdPrefix, kExprUi32x4FromFloat32x4 & 0xff
+#define WASM_SIMD_S32x4_SELECT(x, y, z) \
+ x, y, z, kSimdPrefix, kExprS32x4Select & 0xff
+#define WASM_SIMD_I32x4_ADD(x, y) x, y, kSimdPrefix, kExprI32x4Add & 0xff
+#define WASM_SIMD_I32x4_SUB(x, y) x, y, kSimdPrefix, kExprI32x4Sub & 0xff
#define SIG_ENTRY_v_v kWasmFunctionTypeForm, 0, 0
#define SIZEOF_SIG_ENTRY_v_v 3
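
[Editor's note: each of these macros pastes raw opcode bytes into an initializer list, so they compose by plain token concatenation. Following the definitions above, an i32x4 add of two splats expands as traced below; the opcode constants themselves come from wasm-opcodes.h:]

// Expansion trace derived from the macro definitions above:
//   WASM_SIMD_I32x4_ADD(WASM_SIMD_I32x4_SPLAT(WASM_ZERO),
//                       WASM_SIMD_I32x4_SPLAT(WASM_ONE))
// pastes, in order:
//   kExprI32Const, 0, kSimdPrefix, kExprI32x4Splat & 0xff,  // splat(0)
//   kExprI32Const, 1, kSimdPrefix, kExprI32x4Splat & 0xff,  // splat(1)
//   kSimdPrefix, kExprI32x4Add & 0xff                       // i32x4.add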
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 290e98ecf8..cd83d46d3e 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -8,7 +8,7 @@
#include "src/v8.h"
#include "src/zone/zone-containers.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module-builder.h"
@@ -50,11 +50,10 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
: builder_(builder),
locals_(builder->zone()),
signature_index_(0),
- exported_(0),
func_index_(static_cast<uint32_t>(builder->functions_.size())),
body_(builder->zone()),
name_(builder->zone()),
- exported_name_(builder->zone()),
+ exported_names_(builder->zone()),
i32_temps_(builder->zone()),
i64_temps_(builder->zone()),
f32_temps_(builder->zone()),
@@ -77,7 +76,7 @@ void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
signature_index_ = builder_->AddSignature(sig);
}
-uint32_t WasmFunctionBuilder::AddLocal(LocalType type) {
+uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
DCHECK(locals_.has_sig());
return locals_.AddLocals(1, type);
}
@@ -123,10 +122,10 @@ void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode,
}
void WasmFunctionBuilder::EmitI32Const(int32_t value) {
- // TODO(titzer): variable-length signed and unsigned i32 constants.
- if (-128 <= value && value <= 127) {
- EmitWithU8(kExprI8Const, static_cast<byte>(value));
+ if (-64 <= value && value <= 63) {
+ EmitWithU8(kExprI32Const, static_cast<byte>(value & 0x7F));
} else {
+ // TODO(titzer): variable-length signed and unsigned i32 constants.
byte code[] = {WASM_I32V_5(value)};
EmitCode(code, sizeof(code));
}
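
[Editor's note: the fast path above covers exactly the values whose signed-LEB128 encoding fits in one byte, i.e. [-64, 63]; everything else currently falls back to a fixed 5-byte encoding, as the TODO notes. A sketch of the full variable-length signed encoding the TODO refers to, for illustration only:]

#include <cstdint>
#include <vector>

std::vector<uint8_t> EncodeSLEB128(int32_t value) {
  std::vector<uint8_t> out;
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7F;
    value >>= 7;  // arithmetic shift on mainstream compilers keeps the sign
    // Done once the remaining bits are all sign bits and the sign bit of
    // the emitted byte agrees with them.
    more = !((value == 0 && !(byte & 0x40)) ||
             (value == -1 && (byte & 0x40)));
    if (more) byte |= 0x80;
    out.push_back(byte);
  }
  return out;
}
// EncodeSLEB128(1)  -> {0x01};  EncodeSLEB128(-64) -> {0x40};
// EncodeSLEB128(64) -> {0xC0, 0x00}  (needs a second byte).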
@@ -141,12 +140,9 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
EmitCode(code, sizeof(code));
}
-void WasmFunctionBuilder::Export() { exported_ = true; }
-
void WasmFunctionBuilder::ExportAs(Vector<const char> name) {
- exported_ = true;
- exported_name_.resize(name.length());
- memcpy(exported_name_.data(), name.start(), name.length());
+ exported_names_.push_back(ZoneVector<char>(
+ name.start(), name.start() + name.length(), builder_->zone()));
}
void WasmFunctionBuilder::SetName(Vector<const char> name) {
@@ -154,8 +150,9 @@ void WasmFunctionBuilder::SetName(Vector<const char> name) {
memcpy(name_.data(), name.start(), name.length());
}
-void WasmFunctionBuilder::AddAsmWasmOffset(int asm_position) {
- // We only want to emit one mapping per byte offset:
+void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
+ int to_number_position) {
+ // We only want to emit one mapping per byte offset.
DCHECK(asm_offsets_.size() == 0 || body_.size() > last_asm_byte_offset_);
DCHECK_LE(body_.size(), kMaxUInt32);
@@ -163,22 +160,31 @@ void WasmFunctionBuilder::AddAsmWasmOffset(int asm_position) {
asm_offsets_.write_u32v(byte_offset - last_asm_byte_offset_);
last_asm_byte_offset_ = byte_offset;
- DCHECK_GE(asm_position, 0);
- asm_offsets_.write_i32v(asm_position - last_asm_source_position_);
- last_asm_source_position_ = asm_position;
+ DCHECK_GE(call_position, 0);
+ asm_offsets_.write_i32v(call_position - last_asm_source_position_);
+
+ DCHECK_GE(to_number_position, 0);
+ asm_offsets_.write_i32v(to_number_position - call_position);
+ last_asm_source_position_ = to_number_position;
+}
+
+void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
+ DCHECK_EQ(0, asm_func_start_source_position_);
+ DCHECK_LE(0, position);
+ // Must be called before emitting any asm.js source position.
+ DCHECK_EQ(0, asm_offsets_.size());
+ asm_func_start_source_position_ = position;
+ last_asm_source_position_ = position;
}
void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
buffer.write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteExport(ZoneBuffer& buffer) const {
- if (exported_) {
- const ZoneVector<char>* exported_name =
- exported_name_.size() == 0 ? &name_ : &exported_name_;
- buffer.write_size(exported_name->size());
- buffer.write(reinterpret_cast<const byte*>(exported_name->data()),
- exported_name->size());
+void WasmFunctionBuilder::WriteExports(ZoneBuffer& buffer) const {
+ for (auto name : exported_names_) {
+ buffer.write_size(name.size());
+ buffer.write(reinterpret_cast<const byte*>(name.data()), name.size());
buffer.write_u8(kExternalFunction);
buffer.write_u32v(func_index_ +
static_cast<uint32_t>(builder_->imports_.size()));
@@ -204,14 +210,19 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
}
void WasmFunctionBuilder::WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const {
- if (asm_offsets_.size() == 0) {
+ if (asm_func_start_source_position_ == 0 && asm_offsets_.size() == 0) {
buffer.write_size(0);
return;
}
- buffer.write_size(asm_offsets_.size() + kInt32Size);
+ size_t locals_enc_size = LEBHelper::sizeof_u32v(locals_.Size());
+ size_t func_start_size =
+ LEBHelper::sizeof_u32v(asm_func_start_source_position_);
+ buffer.write_size(asm_offsets_.size() + locals_enc_size + func_start_size);
// Offset of the recorded byte offsets.
DCHECK_GE(kMaxUInt32, locals_.Size());
- buffer.write_u32(static_cast<uint32_t>(locals_.Size()));
+ buffer.write_u32v(static_cast<uint32_t>(locals_.Size()));
+ // Start position of the function.
+ buffer.write_u32v(asm_func_start_source_position_);
buffer.write(asm_offsets_.begin(), asm_offsets_.size());
}
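
[Editor's note: the table written above is length-prefixed and delta-encoded: after the payload size come the LEB-encoded size of the local declarations, the function's start position, and then one (byte-offset delta, call-position delta, to-number delta) triple per call recorded by AddAsmWasmOffset. A sketch of that accumulation with a stand-in writer; the real one is ZoneBuffer's write_u32v/write_i32v:]

#include <cstdint>
#include <vector>

struct OffsetWriter {
  std::vector<int64_t> varints;  // stand-in for the LEB-encoded output
  uint32_t last_byte_offset = 0;
  int last_source_position = 0;

  void AddAsmWasmOffset(uint32_t byte_offset, int call_position,
                        int to_number_position) {
    varints.push_back(byte_offset - last_byte_offset);         // u32v delta
    last_byte_offset = byte_offset;
    varints.push_back(call_position - last_source_position);   // i32v delta
    varints.push_back(to_number_position - call_position);     // i32v delta
    last_source_position = to_number_position;
  }
};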
@@ -271,8 +282,15 @@ uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
}
}
-void WasmModuleBuilder::AddIndirectFunction(uint32_t index) {
- indirect_functions_.push_back(index);
+uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
+ uint32_t ret = static_cast<uint32_t>(indirect_functions_.size());
+ indirect_functions_.resize(indirect_functions_.size() + count);
+ return ret;
+}
+
+void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
+ uint32_t direct) {
+ indirect_functions_[indirect] = direct;
}
uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
@@ -285,7 +303,7 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
start_function_index_ = function->func_index();
}
-uint32_t WasmModuleBuilder::AddGlobal(LocalType type, bool exported,
+uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
bool mutability,
const WasmInitExpr& init) {
globals_.push_back({type, exported, mutability, init});
@@ -309,11 +327,11 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_u8(kWasmFunctionTypeForm);
buffer.write_size(sig->parameter_count());
for (size_t j = 0; j < sig->parameter_count(); j++) {
- buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetParam(j)));
+ buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetParam(j)));
}
buffer.write_size(sig->return_count());
for (size_t j = 0; j < sig->return_count(); j++) {
- buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(sig->GetReturn(j)));
+ buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(sig->GetReturn(j)));
}
}
FixupSection(buffer, start);
@@ -324,10 +342,10 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
size_t start = EmitSection(kImportSectionCode, buffer);
buffer.write_size(imports_.size());
for (auto import : imports_) {
- buffer.write_u32v(import.name_length); // module name length
- buffer.write(reinterpret_cast<const byte*>(import.name), // module name
+ buffer.write_u32v(0); // module name length
+ buffer.write_u32v(import.name_length); // field name length
+ buffer.write(reinterpret_cast<const byte*>(import.name), // field name
import.name_length);
- buffer.write_u32v(0); // field name length
buffer.write_u8(kExternalFunction);
buffer.write_u32v(import.sig_index);
}
@@ -341,7 +359,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteSignature(buffer);
- if (function->exported()) exports++;
+ exports += function->exported_names_.size();
if (function->name_.size() > 0) has_names = true;
}
FixupSection(buffer, start);
@@ -374,29 +392,29 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(globals_.size());
for (auto global : globals_) {
- buffer.write_u8(WasmOpcodes::LocalTypeCodeFor(global.type));
+ buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(global.type));
buffer.write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
case WasmInitExpr::kI32Const: {
- DCHECK_EQ(kAstI32, global.type);
+ DCHECK_EQ(kWasmI32, global.type);
const byte code[] = {WASM_I32V_5(global.init.val.i32_const)};
buffer.write(code, sizeof(code));
break;
}
case WasmInitExpr::kI64Const: {
- DCHECK_EQ(kAstI64, global.type);
+ DCHECK_EQ(kWasmI64, global.type);
const byte code[] = {WASM_I64V_10(global.init.val.i64_const)};
buffer.write(code, sizeof(code));
break;
}
case WasmInitExpr::kF32Const: {
- DCHECK_EQ(kAstF32, global.type);
+ DCHECK_EQ(kWasmF32, global.type);
const byte code[] = {WASM_F32(global.init.val.f32_const)};
buffer.write(code, sizeof(code));
break;
}
case WasmInitExpr::kF64Const: {
- DCHECK_EQ(kAstF64, global.type);
+ DCHECK_EQ(kWasmF64, global.type);
const byte code[] = {WASM_F64(global.init.val.f64_const)};
buffer.write(code, sizeof(code));
break;
@@ -410,22 +428,22 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
default: {
// No initializer, emit a default value.
switch (global.type) {
- case kAstI32: {
+ case kWasmI32: {
const byte code[] = {WASM_I32V_1(0)};
buffer.write(code, sizeof(code));
break;
}
- case kAstI64: {
+ case kWasmI64: {
const byte code[] = {WASM_I64V_1(0)};
buffer.write(code, sizeof(code));
break;
}
- case kAstF32: {
+ case kWasmF32: {
const byte code[] = {WASM_F32(0.0)};
buffer.write(code, sizeof(code));
break;
}
- case kAstF64: {
+ case kWasmF64: {
const byte code[] = {WASM_F64(0.0)};
buffer.write(code, sizeof(code));
break;
@@ -444,7 +462,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
if (exports > 0) {
size_t start = EmitSection(kExportSectionCode, buffer);
buffer.write_u32v(exports);
- for (auto function : functions_) function->WriteExport(buffer);
+ for (auto function : functions_) function->WriteExports(buffer);
FixupSection(buffer, start);
}
@@ -517,10 +535,8 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
for (auto function : functions_) {
buffer.write_size(function->name_.size());
- if (function->name_.size() > 0) {
- buffer.write(reinterpret_cast<const byte*>(&function->name_[0]),
- function->name_.size());
- }
+ buffer.write(reinterpret_cast<const byte*>(function->name_.data()),
+ function->name_.size());
buffer.write_u8(0);
}
FixupSection(buffer, start);
@@ -534,6 +550,8 @@ void WasmModuleBuilder::WriteAsmJsOffsetTable(ZoneBuffer& buffer) const {
for (auto function : functions_) {
function->WriteAsmWasmOffsetTable(buffer);
}
+ // Append a 0 to indicate that this is an encoded table.
+ buffer.write_u8(0);
}
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index d35313ef47..3258f78d50 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -120,7 +120,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
public:
// Building methods.
void SetSignature(FunctionSig* sig);
- uint32_t AddLocal(LocalType type);
+ uint32_t AddLocal(ValueType type);
void EmitVarInt(uint32_t val);
void EmitCode(const byte* code, uint32_t code_size);
void Emit(WasmOpcode opcode);
@@ -132,17 +132,16 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
void EmitWithVarInt(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
- void Export();
void ExportAs(Vector<const char> name);
void SetName(Vector<const char> name);
- void AddAsmWasmOffset(int asm_position);
+ void AddAsmWasmOffset(int call_position, int to_number_position);
+ void SetAsmFunctionStartPosition(int position);
void WriteSignature(ZoneBuffer& buffer) const;
- void WriteExport(ZoneBuffer& buffer) const;
+ void WriteExports(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
- bool exported() { return exported_; }
uint32_t func_index() { return func_index_; }
FunctionSig* signature();
@@ -159,11 +158,10 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder_;
LocalDeclEncoder locals_;
uint32_t signature_index_;
- bool exported_;
uint32_t func_index_;
ZoneVector<uint8_t> body_;
ZoneVector<char> name_;
- ZoneVector<char> exported_name_;
+ ZoneVector<ZoneVector<char>> exported_names_;
ZoneVector<uint32_t> i32_temps_;
ZoneVector<uint32_t> i64_temps_;
ZoneVector<uint32_t> f32_temps_;
@@ -174,22 +172,23 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
ZoneBuffer asm_offsets_;
uint32_t last_asm_byte_offset_ = 0;
uint32_t last_asm_source_position_ = 0;
+ uint32_t asm_func_start_source_position_ = 0;
};
class WasmTemporary {
public:
- WasmTemporary(WasmFunctionBuilder* builder, LocalType type) {
+ WasmTemporary(WasmFunctionBuilder* builder, ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
temporary_ = &builder->i32_temps_;
break;
- case kAstI64:
+ case kWasmI64:
temporary_ = &builder->i64_temps_;
break;
- case kAstF32:
+ case kWasmF32:
temporary_ = &builder->f32_temps_;
break;
- case kAstF64:
+ case kWasmF64:
temporary_ = &builder->f64_temps_;
break;
default:
@@ -226,11 +225,12 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
imports_[index].name_length = name_length;
}
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
- uint32_t AddGlobal(LocalType type, bool exported, bool mutability = true,
+ uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
- void AddIndirectFunction(uint32_t index);
+ uint32_t AllocateIndirectFunctions(uint32_t count);
+ void SetIndirectFunction(uint32_t indirect, uint32_t direct);
void MarkStartFunction(WasmFunctionBuilder* builder);
// Writing methods.
@@ -256,7 +256,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
};
struct WasmGlobal {
- LocalType type;
+ ValueType type;
bool exported;
bool mutability;
WasmInitExpr init;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 79b99fe04d..60dda925fa 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -4,25 +4,26 @@
#include <memory>
+#include "src/assembler-inl.h"
+#include "src/base/adapters.h"
#include "src/base/atomic-utils.h"
#include "src/code-stubs.h"
-
-#include "src/macro-assembler.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/debug/interface-types.h"
#include "src/objects.h"
#include "src/property-descriptor.h"
#include "src/simulator.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
+#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
-#include "src/compiler/wasm-compiler.h"
-
using namespace v8::internal;
using namespace v8::internal::wasm;
namespace base = v8::base;
@@ -40,26 +41,11 @@ namespace base = v8::base;
namespace {
static const int kInvalidSigIndex = -1;
-static const int kPlaceholderMarker = 1000000000;
byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-MaybeHandle<String> ExtractStringFromModuleBytes(
- Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
- uint32_t offset, uint32_t size) {
- // TODO(wasm): cache strings from modules if it's a performance win.
- Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
- DCHECK_GE(static_cast<size_t>(module_bytes->length()), offset);
- DCHECK_GE(static_cast<size_t>(module_bytes->length() - offset), size);
- Address raw = module_bytes->GetCharsAddress() + offset;
- if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
- return {}; // UTF8 decoding error for name.
- return isolate->factory()->NewStringFromUtf8SubString(
- module_bytes, static_cast<int>(offset), static_cast<int>(size));
-}
-
void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
Handle<Object> new_ref) {
for (RelocIterator it(*code, 1 << RelocInfo::EMBEDDED_OBJECT); !it.done();
@@ -70,34 +56,74 @@ void ReplaceReferenceInCode(Handle<Code> code, Handle<Object> old_ref,
}
}
-Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
- if (size > (WasmModule::kV8MaxPages * WasmModule::kPageSize)) {
- // TODO(titzer): lift restriction on maximum memory allocated here.
- return Handle<JSArrayBuffer>::null();
- }
- void* memory = isolate->array_buffer_allocator()->Allocate(size);
- if (memory == nullptr) {
- return Handle<JSArrayBuffer>::null();
- }
+static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
+ JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
+ JSArrayBuffer* buffer = *p;
-#if DEBUG
- // Double check the API allocator actually zero-initialized the memory.
- const byte* bytes = reinterpret_cast<const byte*>(memory);
- for (size_t i = 0; i < size; ++i) {
- DCHECK_EQ(0, bytes[i]);
- }
+ void* memory = buffer->backing_store();
+ base::OS::Free(memory,
+ RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
+
+ data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
+ -buffer->byte_length()->Number());
+
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
+}
+
+#if V8_TARGET_ARCH_64_BIT
+const bool kGuardRegionsSupported = true;
+#else
+const bool kGuardRegionsSupported = false;
#endif
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, false, memory, static_cast<int>(size));
- buffer->set_is_neuterable(false);
- return buffer;
+bool EnableGuardRegions() {
+ return FLAG_wasm_guard_pages && kGuardRegionsSupported;
+}
+
+void* TryAllocateBackingStore(Isolate* isolate, size_t size,
+ bool enable_guard_regions, bool& is_external) {
+ is_external = false;
+ // TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
+ // systems. It may be safer to fail instead, given that other code might do
+ // things that would be unsafe if they expected guard pages where there
+ // weren't any.
+ if (enable_guard_regions && kGuardRegionsSupported) {
+ // TODO(eholk): On Windows we want to make sure we don't commit the guard
+ // pages yet.
+
+ // We always allocate the largest possible offset into the heap, so the
+ // addressable memory after the guard page can be made inaccessible.
+ const size_t alloc_size =
+ RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
+ DCHECK_EQ(0, size % base::OS::CommitPageSize());
+
+ // AllocateGuarded makes the whole region inaccessible by default.
+ void* memory = base::OS::AllocateGuarded(alloc_size);
+ if (memory == nullptr) {
+ return nullptr;
+ }
+
+ // Make the part we care about accessible.
+ base::OS::Unprotect(memory, size);
+
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(size);
+
+ is_external = true;
+ return memory;
+ } else {
+ void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ return memory;
+ }
}
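
[Editor's note: the guard-region branch relies on reserving the entire span a wasm load could ever address (kWasmMaxHeapOffset, roughly 8 GiB: a maximal 32-bit base plus a maximal 32-bit index) and leaving everything past the valid `size` bytes inaccessible, so out-of-bounds accesses fault instead of needing explicit bounds checks. A rough POSIX sketch of the same layout, stated as an assumption for illustration; V8 itself goes through its base::OS wrappers as shown above, and this requires a 64-bit address space:]

#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

void* AllocateGuardedWasmMemory(size_t accessible_size) {
  // Reserve the whole addressable span with no access rights; any stray
  // wasm load or store into the reservation faults immediately.
  const size_t kReserve = size_t{1} << 33;  // ~kWasmMaxHeapOffset, rounded up
  void* region = mmap(nullptr, kReserve, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (region == MAP_FAILED) return nullptr;
  // Only the currently valid prefix (assumed page-aligned) becomes
  // readable and writable, matching base::OS::Unprotect above.
  if (mprotect(region, accessible_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(region, kReserve);
    return nullptr;
  }
  return region;
}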
void RelocateMemoryReferencesInCode(Handle<FixedArray> code_table,
+ uint32_t num_imported_functions,
Address old_start, Address start,
uint32_t prev_size, uint32_t new_size) {
- for (int i = 0; i < code_table->length(); ++i) {
+ for (int i = static_cast<int>(num_imported_functions);
+ i < code_table->length(); ++i) {
DCHECK(code_table->get(i)->IsCode());
Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
AllowDeferredHandleDereference embedding_raw_address;
@@ -123,52 +149,25 @@ void RelocateGlobals(Handle<FixedArray> code_table, Address old_start,
}
}
-Handle<Code> CreatePlaceholder(Factory* factory, uint32_t index,
- Code::Kind kind) {
- // Create a placeholder code object and encode the corresponding index in
- // the {constant_pool_offset} field of the code object.
- // TODO(titzer): instead of placeholders, use a reloc_info mode.
- static byte buffer[] = {0, 0, 0, 0}; // fake instructions.
- static CodeDesc desc = {
- buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
- Handle<Code> code = factory->NewCode(desc, Code::KindField::encode(kind),
- Handle<Object>::null());
- code->set_constant_pool_offset(static_cast<int>(index) + kPlaceholderMarker);
- return code;
-}
-
-bool LinkFunction(Handle<Code> unlinked,
- std::vector<Handle<Code>>& code_table) {
- bool modified = false;
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
- AllowDeferredHandleDereference embedding_raw_address;
- for (RelocIterator it(*unlinked, mode_mask); !it.done(); it.next()) {
- RelocInfo::Mode mode = it.rinfo()->rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Code* target =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (target->constant_pool_offset() < kPlaceholderMarker) continue;
- switch (target->kind()) {
- case Code::WASM_FUNCTION: // fall through
- case Code::WASM_TO_JS_FUNCTION: // fall through
- case Code::JS_TO_WASM_FUNCTION: {
- // Patch direct calls to placeholder code objects.
- uint32_t index = target->constant_pool_offset() - kPlaceholderMarker;
- Handle<Code> new_target = code_table[index];
- if (target != *new_target) {
- it.rinfo()->set_target_address(new_target->instruction_start(),
- UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- modified = true;
- }
- break;
- }
- default:
- break;
- }
+void RelocateTableSizeReferences(Handle<FixedArray> code_table,
+ uint32_t old_size, uint32_t new_size) {
+ for (int i = 0; i < code_table->length(); ++i) {
+ DCHECK(code_table->get(i)->IsCode());
+ Handle<Code> code = Handle<Code>(Code::cast(code_table->get(i)));
+ AllowDeferredHandleDereference embedding_raw_address;
+ int mask = 1 << RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+ for (RelocIterator it(*code, mask); !it.done(); it.next()) {
+ it.rinfo()->update_wasm_function_table_size_reference(old_size, new_size);
}
}
- return modified;
+}
+
+Handle<Code> CreatePlaceholder(Factory* factory, Code::Kind kind) {
+ byte buffer[] = {0, 0, 0, 0}; // fake instructions.
+ CodeDesc desc = {
+ buffer, arraysize(buffer), arraysize(buffer), 0, 0, nullptr, 0, nullptr};
+ return factory->NewCode(desc, Code::KindField::encode(kind),
+ Handle<Object>::null());
}
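// The aggregate initializer above fills the CodeDesc with four bytes of fake
// instructions and zeroes everything else: no relocation info, no constant
// pool, no unwinding info, and no originating Assembler.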
void FlushICache(Isolate* isolate, Handle<FixedArray> code_table) {
@@ -257,7 +256,7 @@ Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
Address old_address = nullptr;
if (instance->has_globals_buffer()) {
old_address =
- static_cast<Address>(instance->get_globals_buffer()->backing_store());
+ static_cast<Address>(instance->globals_buffer()->backing_store());
}
return old_address;
}
@@ -265,7 +264,7 @@ Address GetGlobalStartAddressFromCodeTemplate(Object* undefined,
void InitializeParallelCompilation(
Isolate* isolate, const std::vector<WasmFunction>& functions,
std::vector<compiler::WasmCompilationUnit*>& compilation_units,
- ModuleEnv& module_env, ErrorThrower* thrower) {
+ ModuleBytesEnv& module_env, ErrorThrower* thrower) {
for (uint32_t i = FLAG_skip_compiling_wasm_funcs; i < functions.size(); ++i) {
const WasmFunction* func = &functions[i];
compilation_units[i] =
@@ -329,9 +328,10 @@ void FinishCompilationUnits(
}
}
-void CompileInParallel(Isolate* isolate, const WasmModule* module,
+void CompileInParallel(Isolate* isolate, ModuleBytesEnv* module_env,
std::vector<Handle<Code>>& functions,
- ErrorThrower* thrower, ModuleEnv* module_env) {
+ ErrorThrower* thrower) {
+ const WasmModule* module = module_env->module;
// Data structures for the parallel compilation.
std::vector<compiler::WasmCompilationUnit*> compilation_units(
module->functions.size());
@@ -393,22 +393,23 @@ void CompileInParallel(Isolate* isolate, const WasmModule* module,
FinishCompilationUnits(executed_units, functions, result_mutex);
}
-void CompileSequentially(Isolate* isolate, const WasmModule* module,
+void CompileSequentially(Isolate* isolate, ModuleBytesEnv* module_env,
std::vector<Handle<Code>>& functions,
- ErrorThrower* thrower, ModuleEnv* module_env) {
+ ErrorThrower* thrower) {
DCHECK(!thrower->error());
+ const WasmModule* module = module_env->module;
for (uint32_t i = FLAG_skip_compiling_wasm_funcs;
i < module->functions.size(); ++i) {
const WasmFunction& func = module->functions[i];
if (func.imported) continue; // Imports are compiled at instantiation time.
- WasmName str = module->GetName(func.name_offset, func.name_length);
Handle<Code> code = Handle<Code>::null();
// Compile the function.
code = compiler::WasmCompilationUnit::CompileWasmFunction(
thrower, isolate, module_env, &func);
if (code.is_null()) {
+ WasmName str = module_env->GetName(&func);
thrower->CompileError("Compilation of #%d:%.*s failed.", i, str.length(),
str.start());
break;
@@ -418,36 +419,120 @@ void CompileSequentially(Isolate* isolate, const WasmModule* module,
}
}
-void PatchDirectCalls(Handle<FixedArray> old_functions,
- Handle<FixedArray> new_functions, int start) {
- DCHECK_EQ(new_functions->length(), old_functions->length());
+int ExtractDirectCallIndex(wasm::Decoder& decoder, const byte* pc) {
+ DCHECK_EQ(static_cast<int>(kExprCallFunction), static_cast<int>(*pc));
+ decoder.Reset(pc + 1, pc + 6);
+ uint32_t call_idx = decoder.consume_u32v("call index");
+ DCHECK(decoder.ok());
+ DCHECK_GE(kMaxInt, call_idx);
+ return static_cast<int>(call_idx);
+}
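+// The "u32v" consumed above is an unsigned LEB128 value (at most five bytes
+// for a u32). A freestanding decoder equivalent in spirit to
+// Decoder::consume_u32v, minus the bounds and length checks, would be:
+//
+//   uint32_t DecodeU32LEB(const byte* pc) {
+//     uint32_t result = 0;
+//     int shift = 0;
+//     byte b;
+//     do {
+//       b = *pc++;
+//       result |= static_cast<uint32_t>(b & 0x7f) << shift;
+//       shift += 7;
+//     } while (b & 0x80);
+//     return result;
+//   }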
+
+int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
+ size_t offset_l) {
+ DCHECK_GE(kMaxInt, offset_l);
+ int offset = static_cast<int>(offset_l);
+ DCHECK(!iterator.done());
+ int byte_pos;
+ do {
+ byte_pos = iterator.source_position().ScriptOffset();
+ iterator.Advance();
+ } while (!iterator.done() && iterator.code_offset() <= offset);
+ return byte_pos;
+}
+void PatchContext(RelocIterator& it, Context* context) {
+ Object* old = it.rinfo()->target_object();
+ // The only context we use is the native context.
+ DCHECK_IMPLIES(old->IsContext(), old->IsNativeContext());
+ if (!old->IsNativeContext()) return;
+ it.rinfo()->set_target_object(context, UPDATE_WRITE_BARRIER,
+ SKIP_ICACHE_FLUSH);
+}
+
+void PatchDirectCallsAndContext(Handle<FixedArray> new_functions,
+ Handle<WasmCompiledModule> compiled_module,
+ WasmModule* module, int start) {
DisallowHeapAllocation no_gc;
- std::map<Code*, Code*> old_to_new_code;
- for (int i = 0; i < new_functions->length(); ++i) {
- old_to_new_code.insert(std::make_pair(Code::cast(old_functions->get(i)),
- Code::cast(new_functions->get(i))));
- }
- int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
AllowDeferredHandleDereference embedding_raw_address;
- for (int i = start; i < new_functions->length(); ++i) {
- Code* wasm_function = Code::cast(new_functions->get(i));
+ SeqOneByteString* module_bytes = compiled_module->module_bytes();
+ std::vector<WasmFunction>* wasm_functions =
+ &compiled_module->module()->functions;
+ DCHECK_EQ(wasm_functions->size() +
+ compiled_module->module()->num_exported_functions,
+ new_functions->length());
+ DCHECK_EQ(start, compiled_module->module()->num_imported_functions);
+ Context* context = compiled_module->ptr_to_native_context();
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+
+ // Allocate decoder outside of the loop and reuse it to decode all function
+ // indexes.
+ wasm::Decoder decoder(nullptr, nullptr);
+ int num_wasm_functions = static_cast<int>(wasm_functions->size());
+ int func_index = start;
+ // Patch all wasm functions.
+ for (; func_index < num_wasm_functions; ++func_index) {
+ Code* wasm_function = Code::cast(new_functions->get(func_index));
+ DCHECK(wasm_function->kind() == Code::WASM_FUNCTION);
+ // Iterate simultaneously over the relocation information and the source
+ // position table. For each call in the reloc info, move the source position
+ // iterator forward to that position to find the byte offset of the
+ // respective call. Then extract the call index from the module wire bytes
+ // to find the new compiled function.
+ SourcePositionTableIterator source_pos_iterator(
+ wasm_function->source_position_table());
+ const byte* func_bytes =
+ module_bytes->GetChars() +
+ compiled_module->module()->functions[func_index].code_start_offset;
for (RelocIterator it(wasm_function, mode_mask); !it.done(); it.next()) {
- Code* old_code =
- Code::GetCodeFromTargetAddress(it.rinfo()->target_address());
- if (old_code->kind() == Code::WASM_TO_JS_FUNCTION ||
- old_code->kind() == Code::WASM_FUNCTION) {
- auto found = old_to_new_code.find(old_code);
- DCHECK(found != old_to_new_code.end());
- Code* new_code = found->second;
- if (new_code != old_code) {
- it.rinfo()->set_target_address(new_code->instruction_start(),
- UPDATE_WRITE_BARRIER,
- SKIP_ICACHE_FLUSH);
- }
+ if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
+ PatchContext(it, context);
+ continue;
}
+ DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+ Code::Kind kind =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address())->kind();
+ if (kind != Code::WASM_FUNCTION && kind != Code::WASM_TO_JS_FUNCTION)
+ continue;
+ size_t offset = it.rinfo()->pc() - wasm_function->instruction_start();
+ int byte_pos =
+ AdvanceSourcePositionTableIterator(source_pos_iterator, offset);
+ int called_func_index =
+ ExtractDirectCallIndex(decoder, func_bytes + byte_pos);
+ Code* new_code = Code::cast(new_functions->get(called_func_index));
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
}
}
+ // Patch all exported functions.
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Code* export_wrapper = Code::cast(new_functions->get(func_index));
+ DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
+ // There must be exactly one call to WASM_FUNCTION or WASM_TO_JS_FUNCTION.
+ int num_wasm_calls = 0;
+ for (RelocIterator it(export_wrapper, mode_mask); !it.done(); it.next()) {
+ if (RelocInfo::IsEmbeddedObject(it.rinfo()->rmode())) {
+ PatchContext(it, context);
+ continue;
+ }
+ DCHECK(RelocInfo::IsCodeTarget(it.rinfo()->rmode()));
+ Code::Kind kind =
+ Code::GetCodeFromTargetAddress(it.rinfo()->target_address())->kind();
+ if (kind != Code::WASM_FUNCTION && kind != Code::WASM_TO_JS_FUNCTION)
+ continue;
+ ++num_wasm_calls;
+ Code* new_code = Code::cast(new_functions->get(exp.index));
+ DCHECK(new_code->kind() == Code::WASM_FUNCTION ||
+ new_code->kind() == Code::WASM_TO_JS_FUNCTION);
+ it.rinfo()->set_target_address(new_code->instruction_start(),
+ UPDATE_WRITE_BARRIER, SKIP_ICACHE_FLUSH);
+ }
+ DCHECK_EQ(1, num_wasm_calls);
+ func_index++;
+ }
+ DCHECK_EQ(new_functions->length(), func_index);
}
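// In outline, the per-call patching above is (pseudo-code over the helpers
// defined earlier in this file):
//
//   for each CODE_TARGET reloc in wasm_function:
//     offset   = reloc.pc - wasm_function.instruction_start()
//     byte_pos = AdvanceSourcePositionTableIterator(source_pos_iterator,
//                                                   offset)
//     callee   = ExtractDirectCallIndex(decoder, func_bytes + byte_pos)
//     reloc.set_target_address(new_functions[callee].instruction_start())
//
// This is sound because each direct call's source position is the byte
// offset of its kExprCallFunction opcode in the module wire bytes, from
// which the callee index can be re-decoded.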
static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
@@ -456,7 +541,7 @@ static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
Object* undefined = *isolate->factory()->undefined_value();
uint32_t old_mem_size = compiled_module->mem_size();
uint32_t default_mem_size = compiled_module->default_mem_size();
- Object* mem_start = compiled_module->ptr_to_memory();
+ Object* mem_start = compiled_module->maybe_ptr_to_memory();
Address old_mem_address = nullptr;
Address globals_start =
GetGlobalStartAddressFromCodeTemplate(undefined, owner);
@@ -486,7 +571,8 @@ static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
if (fct_obj != nullptr && fct_obj != undefined &&
(old_mem_size > 0 || globals_start != nullptr || function_tables)) {
FixedArray* functions = FixedArray::cast(fct_obj);
- for (int i = 0; i < functions->length(); ++i) {
+ for (int i = compiled_module->num_imported_functions();
+ i < functions->length(); ++i) {
Code* code = Code::cast(functions->get(i));
bool changed = false;
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
@@ -518,12 +604,58 @@ static void ResetCompiledModule(Isolate* isolate, WasmInstanceObject* owner,
compiled_module->reset_memory();
}
+static void MemoryInstanceFinalizer(Isolate* isolate,
+ WasmInstanceObject* instance) {
+ DisallowHeapAllocation no_gc;
+ // If the memory object is destroyed, nothing needs to be done here.
+ if (!instance->has_memory_object()) return;
+ Handle<WasmInstanceWrapper> instance_wrapper =
+ handle(instance->instance_wrapper());
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+ DCHECK(instance_wrapper->has_instance());
+ bool has_prev = instance_wrapper->has_previous();
+ bool has_next = instance_wrapper->has_next();
+ Handle<WasmMemoryObject> memory_object(instance->memory_object());
+
+ if (!has_prev && !has_next) {
+ memory_object->ResetInstancesLink(isolate);
+ return;
+ } else {
+ if (!has_prev) {
+ Handle<WasmInstanceWrapper> next_wrapper =
+ instance_wrapper->next_wrapper();
+ next_wrapper->reset_previous_wrapper();
+      // As this is the first link in the memory object's instance chain,
+      // destroying it without updating the memory object would corrupt that
+      // chain.
+ memory_object->set_instances_link(*next_wrapper);
+ } else if (!has_next) {
+ instance_wrapper->previous_wrapper()->reset_next_wrapper();
+ } else {
+ DCHECK(has_next && has_prev);
+ Handle<WasmInstanceWrapper> prev_wrapper =
+ instance_wrapper->previous_wrapper();
+ Handle<WasmInstanceWrapper> next_wrapper =
+ instance_wrapper->next_wrapper();
+ prev_wrapper->set_next_wrapper(*next_wrapper);
+ next_wrapper->set_previous_wrapper(*prev_wrapper);
+ }
+    // Reset the wrapper to avoid dangling pointers.
+ instance_wrapper->reset();
+ }
+}
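+// The branches above are the standard doubly-linked-list unlink, with the
+// memory object acting as the list head. Over plain pointers (illustrative
+// types, not the actual wrapper objects) the same logic reads:
+//
+//   struct Node { Node* prev; Node* next; };
+//   void Unlink(Node** head, Node* n) {
+//     if (n->prev) n->prev->next = n->next;
+//     else *head = n->next;            // first node: update the head
+//     if (n->next) n->next->prev = n->prev;
+//     n->prev = n->next = nullptr;     // avoid dangling pointers
+//   }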
+
static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
+ DisallowHeapAllocation no_gc;
JSObject** p = reinterpret_cast<JSObject**>(data.GetParameter());
WasmInstanceObject* owner = reinterpret_cast<WasmInstanceObject*>(*p);
- WasmCompiledModule* compiled_module = owner->get_compiled_module();
- TRACE("Finalizing %d {\n", compiled_module->instance_id());
Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
+ // If a link to shared memory instances exists, update the list of memory
+ // instances before the instance is destroyed.
+ if (owner->has_instance_wrapper()) MemoryInstanceFinalizer(isolate, owner);
+ WasmCompiledModule* compiled_module = owner->compiled_module();
+ TRACE("Finalizing %d {\n", compiled_module->instance_id());
DCHECK(compiled_module->has_weak_wasm_module());
WeakCell* weak_wasm_module = compiled_module->ptr_to_weak_wasm_module();
@@ -540,8 +672,8 @@ static void InstanceFinalizer(const v8::WeakCallbackInfo<void>& data) {
TRACE("}\n");
DCHECK(!current_template->has_weak_prev_instance());
- WeakCell* next = compiled_module->ptr_to_weak_next_instance();
- WeakCell* prev = compiled_module->ptr_to_weak_prev_instance();
+ WeakCell* next = compiled_module->maybe_ptr_to_weak_next_instance();
+ WeakCell* prev = compiled_module->maybe_ptr_to_weak_prev_instance();
if (current_template == compiled_module) {
if (next == nullptr) {
@@ -597,8 +729,81 @@ std::pair<int, int> GetFunctionOffsetAndLength(
static_cast<int>(func.code_end_offset - func.code_start_offset)};
}
+Handle<Script> CreateWasmScript(Isolate* isolate,
+ const ModuleWireBytes& wire_bytes) {
+ Handle<Script> script =
+ isolate->factory()->NewScript(isolate->factory()->empty_string());
+ script->set_type(Script::TYPE_WASM);
+
+ int hash = StringHasher::HashSequentialString(
+ reinterpret_cast<const char*>(wire_bytes.module_bytes.start()),
+ wire_bytes.module_bytes.length(), kZeroHashSeed);
+
+ const int kBufferSize = 50;
+ char buffer[kBufferSize];
+ int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
+ DCHECK(url_chars >= 0 && url_chars < kBufferSize);
+ MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
+ TENURED);
+ script->set_source_url(*url_str.ToHandleChecked());
+
+ int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
+ DCHECK(name_chars >= 0 && name_chars < kBufferSize);
+ MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
+ Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
+ TENURED);
+ script->set_name(*name_str.ToHandleChecked());
+
+ return script;
+}
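+// Example: wire bytes hashing to 0x0a1b2c3d (hypothetical value) yield the
+// source URL "wasm://wasm/0a1b2c3d" and the script name "wasm-0a1b2c3d".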
} // namespace
+Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
+ bool enable_guard_regions) {
+ if (size > (kV8MaxWasmMemoryPages * WasmModule::kPageSize)) {
+ // TODO(titzer): lift restriction on maximum memory allocated here.
+ return Handle<JSArrayBuffer>::null();
+ }
+
+ enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;
+
+ bool is_external; // Set by TryAllocateBackingStore
+ void* memory =
+ TryAllocateBackingStore(isolate, size, enable_guard_regions, is_external);
+
+ if (memory == nullptr) {
+ return Handle<JSArrayBuffer>::null();
+ }
+
+#if DEBUG
+ // Double check the API allocator actually zero-initialized the memory.
+ const byte* bytes = reinterpret_cast<const byte*>(memory);
+ for (size_t i = 0; i < size; ++i) {
+ DCHECK_EQ(0, bytes[i]);
+ }
+#endif
+
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
+ static_cast<int>(size));
+ buffer->set_is_neuterable(false);
+ buffer->set_has_guard_region(enable_guard_regions);
+
+ if (is_external) {
+ // We mark the buffer as external if we allocated it here with guard
+ // pages. That means we need to arrange for it to be freed.
+
+ // TODO(eholk): Finalizers may not run when the main thread is shutting
+ // down, which means we may leak memory here.
+ Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
+ GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
+ &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
+ }
+
+ return buffer;
+}
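+// Typical call site (cf. WasmInstanceBuilder::AllocateMemory below): a
+// module declaring min_mem_pages of initial memory allocates its backing
+// store as
+//
+//   NewArrayBuffer(isolate, min_mem_pages * WasmModule::kPageSize,
+//                  EnableGuardRegions());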
+
const char* wasm::SectionName(WasmSectionCode code) {
switch (code) {
case kUnknownSectionCode:
@@ -650,15 +855,12 @@ std::ostream& wasm::operator<<(std::ostream& os, const WasmFunction& function) {
return os;
}
-std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& pair) {
- os << "#" << pair.function_->func_index << ":";
- if (pair.function_->name_offset > 0) {
- if (pair.module_) {
- WasmName name = pair.module_->GetName(pair.function_->name_offset,
- pair.function_->name_length);
- os.write(name.start(), name.length());
- } else {
- os << "+" << pair.function_->func_index;
+std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& name) {
+ os << "#" << name.function_->func_index;
+ if (name.function_->name_offset > 0) {
+ if (name.name_.start()) {
+ os << ":";
+ os.write(name.name_.start(), name.name_.length());
}
} else {
os << "?";
@@ -666,16 +868,17 @@ std::ostream& wasm::operator<<(std::ostream& os, const WasmFunctionName& pair) {
return os;
}
-Object* wasm::GetOwningWasmInstance(Code* code) {
+WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
DCHECK(code->kind() == Code::WASM_FUNCTION);
DisallowHeapAllocation no_gc;
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_NOT_NULL(deopt_data);
- DCHECK(deopt_data->length() == 2);
+ DCHECK_EQ(2, deopt_data->length());
Object* weak_link = deopt_data->get(0);
- if (!weak_link->IsWeakCell()) return nullptr;
+ DCHECK(weak_link->IsWeakCell());
WeakCell* cell = WeakCell::cast(weak_link);
- return cell->value();
+ if (!cell->value()) return nullptr;
+ return WasmInstanceObject::cast(cell->value());
}
int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
@@ -683,69 +886,41 @@ int wasm::GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
return GetFunctionOffsetAndLength(compiled_module, func_index).first;
}
-bool wasm::GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
- uint32_t position, Script::PositionInfo* info) {
- std::vector<WasmFunction>& functions = compiled_module->module()->functions;
-
- // Binary search for a function containing the given position.
- int left = 0; // inclusive
- int right = static_cast<int>(functions.size()); // exclusive
- if (right == 0) return false;
- while (right - left > 1) {
- int mid = left + (right - left) / 2;
- if (functions[mid].code_start_offset <= position) {
- left = mid;
- } else {
- right = mid;
- }
- }
- // If the found entry does not contains the given position, return false.
- WasmFunction& func = functions[left];
- if (position < func.code_start_offset || position >= func.code_end_offset) {
- return false;
- }
-
- info->line = left;
- info->column = position - func.code_start_offset;
- info->line_start = func.code_start_offset;
- info->line_end = func.code_end_offset;
- return true;
-}
-
-WasmModule::WasmModule(Zone* owned, const byte* module_start)
- : owned_zone(owned),
- module_start(module_start),
- pending_tasks(new base::Semaphore(0)) {}
+WasmModule::WasmModule(Zone* owned)
+ : owned_zone(owned), pending_tasks(new base::Semaphore(0)) {}
MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper,
- ErrorThrower* thrower) const {
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) const {
Factory* factory = isolate->factory();
MaybeHandle<WasmCompiledModule> nothing;
WasmInstance temp_instance(this);
temp_instance.context = isolate->native_context();
- temp_instance.mem_size = WasmModule::kPageSize * this->min_mem_pages;
+ temp_instance.mem_size = WasmModule::kPageSize * min_mem_pages;
temp_instance.mem_start = nullptr;
temp_instance.globals_start = nullptr;
// Initialize the indirect tables with placeholders.
- int function_table_count = static_cast<int>(this->function_tables.size());
+ int function_table_count = static_cast<int>(function_tables.size());
Handle<FixedArray> function_tables =
- factory->NewFixedArray(function_table_count);
+ factory->NewFixedArray(function_table_count, TENURED);
+ Handle<FixedArray> signature_tables =
+ factory->NewFixedArray(function_table_count, TENURED);
for (int i = 0; i < function_table_count; ++i) {
- temp_instance.function_tables[i] = factory->NewFixedArray(0);
+ temp_instance.function_tables[i] = factory->NewFixedArray(1, TENURED);
+ temp_instance.signature_tables[i] = factory->NewFixedArray(1, TENURED);
function_tables->set(i, *temp_instance.function_tables[i]);
+ signature_tables->set(i, *temp_instance.signature_tables[i]);
}
HistogramTimerScope wasm_compile_module_time_scope(
isolate->counters()->wasm_compile_module_time());
- ModuleEnv module_env;
- module_env.module = this;
- module_env.instance = &temp_instance;
- module_env.origin = origin;
+ ModuleBytesEnv module_env(this, &temp_instance, wire_bytes);
  // The {code_table} array contains import wrappers and functions (which
  // are both included in {functions.size()}), and export wrappers.
@@ -755,12 +930,11 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
// Initialize the code table with placeholders.
+ Handle<Code> code_placeholder =
+ CreatePlaceholder(factory, Code::WASM_FUNCTION);
for (uint32_t i = 0; i < functions.size(); ++i) {
- Code::Kind kind = Code::WASM_FUNCTION;
- if (i < num_imported_functions) kind = Code::WASM_TO_JS_FUNCTION;
- Handle<Code> placeholder = CreatePlaceholder(factory, i, kind);
- code_table->set(static_cast<int>(i), *placeholder);
- temp_instance.function_code[i] = placeholder;
+ code_table->set(static_cast<int>(i), *code_placeholder);
+ temp_instance.function_code[i] = code_placeholder;
}
isolate->counters()->wasm_functions_per_module()->AddSample(
@@ -772,14 +946,14 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
for (size_t i = 0; i < temp_instance.function_code.size(); ++i) {
results.push_back(temp_instance.function_code[i]);
}
- CompileInParallel(isolate, this, results, thrower, &module_env);
+ CompileInParallel(isolate, &module_env, results, thrower);
for (size_t i = 0; i < results.size(); ++i) {
temp_instance.function_code[i] = results[i];
}
} else {
- CompileSequentially(isolate, this, temp_instance.function_code, thrower,
- &module_env);
+ CompileSequentially(isolate, &module_env, temp_instance.function_code,
+ thrower);
}
if (thrower->error()) return nothing;
@@ -788,61 +962,74 @@ MaybeHandle<WasmCompiledModule> WasmModule::CompileFunctions(
i < temp_instance.function_code.size(); ++i) {
Code* code = *temp_instance.function_code[i];
code_table->set(static_cast<int>(i), code);
+ RecordStats(isolate, code);
}
- // Link the functions in the module.
- for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance.function_code.size(); ++i) {
- Handle<Code> code = temp_instance.function_code[i];
- bool modified = LinkFunction(code, temp_instance.function_code);
- if (modified) {
- // TODO(mtrofin): do we need to flush the cache here?
- Assembler::FlushICache(isolate, code->instruction_start(),
- code->instruction_size());
- }
- }
+ // Create heap objects for script, module bytes and asm.js offset table to be
+ // stored in the shared module data.
+ Handle<Script> script;
+ Handle<ByteArray> asm_js_offset_table;
+ if (asm_js_script.is_null()) {
+ script = CreateWasmScript(isolate, wire_bytes);
+ } else {
+ script = asm_js_script;
+ asm_js_offset_table =
+ isolate->factory()->NewByteArray(asm_js_offset_table_bytes.length());
+ asm_js_offset_table->copy_in(0, asm_js_offset_table_bytes.start(),
+ asm_js_offset_table_bytes.length());
+ }
+ // TODO(wasm): only save the sections necessary to deserialize a
+ // {WasmModule}. E.g. function bodies could be omitted.
+ Handle<String> module_bytes =
+ factory->NewStringFromOneByte(wire_bytes.module_bytes, TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ isolate, module_wrapper, Handle<SeqOneByteString>::cast(module_bytes),
+ script, asm_js_offset_table);
// Create the compiled module object, and populate with compiled functions
// and information needed at instantiation time. This object needs to be
// serializable. Instantiation may occur off a deserialized version of this
// object.
- Handle<WasmCompiledModule> ret =
- WasmCompiledModule::New(isolate, module_wrapper);
+ Handle<WasmCompiledModule> ret = WasmCompiledModule::New(isolate, shared);
+ ret->set_num_imported_functions(num_imported_functions);
ret->set_code_table(code_table);
ret->set_min_mem_pages(min_mem_pages);
ret->set_max_mem_pages(max_mem_pages);
if (function_table_count > 0) {
ret->set_function_tables(function_tables);
+ ret->set_signature_tables(signature_tables);
ret->set_empty_function_tables(function_tables);
}
+ // If we created a wasm script, finish it now and make it public to the
+ // debugger.
+ if (asm_js_script.is_null()) {
+ script->set_wasm_compiled_module(*ret);
+ isolate->debug()->OnAfterCompile(script);
+ }
+
// Compile JS->WASM wrappers for exported functions.
int func_index = 0;
for (auto exp : export_table) {
if (exp.kind != kExternalFunction) continue;
Handle<Code> wasm_code =
code_table->GetValueChecked<Code>(isolate, exp.index);
- Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate, &module_env, wasm_code, exp.index);
+ Handle<Code> wrapper_code =
+ compiler::CompileJSToWasmWrapper(isolate, this, wasm_code, exp.index);
int export_index = static_cast<int>(functions.size() + func_index);
code_table->set(export_index, *wrapper_code);
+ RecordStats(isolate, *wrapper_code);
func_index++;
}
- {
- // TODO(wasm): only save the sections necessary to deserialize a
- // {WasmModule}. E.g. function bodies could be omitted.
- size_t module_bytes_len = module_end - module_start;
- DCHECK_LE(module_bytes_len, static_cast<size_t>(kMaxInt));
- Vector<const uint8_t> module_bytes_vec(module_start,
- static_cast<int>(module_bytes_len));
- Handle<String> module_bytes_string =
- factory->NewStringFromOneByte(module_bytes_vec, TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes_string->IsSeqOneByteString());
- ret->set_module_bytes(Handle<SeqOneByteString>::cast(module_bytes_string));
- }
-
return ret;
}
@@ -876,7 +1063,7 @@ static Handle<Code> UnwrapImportWrapper(Handle<Object> target) {
code = handle(target);
}
}
- DCHECK(found == 1);
+ DCHECK_EQ(1, found);
return code;
}
@@ -884,8 +1071,8 @@ static Handle<Code> CompileImportWrapper(Isolate* isolate, int index,
FunctionSig* sig,
Handle<JSReceiver> target,
Handle<String> module_name,
- MaybeHandle<String> import_name) {
- Handle<Code> code;
+ MaybeHandle<String> import_name,
+ ModuleOrigin origin) {
WasmFunction* other_func = GetWasmFunctionForImportWrapper(isolate, target);
if (other_func) {
if (sig->Equals(other_func->sig)) {
@@ -898,7 +1085,7 @@ static Handle<Code> CompileImportWrapper(Isolate* isolate, int index,
} else {
// Signature mismatch. Compile a new wrapper for the new signature.
return compiler::CompileWasmToJSWrapper(isolate, target, sig, index,
- module_name, import_name);
+ module_name, import_name, origin);
}
}
@@ -906,11 +1093,13 @@ static void UpdateDispatchTablesInternal(Isolate* isolate,
Handle<FixedArray> dispatch_tables,
int index, WasmFunction* function,
Handle<Code> code) {
- DCHECK_EQ(0, dispatch_tables->length() % 3);
- for (int i = 0; i < dispatch_tables->length(); i += 3) {
+ DCHECK_EQ(0, dispatch_tables->length() % 4);
+ for (int i = 0; i < dispatch_tables->length(); i += 4) {
int table_index = Smi::cast(dispatch_tables->get(i + 1))->value();
- Handle<FixedArray> dispatch_table(
+ Handle<FixedArray> function_table(
FixedArray::cast(dispatch_tables->get(i + 2)), isolate);
+ Handle<FixedArray> signature_table(
+ FixedArray::cast(dispatch_tables->get(i + 3)), isolate);
if (function) {
// TODO(titzer): the signature might need to be copied to avoid
// a dangling pointer in the signature map.
@@ -919,12 +1108,12 @@ static void UpdateDispatchTablesInternal(Isolate* isolate,
int sig_index = static_cast<int>(
instance->module()->function_tables[table_index].map.FindOrInsert(
function->sig));
- dispatch_table->set(index, Smi::FromInt(sig_index));
- dispatch_table->set(index + (dispatch_table->length() / 2), *code);
+ signature_table->set(index, Smi::FromInt(sig_index));
+ function_table->set(index, *code);
} else {
Code* code = nullptr;
- dispatch_table->set(index, Smi::FromInt(-1));
- dispatch_table->set(index + (dispatch_table->length() / 2), code);
+ signature_table->set(index, Smi::FromInt(-1));
+ function_table->set(index, code);
}
}
}
@@ -949,17 +1138,27 @@ void wasm::UpdateDispatchTables(Isolate* isolate,
class WasmInstanceBuilder {
public:
WasmInstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
- Handle<JSObject> module_object, Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory)
+ Handle<WasmModuleObject> module_object,
+ Handle<JSReceiver> ffi, Handle<JSArrayBuffer> memory)
: isolate_(isolate),
+ module_(module_object->compiled_module()->module()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
memory_(memory) {}
// Build an instance, in all of its glory.
- MaybeHandle<JSObject> Build() {
- MaybeHandle<JSObject> nothing;
+ MaybeHandle<WasmInstanceObject> Build() {
+ MaybeHandle<WasmInstanceObject> nothing;
+
+ // Check that an imports argument was provided, if the module requires it.
+ // No point in continuing otherwise.
+ if (!module_->import_table.empty() && ffi_.is_null()) {
+ thrower_->TypeError(
+ "Imports argument must be present and must be an object");
+ return nothing;
+ }
+
HistogramTimerScope wasm_instantiate_module_time_scope(
isolate_->counters()->wasm_instantiate_module_time());
Factory* factory = isolate_->factory();
@@ -982,8 +1181,7 @@ class WasmInstanceBuilder {
Handle<WasmCompiledModule> original;
{
DisallowHeapAllocation no_gc;
- original = handle(
- WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+ original = handle(module_object_->compiled_module());
if (original->has_weak_owning_instance()) {
owner = handle(WasmInstanceObject::cast(
original->weak_owning_instance()->value()));
@@ -1032,10 +1230,8 @@ class WasmInstanceBuilder {
compiled_module_->instance_id());
}
compiled_module_->set_code_table(code_table);
+ compiled_module_->set_native_context(isolate_->native_context());
}
- module_ = reinterpret_cast<WasmModuleWrapper*>(
- *compiled_module_->module_wrapper())
- ->get();
//--------------------------------------------------------------------------
// Allocate the instance object.
@@ -1049,8 +1245,9 @@ class WasmInstanceBuilder {
MaybeHandle<JSArrayBuffer> old_globals;
uint32_t globals_size = module_->globals_size;
if (globals_size > 0) {
+ const bool enable_guard_regions = false;
Handle<JSArrayBuffer> global_buffer =
- NewArrayBuffer(isolate_, globals_size);
+ NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
globals_ = global_buffer;
if (globals_.is_null()) {
thrower_->RangeError("Out of memory: wasm globals");
@@ -1072,9 +1269,9 @@ class WasmInstanceBuilder {
static_cast<int>(module_->function_tables.size());
table_instances_.reserve(module_->function_tables.size());
for (int index = 0; index < function_table_count; ++index) {
- table_instances_.push_back({Handle<WasmTableObject>::null(),
- Handle<FixedArray>::null(),
- Handle<FixedArray>::null()});
+ table_instances_.push_back(
+ {Handle<WasmTableObject>::null(), Handle<FixedArray>::null(),
+ Handle<FixedArray>::null(), Handle<FixedArray>::null()});
}
//--------------------------------------------------------------------------
@@ -1089,6 +1286,11 @@ class WasmInstanceBuilder {
InitGlobals();
//--------------------------------------------------------------------------
+ // Set up the indirect function tables for the new instance.
+ //--------------------------------------------------------------------------
+ if (function_table_count > 0) InitializeTables(code_table, instance);
+
+ //--------------------------------------------------------------------------
// Set up the memory for the new instance.
//--------------------------------------------------------------------------
MaybeHandle<JSArrayBuffer> old_memory;
@@ -1099,17 +1301,51 @@ class WasmInstanceBuilder {
if (!memory_.is_null()) {
// Set externally passed ArrayBuffer non neuterable.
memory_->set_is_neuterable(false);
+
+ DCHECK_IMPLIES(EnableGuardRegions(), module_->origin == kAsmJsOrigin ||
+ memory_->has_guard_region());
} else if (min_mem_pages > 0) {
memory_ = AllocateMemory(min_mem_pages);
if (memory_.is_null()) return nothing; // failed to allocate memory
}
+ //--------------------------------------------------------------------------
+ // Check that indirect function table segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (WasmTableInit& table_init : module_->table_inits) {
+ DCHECK(table_init.table_index < table_instances_.size());
+ uint32_t base = EvalUint32InitExpr(table_init.offset);
+ uint32_t table_size =
+ table_instances_[table_init.table_index].function_table->length();
+ if (!in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
+ table_size)) {
+ thrower_->LinkError("table initializer is out of bounds");
+ return nothing;
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Check that memory segments are within bounds.
+ //--------------------------------------------------------------------------
+ for (WasmDataSegment& seg : module_->data_segments) {
+ uint32_t base = EvalUint32InitExpr(seg.dest_addr);
+ uint32_t mem_size = memory_.is_null()
+ ? 0 : static_cast<uint32_t>(memory_->byte_length()->Number());
+ if (!in_bounds(base, seg.source_size, mem_size)) {
+ thrower_->LinkError("data segment is out of bounds");
+ return nothing;
+ }
+ }
+
+ //--------------------------------------------------------------------------
+ // Initialize memory.
+ //--------------------------------------------------------------------------
if (!memory_.is_null()) {
instance->set_memory_buffer(*memory_);
Address mem_start = static_cast<Address>(memory_->backing_store());
uint32_t mem_size =
static_cast<uint32_t>(memory_->byte_length()->Number());
- LoadDataSegments(mem_start, mem_size);
+ if (!LoadDataSegments(mem_start, mem_size)) return nothing;
uint32_t old_mem_size = compiled_module_->mem_size();
Address old_mem_start =
@@ -1117,11 +1353,10 @@ class WasmInstanceBuilder {
? static_cast<Address>(
compiled_module_->memory()->backing_store())
: nullptr;
- RelocateMemoryReferencesInCode(code_table, old_mem_start, mem_start,
- old_mem_size, mem_size);
+ RelocateMemoryReferencesInCode(
+ code_table, module_->num_imported_functions, old_mem_start, mem_start,
+ old_mem_size, mem_size);
compiled_module_->set_memory(memory_);
- } else {
- LoadDataSegments(nullptr, 0);
}
//--------------------------------------------------------------------------
@@ -1144,21 +1379,61 @@ class WasmInstanceBuilder {
//--------------------------------------------------------------------------
// Set up the exports object for the new instance.
//--------------------------------------------------------------------------
- ProcessExports(code_table, instance);
+ ProcessExports(code_table, instance, compiled_module_);
//--------------------------------------------------------------------------
- // Set up the indirect function tables for the new instance.
    // Add the instance to the Memory object.
//--------------------------------------------------------------------------
- if (function_table_count > 0) InitializeTables(code_table, instance);
-
- if (num_imported_functions > 0 || !owner.is_null()) {
- // If the code was cloned, or new imports were compiled, patch.
- PatchDirectCalls(old_code_table, code_table, num_imported_functions);
+ DCHECK(wasm::IsWasmInstance(*instance));
+ if (instance->has_memory_object()) {
+ instance->memory_object()->AddInstance(isolate_, instance);
}
+ //--------------------------------------------------------------------------
+ // Initialize the indirect function tables.
+ //--------------------------------------------------------------------------
+ if (function_table_count > 0) LoadTableSegments(code_table, instance);
+
+ // Patch new call sites and the context.
+ PatchDirectCallsAndContext(code_table, compiled_module_, module_,
+ num_imported_functions);
+
FlushICache(isolate_, code_table);
//--------------------------------------------------------------------------
+ // Unpack and notify signal handler of protected instructions.
+ //--------------------------------------------------------------------------
+ {
+ for (int i = 0; i < code_table->length(); ++i) {
+ Handle<Code> code = code_table->GetValueChecked<Code>(isolate_, i);
+
+ if (code->kind() != Code::WASM_FUNCTION) {
+ continue;
+ }
+
+ FixedArray* protected_instructions = code->protected_instructions();
+ DCHECK(protected_instructions != nullptr);
+ Zone zone(isolate_->allocator(), "Wasm Module");
+ ZoneVector<trap_handler::ProtectedInstructionData> unpacked(&zone);
+ for (int i = 0; i < protected_instructions->length();
+ i += Code::kTrapDataSize) {
+ trap_handler::ProtectedInstructionData data;
+ data.instr_offset =
+ protected_instructions
+ ->GetValueChecked<Smi>(isolate_, i + Code::kTrapCodeOffset)
+ ->value();
+ data.landing_offset =
+ protected_instructions
+ ->GetValueChecked<Smi>(isolate_, i + Code::kTrapLandingOffset)
+ ->value();
+ unpacked.emplace_back(data);
+ }
+ // TODO(eholk): Register the protected instruction information once the
+ // trap handler is in place.
+ }
+ }
+
+ //--------------------------------------------------------------------------
// Set up and link the new instance.
//--------------------------------------------------------------------------
{
@@ -1174,7 +1449,7 @@ class WasmInstanceBuilder {
      // we want all the publishing to happen free from GC interruptions, and
      // so we do it in one GC-free scope afterwards.
- original = handle(owner.ToHandleChecked()->get_compiled_module());
+ original = handle(owner.ToHandleChecked()->compiled_module());
link_to_original = factory->NewWeakCell(original.ToHandleChecked());
}
// Publish the new instance to the instances chain.
@@ -1194,30 +1469,20 @@ class WasmInstanceBuilder {
v8::WeakCallbackType::kFinalizer);
}
}
-
- DCHECK(wasm::IsWasmInstance(*instance));
- if (instance->has_memory_object()) {
- instance->get_memory_object()->AddInstance(*instance);
- }
-
//--------------------------------------------------------------------------
// Run the start function if one was specified.
//--------------------------------------------------------------------------
if (module_->start_function_index >= 0) {
HandleScope scope(isolate_);
- ModuleEnv module_env;
- module_env.module = module_;
- module_env.instance = nullptr;
- module_env.origin = module_->origin;
int start_index = module_->start_function_index;
Handle<Code> startup_code =
code_table->GetValueChecked<Code>(isolate_, start_index);
FunctionSig* sig = module_->functions[start_index].sig;
Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, &module_env, startup_code, start_index);
+ isolate_, module_, startup_code, start_index);
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
- isolate_, instance, factory->InternalizeUtf8String("start"),
- wrapper_code, static_cast<int>(sig->parameter_count()), start_index);
+ isolate_, instance, MaybeHandle<String>(), start_index,
+ static_cast<int>(sig->parameter_count()), wrapper_code);
RecordStats(isolate_, *startup_code);
// Call the JS function.
Handle<Object> undefined = factory->undefined_value();
@@ -1237,7 +1502,7 @@ class WasmInstanceBuilder {
DCHECK(!isolate_->has_pending_exception());
TRACE("Finishing instance %d\n", compiled_module_->instance_id());
- TRACE_CHAIN(WasmCompiledModule::cast(module_object_->GetInternalField(0)));
+ TRACE_CHAIN(module_object_->compiled_module());
return instance;
}
@@ -1246,13 +1511,14 @@ class WasmInstanceBuilder {
struct TableInstance {
Handle<WasmTableObject> table_object; // WebAssembly.Table instance
Handle<FixedArray> js_wrappers; // JSFunctions exported
- Handle<FixedArray> dispatch_table; // internal (code, sig) pairs
+ Handle<FixedArray> function_table; // internal code array
+ Handle<FixedArray> signature_table; // internal sig array
};
Isolate* isolate_;
- WasmModule* module_;
+ WasmModule* const module_;
ErrorThrower* thrower_;
- Handle<JSObject> module_object_;
+ Handle<WasmModuleObject> module_object_;
Handle<JSReceiver> ffi_;
Handle<JSArrayBuffer> memory_;
Handle<JSArrayBuffer> globals_;
@@ -1260,58 +1526,49 @@ class WasmInstanceBuilder {
std::vector<TableInstance> table_instances_;
std::vector<Handle<JSFunction>> js_wrappers_;
- // Helper routine to print out errors with imports (FFI).
- MaybeHandle<JSFunction> ReportFFIError(const char* error, uint32_t index,
- Handle<String> module_name,
- MaybeHandle<String> function_name) {
- Handle<String> function_name_handle;
- if (function_name.ToHandle(&function_name_handle)) {
- thrower_->TypeError(
- "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(),
- function_name_handle->length(),
- function_name_handle->ToCString().get(), error);
- } else {
- thrower_->TypeError("Import #%d module=\"%.*s\" error: %s", index,
- module_name->length(), module_name->ToCString().get(),
- error);
- }
- thrower_->TypeError("Import ");
- return MaybeHandle<JSFunction>();
+ // Helper routines to print out errors with imports.
+ void ReportLinkError(const char* error, uint32_t index,
+ Handle<String> module_name, Handle<String> import_name) {
+ thrower_->LinkError(
+ "Import #%d module=\"%.*s\" function=\"%.*s\" error: %s", index,
+ module_name->length(), module_name->ToCString().get(),
+ import_name->length(), import_name->ToCString().get(), error);
+ }
+
+ MaybeHandle<Object> ReportLinkError(const char* error, uint32_t index,
+ Handle<String> module_name) {
+ thrower_->LinkError("Import #%d module=\"%.*s\" error: %s", index,
+ module_name->length(), module_name->ToCString().get(),
+ error);
+ return MaybeHandle<Object>();
}
// Look up an import value in the {ffi_} object.
MaybeHandle<Object> LookupImport(uint32_t index, Handle<String> module_name,
- MaybeHandle<String> import_name) {
- if (ffi_.is_null()) {
- return ReportFFIError("FFI is not an object", index, module_name,
- import_name);
- }
+ Handle<String> import_name) {
+    // We pre-validated in the js-api layer that the ffi object is present
+    // and is a JSObject, if the module has imports.
+ DCHECK(!ffi_.is_null());
// Look up the module first.
- MaybeHandle<Object> result = Object::GetProperty(ffi_, module_name);
+ MaybeHandle<Object> result =
+ Object::GetPropertyOrElement(ffi_, module_name);
if (result.is_null()) {
- return ReportFFIError("module not found", index, module_name,
- import_name);
+ return ReportLinkError("module not found", index, module_name);
}
Handle<Object> module = result.ToHandleChecked();
- if (!import_name.is_null()) {
- // Look up the value in the module.
- if (!module->IsJSReceiver()) {
- return ReportFFIError("module is not an object or function", index,
- module_name, import_name);
- }
+ // Look up the value in the module.
+ if (!module->IsJSReceiver()) {
+ return ReportLinkError("module is not an object or function", index,
+ module_name);
+ }
- result = Object::GetProperty(module, import_name.ToHandleChecked());
- if (result.is_null()) {
- return ReportFFIError("import not found", index, module_name,
- import_name);
- }
- } else {
- // No function specified. Use the "default export".
- result = module;
+ result = Object::GetPropertyOrElement(module, import_name);
+ if (result.is_null()) {
+ ReportLinkError("import not found", index, module_name, import_name);
+ return MaybeHandle<JSFunction>();
}
return result;
@@ -1331,26 +1588,27 @@ class WasmInstanceBuilder {
}
}
+ bool in_bounds(uint32_t offset, uint32_t size, uint32_t upper) {
+ return offset + size <= upper && offset + size >= offset;
+ }
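+  // Worked example for the wraparound clause: offset = 0xFFFFFFF0,
+  // size = 0x20, upper = 0x10000. offset + size wraps to 0x10, so the first
+  // clause alone would wrongly pass; 0x10 >= 0xFFFFFFF0 is false, so the
+  // check correctly fails. In 64-bit arithmetic the whole predicate is
+  //   uint64_t{offset} + size <= upper.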
+
// Load data segments into the memory.
- void LoadDataSegments(Address mem_addr, size_t mem_size) {
- Handle<SeqOneByteString> module_bytes = compiled_module_->module_bytes();
+ bool LoadDataSegments(Address mem_addr, size_t mem_size) {
+ Handle<SeqOneByteString> module_bytes(compiled_module_->module_bytes(),
+ isolate_);
for (const WasmDataSegment& segment : module_->data_segments) {
uint32_t source_size = segment.source_size;
// Segments of size == 0 are just nops.
if (source_size == 0) continue;
uint32_t dest_offset = EvalUint32InitExpr(segment.dest_addr);
- if (dest_offset >= mem_size || source_size >= mem_size ||
- dest_offset > (mem_size - source_size)) {
- thrower_->TypeError("data segment (start = %" PRIu32 ", size = %" PRIu32
- ") does not fit into memory (size = %" PRIuS ")",
- dest_offset, source_size, mem_size);
- return;
- }
+ DCHECK(in_bounds(dest_offset, source_size,
+ static_cast<uint32_t>(mem_size)));
byte* dest = mem_addr + dest_offset;
const byte* src = reinterpret_cast<const byte*>(
module_bytes->GetCharsAddress() + segment.source_offset);
memcpy(dest, src, source_size);
}
+ return true;
}
void WriteGlobalValue(WasmGlobal& global, Handle<Object> value) {
@@ -1365,17 +1623,17 @@ class WasmInstanceBuilder {
TRACE("init [globals+%u] = %lf, type = %s\n", global.offset, num,
WasmOpcodes::TypeName(global.type));
switch (global.type) {
- case kAstI32:
+ case kWasmI32:
*GetRawGlobalPtr<int32_t>(global) = static_cast<int32_t>(num);
break;
- case kAstI64:
+ case kWasmI64:
// TODO(titzer): initialization of imported i64 globals.
UNREACHABLE();
break;
- case kAstF32:
+ case kWasmF32:
*GetRawGlobalPtr<float>(global) = static_cast<float>(num);
break;
- case kAstF64:
+ case kWasmF64:
*GetRawGlobalPtr<double>(global) = static_cast<double>(num);
break;
default:
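// GetRawGlobalPtr<T>(global) used above conceptually resolves to a typed
// pointer into the globals backing store (illustrative sketch, assuming the
// raw_buffer_ptr helper used elsewhere in this class):
//
//   template <typename T>
//   T* GetRawGlobalPtr(WasmGlobal& global) {
//     return reinterpret_cast<T*>(raw_buffer_ptr(globals_, global.offset));
//   }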
@@ -1393,39 +1651,43 @@ class WasmInstanceBuilder {
for (int index = 0; index < static_cast<int>(module_->import_table.size());
++index) {
WasmImport& import = module_->import_table[index];
- Handle<String> module_name =
- ExtractStringFromModuleBytes(isolate_, compiled_module_,
- import.module_name_offset,
- import.module_name_length)
- .ToHandleChecked();
- Handle<String> function_name = Handle<String>::null();
- if (import.field_name_length > 0) {
- function_name = ExtractStringFromModuleBytes(isolate_, compiled_module_,
- import.field_name_offset,
- import.field_name_length)
- .ToHandleChecked();
- }
+
+ Handle<String> module_name;
+ MaybeHandle<String> maybe_module_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, import.module_name_offset,
+ import.module_name_length);
+ if (!maybe_module_name.ToHandle(&module_name)) return -1;
+
+ Handle<String> import_name;
+ MaybeHandle<String> maybe_import_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, import.field_name_offset,
+ import.field_name_length);
+ if (!maybe_import_name.ToHandle(&import_name)) return -1;
MaybeHandle<Object> result =
- LookupImport(index, module_name, function_name);
+ LookupImport(index, module_name, import_name);
if (thrower_->error()) return -1;
+ Handle<Object> value = result.ToHandleChecked();
switch (import.kind) {
case kExternalFunction: {
// Function imports must be callable.
- Handle<Object> function = result.ToHandleChecked();
- if (!function->IsCallable()) {
- ReportFFIError("function import requires a callable", index,
- module_name, function_name);
+ if (!value->IsCallable()) {
+ ReportLinkError("function import requires a callable", index,
+ module_name, import_name);
return -1;
}
Handle<Code> import_wrapper = CompileImportWrapper(
isolate_, index, module_->functions[import.index].sig,
- Handle<JSReceiver>::cast(function), module_name, function_name);
+ Handle<JSReceiver>::cast(value), module_name, import_name,
+ module_->origin);
if (import_wrapper.is_null()) {
- ReportFFIError("imported function does not match the expected type",
- index, module_name, function_name);
+ ReportLinkError(
+ "imported function does not match the expected type", index,
+ module_name, import_name);
return -1;
}
code_table->set(num_imported_functions, *import_wrapper);
@@ -1434,10 +1696,9 @@ class WasmInstanceBuilder {
break;
}
case kExternalTable: {
- Handle<Object> value = result.ToHandleChecked();
if (!WasmJs::IsWasmTableObject(isolate_, value)) {
- ReportFFIError("table import requires a WebAssembly.Table", index,
- module_name, function_name);
+ ReportLinkError("table import requires a WebAssembly.Table", index,
+ module_name, import_name);
return -1;
}
WasmIndirectFunctionTable& table =
@@ -1445,23 +1706,43 @@ class WasmInstanceBuilder {
TableInstance& table_instance = table_instances_[num_imported_tables];
table_instance.table_object = Handle<WasmTableObject>::cast(value);
table_instance.js_wrappers = Handle<FixedArray>(
- table_instance.table_object->get_functions(), isolate_);
-
- // TODO(titzer): import table size must match exactly for now.
- int table_size = table_instance.js_wrappers->length();
- if (table_size != static_cast<int>(table.min_size)) {
- thrower_->TypeError(
- "table import %d is wrong size (%d), expected %u", index,
- table_size, table.min_size);
+ table_instance.table_object->functions(), isolate_);
+
+ int imported_cur_size = table_instance.js_wrappers->length();
+ if (imported_cur_size < static_cast<int>(table.min_size)) {
+ thrower_->LinkError(
+ "table import %d is smaller than minimum %d, got %u", index,
+ table.min_size, imported_cur_size);
return -1;
}
- // Allocate a new dispatch table.
- table_instance.dispatch_table =
- isolate_->factory()->NewFixedArray(table_size * 2);
- for (int i = 0; i < table_size * 2; ++i) {
- table_instance.dispatch_table->set(i,
- Smi::FromInt(kInvalidSigIndex));
+ if (table.has_max) {
+ int64_t imported_max_size =
+ table_instance.table_object->maximum_length();
+ if (imported_max_size < 0) {
+ thrower_->LinkError(
+ "table import %d has no maximum length, expected %d", index,
+ table.max_size);
+ return -1;
+ }
+ if (imported_max_size > table.max_size) {
+ thrower_->LinkError(
+ "table import %d has maximum larger than maximum %d, "
+ "got %" PRIx64,
+ index, table.max_size, imported_max_size);
+ return -1;
+ }
+ }
+
+ // Allocate a new dispatch table and signature table.
+ int table_size = imported_cur_size;
+ table_instance.function_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ table_instance.signature_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ for (int i = 0; i < table_size; ++i) {
+ table_instance.signature_table->set(i,
+ Smi::FromInt(kInvalidSigIndex));
}
// Initialize the dispatch table with the (foreign) JS functions
// that are already in the table.
@@ -1471,43 +1752,70 @@ class WasmInstanceBuilder {
WasmFunction* function =
GetWasmFunctionForImportWrapper(isolate_, val);
if (function == nullptr) {
- thrower_->TypeError("table import %d[%d] is not a WASM function",
+ thrower_->LinkError("table import %d[%d] is not a WASM function",
index, i);
return -1;
}
int sig_index = table.map.FindOrInsert(function->sig);
- table_instance.dispatch_table->set(i, Smi::FromInt(sig_index));
- table_instance.dispatch_table->set(i + table_size,
- *UnwrapImportWrapper(val));
+ table_instance.signature_table->set(i, Smi::FromInt(sig_index));
+ table_instance.function_table->set(i, *UnwrapImportWrapper(val));
}
num_imported_tables++;
break;
}
case kExternalMemory: {
- Handle<Object> object = result.ToHandleChecked();
- if (!WasmJs::IsWasmMemoryObject(isolate_, object)) {
- ReportFFIError("memory import must be a WebAssembly.Memory object",
- index, module_name, function_name);
+ // Validation should have failed if more than one memory object was
+ // provided.
+ DCHECK(!instance->has_memory_object());
+ if (!WasmJs::IsWasmMemoryObject(isolate_, value)) {
+ ReportLinkError("memory import must be a WebAssembly.Memory object",
+ index, module_name, import_name);
return -1;
}
- auto memory = Handle<WasmMemoryObject>::cast(object);
+ auto memory = Handle<WasmMemoryObject>::cast(value);
+ DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory));
instance->set_memory_object(*memory);
- memory_ = Handle<JSArrayBuffer>(memory->get_buffer(), isolate_);
+ memory_ = Handle<JSArrayBuffer>(memory->buffer(), isolate_);
+ uint32_t imported_cur_pages = static_cast<uint32_t>(
+ memory_->byte_length()->Number() / WasmModule::kPageSize);
+        if (imported_cur_pages < module_->min_mem_pages) {
+          thrower_->LinkError(
+              "memory import %d is smaller than minimum %u, got %u", index,
+              module_->min_mem_pages, imported_cur_pages);
+          return -1;
+        }
+ int32_t imported_max_pages = memory->maximum_pages();
+ if (module_->has_max_mem) {
+ if (imported_max_pages < 0) {
+ thrower_->LinkError(
+ "memory import %d has no maximum limit, expected at most %u",
+                index, module_->max_mem_pages);
+ return -1;
+ }
+ if (static_cast<uint32_t>(imported_max_pages) >
+ module_->max_mem_pages) {
+ thrower_->LinkError(
+ "memory import %d has larger maximum than maximum %u, got %d",
+ index, module_->max_mem_pages, imported_max_pages);
+ return -1;
+ }
+ }
break;
}
case kExternalGlobal: {
// Global imports are converted to numbers and written into the
// {globals_} array buffer.
- Handle<Object> object = result.ToHandleChecked();
- MaybeHandle<Object> number = Object::ToNumber(object);
- if (number.is_null()) {
- ReportFFIError("global import could not be converted to number",
- index, module_name, function_name);
+ if (module_->globals[import.index].type == kWasmI64) {
+ ReportLinkError("global import cannot have type i64", index,
+ module_name, import_name);
+ return -1;
+ }
+ if (!value->IsNumber()) {
+ ReportLinkError("global import must be a number", index,
+ module_name, import_name);
return -1;
}
- Handle<Object> val = number.ToHandleChecked();
- WriteGlobalValue(module_->globals[import.index], val);
+ WriteGlobalValue(module_->globals[import.index], value);
break;
}
default:
@@ -1546,7 +1854,7 @@ class WasmInstanceBuilder {
module_->globals[global.init.val.global_index].offset;
TRACE("init [globals+%u] = [globals+%d]\n", global.offset,
old_offset);
- size_t size = (global.type == kAstI64 || global.type == kAstF64)
+ size_t size = (global.type == kWasmI64 || global.type == kWasmF64)
? sizeof(double)
: sizeof(int32_t);
memcpy(raw_buffer_ptr(globals_, new_offset),
@@ -1565,12 +1873,13 @@ class WasmInstanceBuilder {
// Allocate memory for a module instance as a new JSArrayBuffer.
Handle<JSArrayBuffer> AllocateMemory(uint32_t min_mem_pages) {
- if (min_mem_pages > WasmModule::kV8MaxPages) {
+ if (min_mem_pages > kV8MaxWasmMemoryPages) {
thrower_->RangeError("Out of memory: wasm memory too large");
return Handle<JSArrayBuffer>::null();
}
- Handle<JSArrayBuffer> mem_buffer =
- NewArrayBuffer(isolate_, min_mem_pages * WasmModule::kPageSize);
+ const bool enable_guard_regions = EnableGuardRegions();
+ Handle<JSArrayBuffer> mem_buffer = NewArrayBuffer(
+ isolate_, min_mem_pages * WasmModule::kPageSize, enable_guard_regions);
if (mem_buffer.is_null()) {
thrower_->RangeError("Out of memory: wasm memory");
@@ -1578,31 +1887,30 @@ class WasmInstanceBuilder {
return mem_buffer;
}
- // Process the exports, creating wrappers for functions, tables, memories,
- // and globals.
- void ProcessExports(Handle<FixedArray> code_table,
- Handle<WasmInstanceObject> instance) {
- bool needs_wrappers = module_->num_exported_functions > 0;
+ bool NeedsWrappers() {
+ if (module_->num_exported_functions > 0) return true;
for (auto table_instance : table_instances_) {
- if (!table_instance.js_wrappers.is_null()) {
- needs_wrappers = true;
- break;
- }
+ if (!table_instance.js_wrappers.is_null()) return true;
}
for (auto table : module_->function_tables) {
- if (table.exported) {
- needs_wrappers = true;
- break;
- }
+ if (table.exported) return true;
}
- if (needs_wrappers) {
+ return false;
+ }
+
+ // Process the exports, creating wrappers for functions, tables, memories,
+ // and globals.
+ void ProcessExports(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance,
+ Handle<WasmCompiledModule> compiled_module) {
+ if (NeedsWrappers()) {
// Fill the table to cache the exported JSFunction wrappers.
js_wrappers_.insert(js_wrappers_.begin(), module_->functions.size(),
Handle<JSFunction>::null());
}
Handle<JSObject> exports_object = instance;
- if (module_->export_table.size() > 0 && module_->origin == kWasmOrigin) {
+ if (module_->origin == kWasmOrigin) {
// Create the "exports" object.
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate_->native_context()->object_function(), isolate_);
@@ -1610,38 +1918,68 @@ class WasmInstanceBuilder {
isolate_->factory()->NewJSObject(object_function, TENURED);
Handle<String> exports_name =
isolate_->factory()->InternalizeUtf8String("exports");
- JSObject::AddProperty(instance, exports_name, exports_object, READ_ONLY);
+ JSObject::AddProperty(instance, exports_name, exports_object, NONE);
}
PropertyDescriptor desc;
desc.set_writable(false);
+ desc.set_enumerable(true);
- // Process each export in the export table.
+ // Count the function exports; this sizes the weak-export array below.
int export_index = 0;
for (auto exp : module_->export_table) {
+ if (exp.kind == kExternalFunction) {
+ ++export_index;
+ }
+ }
+
+ // Store weak references to all exported functions.
+ Handle<FixedArray> weak_exported_functions;
+ if (compiled_module->has_weak_exported_functions()) {
+ weak_exported_functions = compiled_module->weak_exported_functions();
+ } else {
+ weak_exported_functions =
+ isolate_->factory()->NewFixedArray(export_index);
+ compiled_module->set_weak_exported_functions(weak_exported_functions);
+ }
+ DCHECK_EQ(export_index, weak_exported_functions->length());
+
+ // Process each export in the export table (go in reverse so asm.js
+ // can skip duplicates).
+ for (auto exp : base::Reversed(module_->export_table)) {
Handle<String> name =
- ExtractStringFromModuleBytes(isolate_, compiled_module_,
- exp.name_offset, exp.name_length)
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, exp.name_offset, exp.name_length)
.ToHandleChecked();
switch (exp.kind) {
case kExternalFunction: {
// Wrap and export the code as a JSFunction.
WasmFunction& function = module_->functions[exp.index];
int func_index =
- static_cast<int>(module_->functions.size() + export_index);
+ static_cast<int>(module_->functions.size() + --export_index);
Handle<JSFunction> js_function = js_wrappers_[exp.index];
if (js_function.is_null()) {
// Wrap the exported code as a JSFunction.
Handle<Code> export_code =
code_table->GetValueChecked<Code>(isolate_, func_index);
+ MaybeHandle<String> func_name;
+ if (module_->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ func_name = WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, function.name_offset,
+ function.name_length)
+ .ToHandleChecked();
+ }
js_function = WasmExportedFunction::New(
- isolate_, instance, name, export_code,
- static_cast<int>(function.sig->parameter_count()),
- function.func_index);
+ isolate_, instance, func_name, function.func_index,
+ static_cast<int>(function.sig->parameter_count()), export_code);
js_wrappers_[exp.index] = js_function;
}
desc.set_value(js_function);
- export_index++;
+ Handle<WeakCell> weak_export =
+ isolate_->factory()->NewWeakCell(js_function);
+ DCHECK_GT(weak_exported_functions->length(), export_index);
+ weak_exported_functions->set(export_index, *weak_export);
break;
}
case kExternalTable: {
@@ -1651,7 +1989,7 @@ class WasmInstanceBuilder {
module_->function_tables[exp.index];
if (table_instance.table_object.is_null()) {
uint32_t maximum =
- table.has_max ? table.max_size : WasmModule::kV8MaxTableSize;
+ table.has_max ? table.max_size : kV8MaxWasmTableSize;
table_instance.table_object = WasmTableObject::New(
isolate_, table.min_size, maximum, &table_instance.js_wrappers);
}
@@ -1663,15 +2001,16 @@ class WasmInstanceBuilder {
Handle<WasmMemoryObject> memory_object;
if (!instance->has_memory_object()) {
// If there was no imported WebAssembly.Memory object, create one.
- Handle<JSArrayBuffer> buffer(instance->get_memory_buffer(),
- isolate_);
+ Handle<JSArrayBuffer> buffer(instance->memory_buffer(), isolate_);
memory_object = WasmMemoryObject::New(
isolate_, buffer,
(module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
instance->set_memory_object(*memory_object);
} else {
- memory_object = Handle<WasmMemoryObject>(
- instance->get_memory_object(), isolate_);
+ memory_object =
+ Handle<WasmMemoryObject>(instance->memory_object(), isolate_);
+ DCHECK(WasmJs::IsWasmMemoryObject(isolate_, memory_object));
+ memory_object->ResetInstancesLink(isolate_);
}
desc.set_value(memory_object);
@@ -1682,15 +2021,19 @@ class WasmInstanceBuilder {
WasmGlobal& global = module_->globals[exp.index];
double num = 0;
switch (global.type) {
- case kAstI32:
+ case kWasmI32:
num = *GetRawGlobalPtr<int32_t>(global);
break;
- case kAstF32:
+ case kWasmF32:
num = *GetRawGlobalPtr<float>(global);
break;
- case kAstF64:
+ case kWasmF64:
num = *GetRawGlobalPtr<double>(global);
break;
+ case kWasmI64:
+ thrower_->LinkError(
+ "export of globals of type I64 is not allowed.");
+ break;
default:
UNREACHABLE();
}
@@ -1702,42 +2045,99 @@ class WasmInstanceBuilder {
break;
}
+ // Skip duplicates for asm.js.
+ if (module_->origin == kAsmJsOrigin) {
+ v8::Maybe<bool> status =
+ JSReceiver::HasOwnProperty(exports_object, name);
+ if (status.FromMaybe(false)) {
+ continue;
+ }
+ }
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
isolate_, exports_object, name, &desc, Object::THROW_ON_ERROR);
if (!status.IsJust()) {
- thrower_->TypeError("export of %.*s failed.", name->length(),
+ thrower_->LinkError("export of %.*s failed.", name->length(),
name->ToCString().get());
return;
}
}
+
+ if (module_->origin == kWasmOrigin) {
+ v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
+ exports_object, FROZEN, Object::DONT_THROW);
+ DCHECK(success.FromMaybe(false));
+ USE(success);
+ }
}
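
Why the export loop above walks the table with base::Reversed: asm.js modules may export the same name more than once, and JS assignment semantics mean the last write wins. Iterating back-to-front and skipping names already present on the exports object keeps exactly the last definition. A standalone C++ sketch of that pattern, separate from the patch, with illustrative names:

    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Hypothetical export table: "f" is exported twice.
      std::vector<std::pair<std::string, int>> export_table = {
          {"f", 1}, {"g", 2}, {"f", 3}};
      std::set<std::string> defined;
      // Reverse walk + skip-if-present keeps the last definition of "f".
      for (auto it = export_table.rbegin(); it != export_table.rend(); ++it) {
        if (!defined.insert(it->first).second) continue;  // duplicate: skip
        std::cout << it->first << " -> " << it->second << "\n";
      }
      // Prints "f -> 3" and "g -> 2".
    }
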
void InitializeTables(Handle<FixedArray> code_table,
Handle<WasmInstanceObject> instance) {
- Handle<FixedArray> old_function_tables =
- compiled_module_->function_tables();
int function_table_count =
static_cast<int>(module_->function_tables.size());
Handle<FixedArray> new_function_tables =
isolate_->factory()->NewFixedArray(function_table_count);
+ Handle<FixedArray> new_signature_tables =
+ isolate_->factory()->NewFixedArray(function_table_count);
for (int index = 0; index < function_table_count; ++index) {
WasmIndirectFunctionTable& table = module_->function_tables[index];
TableInstance& table_instance = table_instances_[index];
int table_size = static_cast<int>(table.min_size);
- if (table_instance.dispatch_table.is_null()) {
+ if (table_instance.function_table.is_null()) {
// Create a new dispatch table if necessary.
- table_instance.dispatch_table =
- isolate_->factory()->NewFixedArray(table_size * 2);
+ table_instance.function_table =
+ isolate_->factory()->NewFixedArray(table_size);
+ table_instance.signature_table =
+ isolate_->factory()->NewFixedArray(table_size);
for (int i = 0; i < table_size; ++i) {
// Fill the table with invalid signature indexes so that
// uninitialized entries will always fail the signature check.
- table_instance.dispatch_table->set(i, Smi::FromInt(kInvalidSigIndex));
+ table_instance.signature_table->set(i,
+ Smi::FromInt(kInvalidSigIndex));
+ }
+ } else {
+ // The table is imported; patch the table bounds check.
+ DCHECK(table_size <= table_instance.function_table->length());
+ if (table_size < table_instance.function_table->length()) {
+ RelocateTableSizeReferences(code_table, table_size,
+ table_instance.function_table->length());
}
}
new_function_tables->set(static_cast<int>(index),
- *table_instance.dispatch_table);
+ *table_instance.function_table);
+ new_signature_tables->set(static_cast<int>(index),
+ *table_instance.signature_table);
+ }
+
+ // Patch all code that has references to the old indirect tables.
+ Handle<FixedArray> old_function_tables =
+ compiled_module_->function_tables();
+ Handle<FixedArray> old_signature_tables =
+ compiled_module_->signature_tables();
+ for (int i = 0; i < code_table->length(); ++i) {
+ if (!code_table->get(i)->IsCode()) continue;
+ Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
+ for (int j = 0; j < function_table_count; ++j) {
+ ReplaceReferenceInCode(
+ code, Handle<Object>(old_function_tables->get(j), isolate_),
+ Handle<Object>(new_function_tables->get(j), isolate_));
+ ReplaceReferenceInCode(
+ code, Handle<Object>(old_signature_tables->get(j), isolate_),
+ Handle<Object>(new_signature_tables->get(j), isolate_));
+ }
+ }
+ compiled_module_->set_function_tables(new_function_tables);
+ compiled_module_->set_signature_tables(new_signature_tables);
+ }
+
+ void LoadTableSegments(Handle<FixedArray> code_table,
+ Handle<WasmInstanceObject> instance) {
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ for (int index = 0; index < function_table_count; ++index) {
+ WasmIndirectFunctionTable& table = module_->function_tables[index];
+ TableInstance& table_instance = table_instances_[index];
Handle<FixedArray> all_dispatch_tables;
if (!table_instance.table_object.is_null()) {
@@ -1745,28 +2145,24 @@ class WasmInstanceBuilder {
all_dispatch_tables = WasmTableObject::AddDispatchTable(
isolate_, table_instance.table_object,
Handle<WasmInstanceObject>::null(), index,
- Handle<FixedArray>::null());
+ Handle<FixedArray>::null(), Handle<FixedArray>::null());
}
// TODO(titzer): this does redundant work if there are multiple tables,
// since initializations are not sorted by table index.
for (auto table_init : module_->table_inits) {
uint32_t base = EvalUint32InitExpr(table_init.offset);
- if (base > static_cast<uint32_t>(table_size) ||
- (base + table_init.entries.size() >
- static_cast<uint32_t>(table_size))) {
- thrower_->CompileError("table initializer is out of bounds");
- continue;
- }
+ DCHECK(in_bounds(base, static_cast<uint32_t>(table_init.entries.size()),
+ table_instance.function_table->length()));
for (int i = 0; i < static_cast<int>(table_init.entries.size()); ++i) {
uint32_t func_index = table_init.entries[i];
WasmFunction* function = &module_->functions[func_index];
int table_index = static_cast<int>(i + base);
int32_t sig_index = table.map.Find(function->sig);
DCHECK_GE(sig_index, 0);
- table_instance.dispatch_table->set(table_index,
- Smi::FromInt(sig_index));
- table_instance.dispatch_table->set(table_index + table_size,
+ table_instance.signature_table->set(table_index,
+ Smi::FromInt(sig_index));
+ table_instance.function_table->set(table_index,
code_table->get(func_index));
if (!all_dispatch_tables.is_null()) {
@@ -1783,19 +2179,22 @@ class WasmInstanceBuilder {
temp_instance.mem_start = nullptr;
temp_instance.globals_start = nullptr;
- ModuleEnv module_env;
- module_env.module = module_;
- module_env.instance = &temp_instance;
- module_env.origin = module_->origin;
-
Handle<Code> wrapper_code = compiler::CompileJSToWasmWrapper(
- isolate_, &module_env, wasm_code, func_index);
+ isolate_, module_, wasm_code, func_index);
+ MaybeHandle<String> func_name;
+ if (module_->origin == kAsmJsOrigin) {
+ // For modules arising from asm.js, honor the names section.
+ func_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate_, compiled_module_, function->name_offset,
+ function->name_length)
+ .ToHandleChecked();
+ }
Handle<WasmExportedFunction> js_function =
WasmExportedFunction::New(
- isolate_, instance, isolate_->factory()->empty_string(),
- wrapper_code,
+ isolate_, instance, func_name, func_index,
static_cast<int>(function->sig->parameter_count()),
- func_index);
+ wrapper_code);
js_wrappers_[func_index] = js_function;
}
table_instance.js_wrappers->set(table_index,
@@ -1814,115 +2213,57 @@ class WasmInstanceBuilder {
// Add the new dispatch table to the WebAssembly.Table object.
all_dispatch_tables = WasmTableObject::AddDispatchTable(
isolate_, table_instance.table_object, instance, index,
- table_instance.dispatch_table);
+ table_instance.function_table, table_instance.signature_table);
}
}
- // Patch all code that has references to the old indirect tables.
- for (int i = 0; i < code_table->length(); ++i) {
- if (!code_table->get(i)->IsCode()) continue;
- Handle<Code> code(Code::cast(code_table->get(i)), isolate_);
- for (int j = 0; j < function_table_count; ++j) {
- ReplaceReferenceInCode(
- code, Handle<Object>(old_function_tables->get(j), isolate_),
- Handle<Object>(new_function_tables->get(j), isolate_));
- }
- }
- compiled_module_->set_function_tables(new_function_tables);
}
};
// Instantiates a WASM module, creating a WebAssembly.Instance from a
// WebAssembly.Module.
-MaybeHandle<JSObject> WasmModule::Instantiate(Isolate* isolate,
- ErrorThrower* thrower,
- Handle<JSObject> wasm_module,
- Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory) {
+MaybeHandle<WasmInstanceObject> WasmModule::Instantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> wasm_module, Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory) {
WasmInstanceBuilder builder(isolate, thrower, wasm_module, ffi, memory);
return builder.Build();
}
-Handle<String> wasm::GetWasmFunctionName(Isolate* isolate,
- Handle<Object> instance_or_undef,
- uint32_t func_index) {
- if (!instance_or_undef->IsUndefined(isolate)) {
- Handle<WasmCompiledModule> compiled_module(
- Handle<WasmInstanceObject>::cast(instance_or_undef)
- ->get_compiled_module());
- MaybeHandle<String> maybe_name =
- WasmCompiledModule::GetFunctionName(compiled_module, func_index);
- if (!maybe_name.is_null()) return maybe_name.ToHandleChecked();
- }
- return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
-}
-
bool wasm::IsWasmInstance(Object* object) {
return WasmInstanceObject::IsWasmInstanceObject(object);
}
-WasmCompiledModule* wasm::GetCompiledModule(Object* object) {
- return WasmInstanceObject::cast(object)->get_compiled_module();
-}
-
-bool wasm::WasmIsAsmJs(Object* instance, Isolate* isolate) {
- if (instance->IsUndefined(isolate)) return false;
- DCHECK(IsWasmInstance(instance));
- WasmCompiledModule* compiled_module =
- GetCompiledModule(JSObject::cast(instance));
- DCHECK_EQ(compiled_module->has_asm_js_offset_tables(),
- compiled_module->script()->type() == Script::TYPE_NORMAL);
- return compiled_module->has_asm_js_offset_tables();
-}
-
Handle<Script> wasm::GetScript(Handle<JSObject> instance) {
- DCHECK(IsWasmInstance(*instance));
- WasmCompiledModule* compiled_module = GetCompiledModule(*instance);
- DCHECK(compiled_module->has_script());
- return compiled_module->script();
-}
-
-int wasm::GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
- int byte_offset) {
- return WasmDebugInfo::GetAsmJsSourcePosition(GetDebugInfo(instance),
- func_index, byte_offset);
-}
-
-Handle<SeqOneByteString> wasm::GetWasmBytes(Handle<JSObject> object) {
- return Handle<WasmInstanceObject>::cast(object)
- ->get_compiled_module()
- ->module_bytes();
-}
-
-Handle<WasmDebugInfo> wasm::GetDebugInfo(Handle<JSObject> object) {
- auto instance = Handle<WasmInstanceObject>::cast(object);
- if (instance->has_debug_info()) {
- Handle<WasmDebugInfo> info(instance->get_debug_info(),
- instance->GetIsolate());
- return info;
- }
- Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
- instance->set_debug_info(*new_info);
- return new_info;
+ WasmCompiledModule* compiled_module =
+ WasmInstanceObject::cast(*instance)->compiled_module();
+ return handle(compiled_module->script());
}
-int wasm::GetNumberOfFunctions(Handle<JSObject> object) {
- return static_cast<int>(
- Handle<WasmInstanceObject>::cast(object)->module()->functions.size());
+bool wasm::IsWasmCodegenAllowed(Isolate* isolate, Handle<Context> context) {
+ return isolate->allow_code_gen_callback() == nullptr ||
+ isolate->allow_code_gen_callback()(v8::Utils::ToLocal(context));
}
// TODO(clemensh): origin can be inferred from asm_js_script; remove it.
MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
ModuleOrigin origin, Handle<Script> asm_js_script,
- const byte* asm_js_offset_tables_start,
- const byte* asm_js_offset_tables_end) {
+ Vector<const byte> asm_js_offset_table_bytes) {
MaybeHandle<WasmModuleObject> nothing;
+
+ if (origin != kAsmJsOrigin &&
+ !IsWasmCodegenAllowed(isolate, isolate->native_context())) {
+ thrower->CompileError("Wasm code generation disallowed in this context");
+ return nothing;
+ }
+
ModuleResult result = DecodeWasmModule(isolate, start, end, false, origin);
if (result.failed()) {
if (result.val) delete result.val;
thrower->CompileFailed("Wasm decoding failed", result);
return nothing;
}
+
// The {module_wrapper} will take ownership of the {WasmModule} object,
// and it will be destroyed when the GC reclaims the wrapper object.
Handle<WasmModuleWrapper> module_wrapper =
@@ -1930,61 +2271,15 @@ MaybeHandle<WasmModuleObject> wasm::CreateModuleObjectFromBytes(
// Compile the functions of the module, producing a compiled module.
MaybeHandle<WasmCompiledModule> maybe_compiled_module =
- result.val->CompileFunctions(isolate, module_wrapper, thrower);
+ result.val->CompileFunctions(isolate, module_wrapper, thrower,
+ ModuleWireBytes(start, end), asm_js_script,
+ asm_js_offset_table_bytes);
if (maybe_compiled_module.is_null()) return nothing;
Handle<WasmCompiledModule> compiled_module =
maybe_compiled_module.ToHandleChecked();
- DCHECK_EQ(origin == kAsmJsOrigin, !asm_js_script.is_null());
- DCHECK(!compiled_module->has_script());
- DCHECK(!compiled_module->has_asm_js_offset_tables());
- if (origin == kAsmJsOrigin) {
- // Set script for the asm.js source, and the offset table mapping wasm byte
- // offsets to source positions.
- compiled_module->set_script(asm_js_script);
- size_t offset_tables_len =
- asm_js_offset_tables_end - asm_js_offset_tables_start;
- DCHECK_GE(static_cast<size_t>(kMaxInt), offset_tables_len);
- Handle<ByteArray> offset_tables =
- isolate->factory()->NewByteArray(static_cast<int>(offset_tables_len));
- memcpy(offset_tables->GetDataStartAddress(), asm_js_offset_tables_start,
- offset_tables_len);
- compiled_module->set_asm_js_offset_tables(offset_tables);
- } else {
- // Create a new Script object representing this wasm module, store it in the
- // compiled wasm module, and register it at the debugger.
- Handle<Script> script =
- isolate->factory()->NewScript(isolate->factory()->empty_string());
- script->set_type(Script::TYPE_WASM);
-
- DCHECK_GE(kMaxInt, end - start);
- int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(start), static_cast<int>(end - start),
- kZeroHashSeed);
-
- const int kBufferSize = 50;
- char buffer[kBufferSize];
- int url_chars = SNPrintF(ArrayVector(buffer), "wasm://wasm/%08x", hash);
- DCHECK(url_chars >= 0 && url_chars < kBufferSize);
- MaybeHandle<String> url_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), url_chars),
- TENURED);
- script->set_source_url(*url_str.ToHandleChecked());
-
- int name_chars = SNPrintF(ArrayVector(buffer), "wasm-%08x", hash);
- DCHECK(name_chars >= 0 && name_chars < kBufferSize);
- MaybeHandle<String> name_str = isolate->factory()->NewStringFromOneByte(
- Vector<const uint8_t>(reinterpret_cast<uint8_t*>(buffer), name_chars),
- TENURED);
- script->set_name(*name_str.ToHandleChecked());
-
- script->set_wasm_compiled_module(*compiled_module);
- compiled_module->set_script(script);
- isolate->debug()->OnAfterCompile(script);
- }
-
return WasmModuleObject::New(isolate, compiled_module);
}
@@ -2000,24 +2295,25 @@ bool wasm::ValidateModuleBytes(Isolate* isolate, const byte* start,
return result.ok();
}
-MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(Isolate* isolate,
- Handle<JSObject> object) {
+MaybeHandle<JSArrayBuffer> wasm::GetInstanceMemory(
+ Isolate* isolate, Handle<WasmInstanceObject> object) {
auto instance = Handle<WasmInstanceObject>::cast(object);
if (instance->has_memory_buffer()) {
- return Handle<JSArrayBuffer>(instance->get_memory_buffer(), isolate);
+ return Handle<JSArrayBuffer>(instance->memory_buffer(), isolate);
}
return MaybeHandle<JSArrayBuffer>();
}
-void SetInstanceMemory(Handle<JSObject> object, JSArrayBuffer* buffer) {
+void SetInstanceMemory(Handle<WasmInstanceObject> instance,
+ JSArrayBuffer* buffer) {
DisallowHeapAllocation no_gc;
- auto instance = Handle<WasmInstanceObject>::cast(object);
instance->set_memory_buffer(buffer);
- instance->get_compiled_module()->set_ptr_to_memory(buffer);
+ instance->compiled_module()->set_ptr_to_memory(buffer);
}
int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
- Handle<JSObject> instance) {
+ Handle<WasmInstanceObject> instance) {
+ DCHECK(IsWasmInstance(*instance));
MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
GetInstanceMemory(isolate, instance);
Handle<JSArrayBuffer> buffer;
@@ -2028,92 +2324,222 @@ int32_t wasm::GetInstanceMemorySize(Isolate* isolate,
}
}
-uint32_t GetMaxInstanceMemorySize(Isolate* isolate,
- Handle<WasmInstanceObject> instance) {
+uint32_t GetMaxInstanceMemoryPages(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
if (instance->has_memory_object()) {
- Handle<WasmMemoryObject> memory_object(instance->get_memory_object(),
- isolate);
-
- int maximum = memory_object->maximum_pages();
- if (maximum > 0) return static_cast<uint32_t>(maximum);
+ Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
+ if (memory_object->has_maximum_pages()) {
+ uint32_t maximum = static_cast<uint32_t>(memory_object->maximum_pages());
+ if (maximum < kV8MaxWasmMemoryPages) return maximum;
+ }
}
- uint32_t compiled_max_pages =
- instance->get_compiled_module()->max_mem_pages();
+ uint32_t compiled_max_pages = instance->compiled_module()->max_mem_pages();
isolate->counters()->wasm_max_mem_pages_count()->AddSample(
compiled_max_pages);
if (compiled_max_pages != 0) return compiled_max_pages;
- return WasmModule::kV8MaxPages;
+ return kV8MaxWasmMemoryPages;
}
-int32_t wasm::GrowInstanceMemory(Isolate* isolate, Handle<JSObject> object,
- uint32_t pages) {
- if (!IsWasmInstance(*object)) return -1;
- auto instance = Handle<WasmInstanceObject>::cast(object);
- if (pages == 0) return GetInstanceMemorySize(isolate, instance);
- uint32_t max_pages = GetMaxInstanceMemorySize(isolate, instance);
-
- Address old_mem_start = nullptr;
- uint32_t old_size = 0, new_size = 0;
-
- MaybeHandle<JSArrayBuffer> maybe_mem_buffer =
- GetInstanceMemory(isolate, instance);
+Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
+ MaybeHandle<JSArrayBuffer> buffer,
+ uint32_t pages, uint32_t max_pages) {
Handle<JSArrayBuffer> old_buffer;
- if (!maybe_mem_buffer.ToHandle(&old_buffer) ||
- old_buffer->backing_store() == nullptr) {
- // If module object does not have linear memory associated with it,
- // Allocate new array buffer of given size.
- new_size = pages * WasmModule::kPageSize;
- if (max_pages < pages) return -1;
- } else {
+ Address old_mem_start = nullptr;
+ uint32_t old_size = 0;
+ if (buffer.ToHandle(&old_buffer) && old_buffer->backing_store() != nullptr) {
old_mem_start = static_cast<Address>(old_buffer->backing_store());
- old_size = old_buffer->byte_length()->Number();
- // If the old memory was zero-sized, we should have been in the
- // "undefined" case above.
DCHECK_NOT_NULL(old_mem_start);
- DCHECK(old_size + pages * WasmModule::kPageSize <=
- std::numeric_limits<uint32_t>::max());
- new_size = old_size + pages * WasmModule::kPageSize;
+ old_size = old_buffer->byte_length()->Number();
}
-
+ DCHECK(old_size + pages * WasmModule::kPageSize <=
+ std::numeric_limits<uint32_t>::max());
+ uint32_t new_size = old_size + pages * WasmModule::kPageSize;
if (new_size <= old_size || max_pages * WasmModule::kPageSize < new_size ||
- WasmModule::kV8MaxPages * WasmModule::kPageSize < new_size) {
- return -1;
- }
- Handle<JSArrayBuffer> buffer = NewArrayBuffer(isolate, new_size);
- if (buffer.is_null()) return -1;
- Address new_mem_start = static_cast<Address>(buffer->backing_store());
- if (old_size != 0) {
- memcpy(new_mem_start, old_mem_start, old_size);
- }
- SetInstanceMemory(instance, *buffer);
- Handle<FixedArray> code_table = instance->get_compiled_module()->code_table();
- RelocateMemoryReferencesInCode(code_table, old_mem_start, new_mem_start,
- old_size, new_size);
- if (instance->has_memory_object()) {
- instance->get_memory_object()->set_buffer(*buffer);
+ kV8MaxWasmMemoryPages * WasmModule::kPageSize < new_size) {
+ return Handle<JSArrayBuffer>::null();
}
+ Handle<JSArrayBuffer> new_buffer;
+ if (!old_buffer.is_null() && old_buffer->has_guard_region()) {
+ // We don't move the backing store, we simply change the protection to make
+ // more of it accessible.
+ base::OS::Unprotect(old_buffer->backing_store(), new_size);
+ reinterpret_cast<v8::Isolate*>(isolate)
+ ->AdjustAmountOfExternalAllocatedMemory(pages * WasmModule::kPageSize);
+ Handle<Object> new_size_object =
+ isolate->factory()->NewNumberFromSize(new_size);
+ old_buffer->set_byte_length(*new_size_object);
+ new_buffer = old_buffer;
+ } else {
+ const bool enable_guard_regions = false;
+ new_buffer = NewArrayBuffer(isolate, new_size, enable_guard_regions);
+ if (new_buffer.is_null()) return new_buffer;
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ if (old_size != 0) {
+ memcpy(new_mem_start, old_mem_start, old_size);
+ }
+ }
+ return new_buffer;
+}
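
The size logic above, stripped of the heap plumbing: growth fails when the request is a no-op or wraps around, exceeds the instance maximum, or exceeds the engine-wide page limit; when the buffer owns a guard region, growth happens in place by unprotecting more of the reservation instead of reallocating and copying. A standalone sketch with illustrative constants, separate from the patch:

    #include <cstdint>
    #include <optional>

    constexpr uint64_t kPageSize = 0x10000;       // 64 KiB wasm page
    constexpr uint64_t kEnginePageLimit = 16384;  // mirrors kV8MaxWasmMemoryPages

    // Returns the new byte length, or nullopt if the grow request must fail.
    std::optional<uint64_t> ComputeNewSize(uint64_t old_size, uint64_t pages,
                                           uint64_t max_pages) {
      uint64_t new_size = old_size + pages * kPageSize;
      if (new_size <= old_size) return std::nullopt;            // no growth
      if (new_size > max_pages * kPageSize) return std::nullopt;        // instance max
      if (new_size > kEnginePageLimit * kPageSize) return std::nullopt; // engine max
      return new_size;
    }
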
+
+void UncheckedUpdateInstanceMemory(Isolate* isolate,
+ Handle<WasmInstanceObject> instance,
+ Address old_mem_start, uint32_t old_size) {
+ DCHECK(instance->has_memory_buffer());
+ Handle<JSArrayBuffer> new_buffer(instance->memory_buffer());
+ uint32_t new_size = new_buffer->byte_length()->Number();
+ DCHECK(new_size <= std::numeric_limits<uint32_t>::max());
+ Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+ DCHECK_NOT_NULL(new_mem_start);
+ Handle<FixedArray> code_table = instance->compiled_module()->code_table();
+ RelocateMemoryReferencesInCode(
+ code_table, instance->compiled_module()->module()->num_imported_functions,
+ old_mem_start, new_mem_start, old_size, new_size);
+}
+
+int32_t wasm::GrowWebAssemblyMemory(Isolate* isolate,
+ Handle<WasmMemoryObject> receiver,
+ uint32_t pages) {
+ DCHECK(WasmJs::IsWasmMemoryObject(isolate, receiver));
+ Handle<WasmMemoryObject> memory_object =
+ handle(WasmMemoryObject::cast(*receiver));
+ MaybeHandle<JSArrayBuffer> memory_buffer = handle(memory_object->buffer());
+ Handle<JSArrayBuffer> old_buffer;
+ uint32_t old_size = 0;
+ Address old_mem_start = nullptr;
+ if (memory_buffer.ToHandle(&old_buffer) &&
+ old_buffer->backing_store() != nullptr) {
+ old_size = old_buffer->byte_length()->Number();
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ }
+ // Return the current size when growing by 0 pages.
+ if (pages == 0) {
+ DCHECK(old_size % WasmModule::kPageSize == 0);
+ return (old_size / WasmModule::kPageSize);
+ }
+ Handle<JSArrayBuffer> new_buffer;
+ if (!memory_object->has_instances_link()) {
+ // The memory object has no instance associated with it; just grow.
+ uint32_t max_pages;
+ if (memory_object->has_maximum_pages()) {
+ max_pages = static_cast<uint32_t>(memory_object->maximum_pages());
+ if (kV8MaxWasmMemoryPages < max_pages) return -1;
+ } else {
+ max_pages = kV8MaxWasmMemoryPages;
+ }
+ new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
+ if (new_buffer.is_null()) return -1;
+ } else {
+ Handle<WasmInstanceWrapper> instance_wrapper(
+ memory_object->instances_link());
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+ DCHECK(instance_wrapper->has_instance());
+ Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+ DCHECK(IsWasmInstance(*instance));
+ uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance);
+
+ // Grow memory object buffer and update instances associated with it.
+ new_buffer = GrowMemoryBuffer(isolate, memory_buffer, pages, max_pages);
+ if (new_buffer.is_null()) return -1;
+ DCHECK(!instance_wrapper->has_previous());
+ SetInstanceMemory(instance, *new_buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ while (instance_wrapper->has_next()) {
+ instance_wrapper = instance_wrapper->next_wrapper();
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*instance_wrapper));
+ Handle<WasmInstanceObject> instance = instance_wrapper->instance_object();
+ DCHECK(IsWasmInstance(*instance));
+ SetInstanceMemory(instance, *new_buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ }
+ }
+ memory_object->set_buffer(*new_buffer);
DCHECK(old_size % WasmModule::kPageSize == 0);
return (old_size / WasmModule::kPageSize);
}
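
When a Memory object is shared, every instance reached through its instances_link chain must be repointed at the grown buffer, which is what the wrapper walk above does. A simplified stand-in with hypothetical types, separate from the patch:

    struct Buffer { unsigned byte_length; };

    struct Instance {
      Buffer* memory;  // the instance's current view of linear memory
      Instance* next;  // next instance sharing the same Memory object
    };

    void UpdateAllInstances(Instance* head, Buffer* new_buffer) {
      // The real code also patches memory references in generated code
      // (UncheckedUpdateInstanceMemory) for each instance it visits.
      for (Instance* it = head; it != nullptr; it = it->next) {
        it->memory = new_buffer;
      }
    }
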
+int32_t wasm::GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t pages) {
+ if (!IsWasmInstance(*instance)) return -1;
+ if (pages == 0) return GetInstanceMemorySize(isolate, instance);
+ Handle<WasmInstanceObject> instance_obj(WasmInstanceObject::cast(*instance));
+ if (!instance_obj->has_memory_object()) {
+ // No other instances to grow; grow just this one.
+ MaybeHandle<JSArrayBuffer> instance_buffer =
+ GetInstanceMemory(isolate, instance);
+ Handle<JSArrayBuffer> old_buffer;
+ uint32_t old_size = 0;
+ Address old_mem_start = nullptr;
+ if (instance_buffer.ToHandle(&old_buffer) &&
+ old_buffer->backing_store() != nullptr) {
+ old_size = old_buffer->byte_length()->Number();
+ old_mem_start = static_cast<Address>(old_buffer->backing_store());
+ }
+ uint32_t max_pages = GetMaxInstanceMemoryPages(isolate, instance_obj);
+ Handle<JSArrayBuffer> buffer =
+ GrowMemoryBuffer(isolate, instance_buffer, pages, max_pages);
+ if (buffer.is_null()) return -1;
+ SetInstanceMemory(instance, *buffer);
+ UncheckedUpdateInstanceMemory(isolate, instance, old_mem_start, old_size);
+ DCHECK(old_size % WasmModule::kPageSize == 0);
+ return (old_size / WasmModule::kPageSize);
+ } else {
+ return GrowWebAssemblyMemory(isolate, handle(instance_obj->memory_object()),
+ pages);
+ }
+}
+
+void wasm::GrowDispatchTables(Isolate* isolate,
+ Handle<FixedArray> dispatch_tables,
+ uint32_t old_size, uint32_t count) {
+ DCHECK_EQ(0, dispatch_tables->length() % 4);
+ for (int i = 0; i < dispatch_tables->length(); i += 4) {
+ Handle<FixedArray> old_function_table(
+ FixedArray::cast(dispatch_tables->get(i + 2)));
+ Handle<FixedArray> old_signature_table(
+ FixedArray::cast(dispatch_tables->get(i + 3)));
+ Handle<FixedArray> new_function_table =
+ isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count);
+ Handle<FixedArray> new_signature_table =
+ isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count);
+
+ // Get code table for the instance
+ Handle<WasmInstanceObject> instance(
+ WasmInstanceObject::cast(dispatch_tables->get(i)));
+ Handle<FixedArray> code_table(instance->compiled_module()->code_table());
+
+ // Relocate size references
+ RelocateTableSizeReferences(code_table, old_size, old_size + count);
+
+ // Replace references of old tables with new tables.
+ for (int j = 0; j < code_table->length(); ++j) {
+ if (!code_table->get(j)->IsCode()) continue;
+ Handle<Code> code = Handle<Code>(Code::cast(code_table->get(j)));
+ ReplaceReferenceInCode(code, old_function_table, new_function_table);
+ ReplaceReferenceInCode(code, old_signature_table, new_signature_table);
+ }
+
+ // Update dispatch tables with new function/signature tables
+ dispatch_tables->set(i + 2, *new_function_table);
+ dispatch_tables->set(i + 3, *new_signature_table);
+ }
+}
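
GrowDispatchTables relies on a fixed record layout: the dispatch_tables FixedArray holds one 4-slot record per linked table, [instance, table_index, function_table, signature_table], which is why it asserts length() % 4 == 0 and reads slots i + 2 and i + 3. A sketch of that stride-4 layout with simplified stand-in types, separate from the patch:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Slot { void* object; };  // stand-in for a tagged heap value

    void ForEachDispatchTable(const std::vector<Slot>& dispatch_tables) {
      assert(dispatch_tables.size() % 4 == 0);
      for (size_t i = 0; i < dispatch_tables.size(); i += 4) {
        const Slot& instance = dispatch_tables[i];             // owning instance
        const Slot& table_index = dispatch_tables[i + 1];      // index in instance
        const Slot& function_table = dispatch_tables[i + 2];   // code entries
        const Slot& signature_table = dispatch_tables[i + 3];  // sig indices
        (void)instance; (void)table_index;
        (void)function_table; (void)signature_table;
      }
    }
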
+
void testing::ValidateInstancesChain(Isolate* isolate,
- Handle<JSObject> wasm_module,
+ Handle<WasmModuleObject> module_obj,
int instance_count) {
CHECK_GE(instance_count, 0);
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
CHECK_EQ(JSObject::cast(compiled_module->ptr_to_weak_wasm_module()->value()),
- *wasm_module);
+ *module_obj);
Object* prev = nullptr;
int found_instances = compiled_module->has_weak_owning_instance() ? 1 : 0;
WasmCompiledModule* current_instance = compiled_module;
while (current_instance->has_weak_next_instance()) {
CHECK((prev == nullptr && !current_instance->has_weak_prev_instance()) ||
current_instance->ptr_to_weak_prev_instance()->value() == prev);
- CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(),
- *wasm_module);
+ CHECK_EQ(current_instance->ptr_to_weak_wasm_module()->value(), *module_obj);
CHECK(IsWasmInstance(
current_instance->ptr_to_weak_owning_instance()->value()));
prev = current_instance;
@@ -2126,63 +2552,222 @@ void testing::ValidateInstancesChain(Isolate* isolate,
}
void testing::ValidateModuleState(Isolate* isolate,
- Handle<JSObject> wasm_module) {
+ Handle<WasmModuleObject> module_obj) {
DisallowHeapAllocation no_gc;
- WasmCompiledModule* compiled_module =
- WasmCompiledModule::cast(wasm_module->GetInternalField(0));
+ WasmCompiledModule* compiled_module = module_obj->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
- CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *wasm_module);
+ CHECK_EQ(compiled_module->ptr_to_weak_wasm_module()->value(), *module_obj);
CHECK(!compiled_module->has_weak_prev_instance());
CHECK(!compiled_module->has_weak_next_instance());
CHECK(!compiled_module->has_weak_owning_instance());
}
void testing::ValidateOrphanedInstance(Isolate* isolate,
- Handle<JSObject> object) {
+ Handle<WasmInstanceObject> instance) {
DisallowHeapAllocation no_gc;
- WasmInstanceObject* instance = WasmInstanceObject::cast(*object);
- WasmCompiledModule* compiled_module = instance->get_compiled_module();
+ WasmCompiledModule* compiled_module = instance->compiled_module();
CHECK(compiled_module->has_weak_wasm_module());
CHECK(compiled_module->ptr_to_weak_wasm_module()->cleared());
}
-void WasmCompiledModule::RecreateModuleWrapper(Isolate* isolate,
- Handle<FixedArray> array) {
- Handle<WasmCompiledModule> compiled_module(
- reinterpret_cast<WasmCompiledModule*>(*array), isolate);
+Handle<JSArray> wasm::GetImports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
+ Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+ isolate);
+ Factory* factory = isolate->factory();
- WasmModule* module = nullptr;
+ Handle<String> module_string = factory->InternalizeUtf8String("module");
+ Handle<String> name_string = factory->InternalizeUtf8String("name");
+ Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+
+ Handle<String> function_string = factory->InternalizeUtf8String("function");
+ Handle<String> table_string = factory->InternalizeUtf8String("table");
+ Handle<String> memory_string = factory->InternalizeUtf8String("memory");
+ Handle<String> global_string = factory->InternalizeUtf8String("global");
+
+ // Create the result array.
+ WasmModule* module = compiled_module->module();
+ int num_imports = static_cast<int>(module->import_table.size());
+ Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<FixedArray> storage = factory->NewFixedArray(num_imports);
+ JSArray::SetContent(array_object, storage);
+ array_object->set_length(Smi::FromInt(num_imports));
+
+ Handle<JSFunction> object_function =
+ Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+
+ // Populate the result array.
+ for (int index = 0; index < num_imports; ++index) {
+ WasmImport& import = module->import_table[index];
+
+ Handle<JSObject> entry = factory->NewJSObject(object_function);
+
+ Handle<String> import_kind;
+ switch (import.kind) {
+ case kExternalFunction:
+ import_kind = function_string;
+ break;
+ case kExternalTable:
+ import_kind = table_string;
+ break;
+ case kExternalMemory:
+ import_kind = memory_string;
+ break;
+ case kExternalGlobal:
+ import_kind = global_string;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ MaybeHandle<String> import_module =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, import.module_name_offset,
+ import.module_name_length);
+
+ MaybeHandle<String> import_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, import.field_name_offset,
+ import.field_name_length);
+
+ JSObject::AddProperty(entry, module_string, import_module.ToHandleChecked(),
+ NONE);
+ JSObject::AddProperty(entry, name_string, import_name.ToHandleChecked(),
+ NONE);
+ JSObject::AddProperty(entry, kind_string, import_kind, NONE);
+
+ storage->set(index, *entry);
+ }
+
+ return array_object;
+}
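
The records built above are what WebAssembly.Module.imports() hands back to JS: one { module, name, kind } object per import, in module order. As a plain C++ stand-in, separate from the patch:

    #include <string>
    #include <vector>

    enum class ImportKind { kFunction, kTable, kMemory, kGlobal };

    struct ImportDescriptor {
      std::string module;  // the "module" property
      std::string name;    // the "name" property
      ImportKind kind;     // surfaced as "function"/"table"/"memory"/"global"
    };

    using ImportList = std::vector<ImportDescriptor>;  // one entry per import
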
+
+Handle<JSArray> wasm::GetExports(Isolate* isolate,
+ Handle<WasmModuleObject> module_object) {
+ Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+ isolate);
+ Factory* factory = isolate->factory();
+
+ Handle<String> name_string = factory->InternalizeUtf8String("name");
+ Handle<String> kind_string = factory->InternalizeUtf8String("kind");
+
+ Handle<String> function_string = factory->InternalizeUtf8String("function");
+ Handle<String> table_string = factory->InternalizeUtf8String("table");
+ Handle<String> memory_string = factory->InternalizeUtf8String("memory");
+ Handle<String> global_string = factory->InternalizeUtf8String("global");
+
+ // Create the result array.
+ WasmModule* module = compiled_module->module();
+ int num_exports = static_cast<int>(module->export_table.size());
+ Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<FixedArray> storage = factory->NewFixedArray(num_exports);
+ JSArray::SetContent(array_object, storage);
+ array_object->set_length(Smi::FromInt(num_exports));
+
+ Handle<JSFunction> object_function =
+ Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
+
+ // Populate the result array.
+ for (int index = 0; index < num_exports; ++index) {
+ WasmExport& exp = module->export_table[index];
+
+ Handle<String> export_kind;
+ switch (exp.kind) {
+ case kExternalFunction:
+ export_kind = function_string;
+ break;
+ case kExternalTable:
+ export_kind = table_string;
+ break;
+ case kExternalMemory:
+ export_kind = memory_string;
+ break;
+ case kExternalGlobal:
+ export_kind = global_string;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ Handle<JSObject> entry = factory->NewJSObject(object_function);
+
+ MaybeHandle<String> export_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, exp.name_offset, exp.name_length);
+
+ JSObject::AddProperty(entry, name_string, export_name.ToHandleChecked(),
+ NONE);
+ JSObject::AddProperty(entry, kind_string, export_kind, NONE);
+
+ storage->set(index, *entry);
+ }
+
+ return array_object;
+}
+
+Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
+ Handle<WasmModuleObject> module_object,
+ Handle<String> name,
+ ErrorThrower* thrower) {
+ Handle<WasmCompiledModule> compiled_module(module_object->compiled_module(),
+ isolate);
+ Factory* factory = isolate->factory();
+
+ std::vector<CustomSectionOffset> custom_sections;
{
- Handle<SeqOneByteString> module_bytes = compiled_module->module_bytes();
- // We parse the module again directly from the module bytes, so
- // the underlying storage must not be moved meanwhile.
- DisallowHeapAllocation no_allocation;
+ DisallowHeapAllocation no_gc; // for raw access to string bytes.
+ Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+ isolate);
const byte* start =
reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
const byte* end = start + module_bytes->length();
- // TODO(titzer): remember the module origin in the compiled_module
- // For now, we assume serialized modules did not originate from asm.js.
- ModuleResult result =
- DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
- CHECK(result.ok());
- CHECK_NOT_NULL(result.val);
- module = const_cast<WasmModule*>(result.val);
+ custom_sections = DecodeCustomSections(start, end);
+ }
+
+ std::vector<Handle<Object>> matching_sections;
+
+ // Gather matching sections.
+ for (auto section : custom_sections) {
+ MaybeHandle<String> section_name =
+ WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, section.name_offset, section.name_length);
+
+ if (!name->Equals(*section_name.ToHandleChecked())) continue;
+
+ // Make a copy of the payload data in the section.
+ bool is_external; // Set by TryAllocateBackingStore
+ void* memory = TryAllocateBackingStore(isolate, section.payload_length,
+ false, is_external);
+
+ Handle<Object> section_data = factory->undefined_value();
+ if (memory) {
+ Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
+ static_cast<int>(section.payload_length));
+ DisallowHeapAllocation no_gc; // for raw access to string bytes.
+ Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+ isolate);
+ const byte* start =
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+ memcpy(memory, start + section.payload_offset, section.payload_length);
+ section_data = buffer;
+ } else {
+ thrower->RangeError("out of memory allocating custom section data");
+ return Handle<JSArray>();
+ }
+
+ matching_sections.push_back(section_data);
}
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate, module);
+ int num_custom_sections = static_cast<int>(matching_sections.size());
+ Handle<JSArray> array_object = factory->NewJSArray(FAST_ELEMENTS, 0, 0);
+ Handle<FixedArray> storage = factory->NewFixedArray(num_custom_sections);
+ JSArray::SetContent(array_object, storage);
+ array_object->set_length(Smi::FromInt(num_custom_sections));
- compiled_module->set_module_wrapper(module_wrapper);
- DCHECK(WasmCompiledModule::IsWasmCompiledModule(*compiled_module));
-}
+ for (int i = 0; i < num_custom_sections; i++) {
+ storage->set(i, *matching_sections[i]);
+ }
-MaybeHandle<String> WasmCompiledModule::GetFunctionName(
- Handle<WasmCompiledModule> compiled_module, uint32_t func_index) {
- DCHECK_LT(func_index, compiled_module->module()->functions.size());
- WasmFunction& function = compiled_module->module()->functions[func_index];
- Isolate* isolate = compiled_module->GetIsolate();
- MaybeHandle<String> string = ExtractStringFromModuleBytes(
- isolate, compiled_module, function.name_offset, function.name_length);
- if (!string.is_null()) return string.ToHandleChecked();
- return {};
+ return array_object;
}
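
GetCustomSections keeps every section whose name matches and copies each payload into a fresh ArrayBuffer; matches come back in module order, and duplicates are all returned. The matching logic minus the heap plumbing, as a standalone sketch separate from the patch:

    #include <cstdint>
    #include <string>
    #include <vector>

    struct CustomSection {
      std::string name;
      std::vector<uint8_t> payload;
    };

    std::vector<std::vector<uint8_t>> MatchingPayloads(
        const std::vector<CustomSection>& sections, const std::string& name) {
      std::vector<std::vector<uint8_t>> out;
      for (const CustomSection& s : sections) {
        if (s.name == name) out.push_back(s.payload);  // copy, as the code does
      }
      return out;
    }
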
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 2ad46e21b6..2f368a7391 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -8,6 +8,7 @@
#include <memory>
#include "src/api.h"
+#include "src/debug/debug-interface.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/parsing/preparse-data.h"
@@ -22,6 +23,8 @@ namespace internal {
class WasmCompiledModule;
class WasmDebugInfo;
class WasmModuleObject;
+class WasmInstanceObject;
+class WasmMemoryObject;
namespace compiler {
class CallDescriptor;
@@ -31,11 +34,8 @@ class WasmCompilationUnit;
namespace wasm {
class ErrorThrower;
-const size_t kMaxModuleSize = 1024 * 1024 * 1024;
-const size_t kMaxFunctionSize = 128 * 1024;
-const size_t kMaxStringSize = 256;
const uint32_t kWasmMagic = 0x6d736100;
-const uint32_t kWasmVersion = 0x0d;
+const uint32_t kWasmVersion = 0x01;
const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
@@ -63,7 +63,6 @@ inline bool IsValidSectionCode(uint8_t byte) {
const char* SectionName(WasmSectionCode code);
// Constants for fixed-size elements within a module.
-static const uint32_t kMaxReturnCount = 1;
static const uint8_t kResizableMaximumFlag = 1;
static const int32_t kInvalidFunctionIndex = -1;
@@ -118,7 +117,7 @@ struct WasmFunction {
// Static representation of a wasm global variable.
struct WasmGlobal {
- LocalType type; // type of the global.
+ ValueType type; // type of the global.
bool mutability; // {true} if mutable.
WasmInitExpr init; // the initialization expression of the global.
uint32_t offset; // offset into global memory.
@@ -170,21 +169,18 @@ struct WasmExport {
uint32_t index; // index into the respective space.
};
-enum ModuleOrigin { kWasmOrigin, kAsmJsOrigin };
+enum ModuleOrigin : uint8_t { kWasmOrigin, kAsmJsOrigin };
+struct ModuleWireBytes;
// Static representation of a module.
struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
- static const size_t kV8MaxPages = 16384; // Maximum memory size = 1gb
- static const size_t kSpecMaxPages = 65536; // Maximum according to the spec
- static const size_t kV8MaxTableSize = 16 * 1024 * 1024;
Zone* owned_zone;
- const byte* module_start = nullptr; // starting address for the module bytes
- const byte* module_end = nullptr; // end address for the module bytes
uint32_t min_mem_pages = 0; // minimum size of the memory in 64k pages
uint32_t max_mem_pages = 0; // maximum size of the memory in 64k pages
+ bool has_max_mem = false; // true if a maximum memory size exists
bool has_memory = false; // true if the memory was defined or imported
bool mem_export = false; // true if the memory is exported
// TODO(wasm): reconcile start function index being an int with
@@ -214,56 +210,23 @@ struct V8_EXPORT_PRIVATE WasmModule {
// switch to libc-2.21 or higher.
std::unique_ptr<base::Semaphore> pending_tasks;
- WasmModule() : WasmModule(nullptr, nullptr) {}
- WasmModule(Zone* owned_zone, const byte* module_start);
+ WasmModule() : WasmModule(nullptr) {}
+ WasmModule(Zone* owned_zone);
~WasmModule() {
if (owned_zone) delete owned_zone;
}
- // Get a string stored in the module bytes representing a name.
- WasmName GetName(uint32_t offset, uint32_t length) const {
- if (length == 0) return {"<?>", 3}; // no name.
- CHECK(BoundsCheck(offset, offset + length));
- DCHECK_GE(static_cast<int>(length), 0);
- return {reinterpret_cast<const char*>(module_start + offset),
- static_cast<int>(length)};
- }
-
- // Get a string stored in the module bytes representing a function name.
- WasmName GetName(WasmFunction* function) const {
- return GetName(function->name_offset, function->name_length);
- }
-
- // Get a string stored in the module bytes representing a name.
- WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
- if (offset == 0 && length == 0) return {NULL, 0}; // no name.
- CHECK(BoundsCheck(offset, offset + length));
- DCHECK_GE(static_cast<int>(length), 0);
- return {reinterpret_cast<const char*>(module_start + offset),
- static_cast<int>(length)};
- }
-
- // Get a string stored in the module bytes representing a function name.
- WasmName GetNameOrNull(const WasmFunction* function) const {
- return GetNameOrNull(function->name_offset, function->name_length);
- }
-
- // Checks the given offset range is contained within the module bytes.
- bool BoundsCheck(uint32_t start, uint32_t end) const {
- size_t size = module_end - module_start;
- return start <= size && end <= size;
- }
-
// Creates a new instantiation of the module in the given isolate.
- static MaybeHandle<JSObject> Instantiate(Isolate* isolate,
- ErrorThrower* thrower,
- Handle<JSObject> wasm_module,
- Handle<JSReceiver> ffi,
- Handle<JSArrayBuffer> memory);
+ static MaybeHandle<WasmInstanceObject> Instantiate(
+ Isolate* isolate, ErrorThrower* thrower,
+ Handle<WasmModuleObject> wasm_module, Handle<JSReceiver> ffi,
+ Handle<JSArrayBuffer> memory = Handle<JSArrayBuffer>::null());
MaybeHandle<WasmCompiledModule> CompileFunctions(
Isolate* isolate, Handle<Managed<WasmModule>> module_wrapper,
- ErrorThrower* thrower) const;
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes) const;
};
typedef Managed<WasmModule> WasmModuleWrapper;
@@ -272,11 +235,10 @@ typedef Managed<WasmModule> WasmModuleWrapper;
struct WasmInstance {
const WasmModule* module; // static representation of the module.
// -- Heap allocated --------------------------------------------------------
- Handle<JSObject> js_object; // JavaScript module object.
Handle<Context> context; // JavaScript native context.
- Handle<JSArrayBuffer> mem_buffer; // Handle to array buffer of memory.
- Handle<JSArrayBuffer> globals_buffer; // Handle to array buffer of globals.
std::vector<Handle<FixedArray>> function_tables; // indirect function tables.
+ std::vector<Handle<FixedArray>>
+ signature_tables; // indirect signature tables.
std::vector<Handle<Code>> function_code; // code objects for each function.
// -- raw memory ------------------------------------------------------------
byte* mem_start = nullptr; // start of linear memory.
@@ -287,15 +249,67 @@ struct WasmInstance {
explicit WasmInstance(const WasmModule* m)
: module(m),
function_tables(m->function_tables.size()),
+ signature_tables(m->function_tables.size()),
function_code(m->functions.size()) {}
};
+// Interface to the storage (wire bytes) of a wasm module.
+// It is illegal for anyone receiving a ModuleWireBytes to store pointers based
+// on module_bytes, as this storage is only guaranteed to be alive as long as
+// this struct is alive.
+struct V8_EXPORT_PRIVATE ModuleWireBytes {
+ ModuleWireBytes(Vector<const byte> module_bytes)
+ : module_bytes(module_bytes) {}
+ ModuleWireBytes(const byte* start, const byte* end)
+ : module_bytes(start, static_cast<int>(end - start)) {
+ DCHECK_GE(kMaxInt, end - start);
+ }
+
+ const Vector<const byte> module_bytes;
+
+ // Get a string stored in the module bytes representing a name.
+ WasmName GetName(uint32_t offset, uint32_t length) const {
+ if (length == 0) return {"<?>", 3}; // no name.
+ CHECK(BoundsCheck(offset, length));
+ DCHECK_GE(length, 0);
+ return Vector<const char>::cast(
+ module_bytes.SubVector(offset, offset + length));
+ }
+
+ // Get a string stored in the module bytes representing a function name.
+ WasmName GetName(const WasmFunction* function) const {
+ return GetName(function->name_offset, function->name_length);
+ }
+
+ // Get a string stored in the module bytes representing a name.
+ WasmName GetNameOrNull(uint32_t offset, uint32_t length) const {
+ if (offset == 0 && length == 0) return {NULL, 0}; // no name.
+ CHECK(BoundsCheck(offset, length));
+ DCHECK_GE(length, 0);
+ return Vector<const char>::cast(
+ module_bytes.SubVector(offset, offset + length));
+ }
+
+ // Get a string stored in the module bytes representing a function name.
+ WasmName GetNameOrNull(const WasmFunction* function) const {
+ return GetNameOrNull(function->name_offset, function->name_length);
+ }
+
+ // Checks the given offset range is contained within the module bytes.
+ bool BoundsCheck(uint32_t offset, uint32_t length) const {
+ uint32_t size = static_cast<uint32_t>(module_bytes.length());
+ return offset <= size && length <= size - offset;
+ }
+};
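
Note the form of BoundsCheck above: it tests length <= size - offset rather than offset + length <= size, because the naive 32-bit sum can wrap around and falsely pass. A compilable illustration, separate from the patch:

    #include <cassert>
    #include <cstdint>

    bool BoundsCheckSafe(uint32_t offset, uint32_t length, uint32_t size) {
      return offset <= size && length <= size - offset;  // cannot wrap
    }

    bool BoundsCheckNaive(uint32_t offset, uint32_t length, uint32_t size) {
      return offset + length <= size;  // sum may wrap to a small value
    }

    int main() {
      // 0xFFFFFFF0 + 0x20 wraps to 0x10, so the naive check wrongly passes.
      assert(!BoundsCheckSafe(0xFFFFFFF0u, 0x20u, 0x100u));
      assert(BoundsCheckNaive(0xFFFFFFF0u, 0x20u, 0x100u));
    }
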
+
// Interface provided to the decoder/graph builder which contains only
// minimal information about the globals, functions, and function tables.
struct V8_EXPORT_PRIVATE ModuleEnv {
+ ModuleEnv(const WasmModule* module, WasmInstance* instance)
+ : module(module), instance(instance) {}
+
const WasmModule* module;
WasmInstance* instance;
- ModuleOrigin origin;
bool IsValidGlobal(uint32_t index) const {
return module && index < module->globals.size();
@@ -309,7 +323,7 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
bool IsValidTable(uint32_t index) const {
return module && index < module->function_tables.size();
}
- LocalType GetGlobalType(uint32_t index) {
+ ValueType GetGlobalType(uint32_t index) {
DCHECK(IsValidGlobal(index));
return module->globals[index].type;
}
@@ -326,7 +340,7 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
return &module->function_tables[index];
}
- bool asm_js() { return origin == kAsmJsOrigin; }
+ bool asm_js() { return module->origin == kAsmJsOrigin; }
Handle<Code> GetFunctionCode(uint32_t index) {
DCHECK_NOT_NULL(instance);
@@ -341,42 +355,33 @@ struct V8_EXPORT_PRIVATE ModuleEnv {
Zone* zone, compiler::CallDescriptor* descriptor);
};
+// A ModuleEnv together with ModuleWireBytes.
+struct ModuleBytesEnv : public ModuleEnv, public ModuleWireBytes {
+ ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
+ Vector<const byte> module_bytes)
+ : ModuleEnv(module, instance), ModuleWireBytes(module_bytes) {}
+ ModuleBytesEnv(const WasmModule* module, WasmInstance* instance,
+ const ModuleWireBytes& wire_bytes)
+ : ModuleEnv(module, instance), ModuleWireBytes(wire_bytes) {}
+};
+
// A helper for printing out the names of functions.
struct WasmFunctionName {
+ WasmFunctionName(const WasmFunction* function, ModuleBytesEnv* module_env)
+ : function_(function), name_(module_env->GetNameOrNull(function)) {}
+
const WasmFunction* function_;
- const WasmModule* module_;
- WasmFunctionName(const WasmFunction* function, const ModuleEnv* menv)
- : function_(function), module_(menv ? menv->module : nullptr) {}
+ WasmName name_;
};
std::ostream& operator<<(std::ostream& os, const WasmModule& module);
std::ostream& operator<<(std::ostream& os, const WasmFunction& function);
std::ostream& operator<<(std::ostream& os, const WasmFunctionName& name);
-// Extract a function name from the given wasm instance.
-// Returns "<WASM UNNAMED>" if no instance is passed, the function is unnamed or
-// the name is not a valid UTF-8 string.
-// TODO(5620): Refactor once we always get a wasm instance.
-Handle<String> GetWasmFunctionName(Isolate* isolate, Handle<Object> instance,
- uint32_t func_index);
-
-// Return the binary source bytes of a wasm module.
-Handle<SeqOneByteString> GetWasmBytes(Handle<JSObject> wasm);
-
// Get the debug info associated with the given wasm object.
// If no debug info exists yet, it is created automatically.
Handle<WasmDebugInfo> GetDebugInfo(Handle<JSObject> wasm);
-// Return the number of functions in the given wasm object.
-int GetNumberOfFunctions(Handle<JSObject> wasm);
-
-// Create and export JSFunction
-Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
- Handle<Code> export_code,
- Handle<String> name,
- FunctionSig* sig, int func_index,
- Handle<JSObject> instance);
-
// Check whether the given object represents a WebAssembly.Instance instance.
// This checks the number and type of internal fields, so it's not 100 percent
// secure. If it turns out that we need more complete checks, we could add a
@@ -384,26 +389,26 @@ Handle<JSFunction> WrapExportCodeAsJSFunction(Isolate* isolate,
// else.
bool IsWasmInstance(Object* instance);
-// Return the compiled module object for this WASM instance.
-WasmCompiledModule* GetCompiledModule(Object* wasm_instance);
-
-// Check whether the wasm module was generated from asm.js code.
-bool WasmIsAsmJs(Object* instance, Isolate* isolate);
-
// Get the script of the wasm module. If the origin of the module is asm.js, the
// returned Script will be a JavaScript Script of Script::TYPE_NORMAL, otherwise
// it's of type TYPE_WASM.
Handle<Script> GetScript(Handle<JSObject> instance);
-// Get the asm.js source position for the given byte offset in the given
-// function.
-int GetAsmWasmSourcePosition(Handle<JSObject> instance, int func_index,
- int byte_offset);
-
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> CreateModuleObjectFromBytes(
Isolate* isolate, const byte* start, const byte* end, ErrorThrower* thrower,
ModuleOrigin origin, Handle<Script> asm_js_script,
- const byte* asm_offset_tables_start, const byte* asm_offset_tables_end);
+ Vector<const byte> asm_offset_table);
+
+V8_EXPORT_PRIVATE bool IsWasmCodegenAllowed(Isolate* isolate,
+ Handle<Context> context);
+
+V8_EXPORT_PRIVATE Handle<JSArray> GetImports(Isolate* isolate,
+ Handle<WasmModuleObject> module);
+V8_EXPORT_PRIVATE Handle<JSArray> GetExports(Isolate* isolate,
+ Handle<WasmModuleObject> module);
+V8_EXPORT_PRIVATE Handle<JSArray> GetCustomSections(
+ Isolate* isolate, Handle<WasmModuleObject> module, Handle<String> name,
+ ErrorThrower* thrower);
V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
const byte* end,
@@ -414,33 +419,44 @@ V8_EXPORT_PRIVATE bool ValidateModuleBytes(Isolate* isolate, const byte* start,
int GetFunctionCodeOffset(Handle<WasmCompiledModule> compiled_module,
int func_index);
-// Translate from byte offset in the module to function number and byte offset
-// within that function, encoded as line and column in the position info.
-bool GetPositionInfo(Handle<WasmCompiledModule> compiled_module,
- uint32_t position, Script::PositionInfo* info);
-
// Assumed to be called with a code object associated to a wasm module instance.
// Intended to be called from runtime functions.
// Returns nullptr on failing to get owning instance.
-Object* GetOwningWasmInstance(Code* code);
+WasmInstanceObject* GetOwningWasmInstance(Code* code);
-MaybeHandle<JSArrayBuffer> GetInstanceMemory(Isolate* isolate,
- Handle<JSObject> instance);
+MaybeHandle<JSArrayBuffer> GetInstanceMemory(
+ Isolate* isolate, Handle<WasmInstanceObject> instance);
-int32_t GetInstanceMemorySize(Isolate* isolate, Handle<JSObject> instance);
+int32_t GetInstanceMemorySize(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
-int32_t GrowInstanceMemory(Isolate* isolate, Handle<JSObject> instance,
- uint32_t pages);
+int32_t GrowInstanceMemory(Isolate* isolate,
+ Handle<WasmInstanceObject> instance, uint32_t pages);
+
+Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
+ bool enable_guard_regions);
+
+int32_t GrowWebAssemblyMemory(Isolate* isolate,
+ Handle<WasmMemoryObject> receiver,
+ uint32_t pages);
+
+int32_t GrowMemory(Isolate* isolate, Handle<WasmInstanceObject> instance,
+ uint32_t pages);
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, Handle<JSFunction> js_function);
+void GrowDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
+ uint32_t old_size, uint32_t count);
+
namespace testing {
-void ValidateInstancesChain(Isolate* isolate, Handle<JSObject> wasm_module,
+void ValidateInstancesChain(Isolate* isolate,
+ Handle<WasmModuleObject> module_obj,
int instance_count);
-void ValidateModuleState(Isolate* isolate, Handle<JSObject> wasm_module);
-void ValidateOrphanedInstance(Isolate* isolate, Handle<JSObject> instance);
+void ValidateModuleState(Isolate* isolate, Handle<WasmModuleObject> module_obj);
+void ValidateOrphanedInstance(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
} // namespace testing
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index 68f66d246d..3f694c579f 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -3,7 +3,12 @@
// found in the LICENSE file.
#include "src/wasm/wasm-objects.h"
+#include "src/utils.h"
+
+#include "src/debug/debug-interface.h"
+#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-text.h"
#define TRACE(...) \
do { \
@@ -18,29 +23,38 @@
using namespace v8::internal;
using namespace v8::internal::wasm;
-#define DEFINE_ACCESSORS(Container, name, field, type) \
- type* Container::get_##name() { \
- return type::cast(GetInternalField(field)); \
- } \
- void Container::set_##name(type* value) { \
- return SetInternalField(field, value); \
- }
+#define DEFINE_GETTER0(getter, Container, name, field, type) \
+ type* Container::name() { return type::cast(getter(field)); }
-#define DEFINE_OPTIONAL_ACCESSORS(Container, name, field, type) \
- bool Container::has_##name() { \
- return !GetInternalField(field)->IsUndefined(GetIsolate()); \
- } \
- type* Container::get_##name() { \
- return type::cast(GetInternalField(field)); \
- } \
- void Container::set_##name(type* value) { \
- return SetInternalField(field, value); \
- }
+#define DEFINE_ACCESSORS0(getter, setter, Container, name, field, type) \
+ DEFINE_GETTER0(getter, Container, name, field, type) \
+ void Container::set_##name(type* value) { return setter(field, value); }
-#define DEFINE_GETTER(Container, name, field, type) \
- type* Container::get_##name() { return type::cast(GetInternalField(field)); }
+#define DEFINE_OPTIONAL_ACCESSORS0(getter, setter, Container, name, field, \
+ type) \
+ DEFINE_ACCESSORS0(getter, setter, Container, name, field, type) \
+ bool Container::has_##name() { \
+ return !getter(field)->IsUndefined(GetIsolate()); \
+ }
-static uint32_t SafeUint32(Object* value) {
+#define DEFINE_OBJ_GETTER(Container, name, field, type) \
+ DEFINE_GETTER0(GetInternalField, Container, name, field, type)
+#define DEFINE_OBJ_ACCESSORS(Container, name, field, type) \
+ DEFINE_ACCESSORS0(GetInternalField, SetInternalField, Container, name, \
+ field, type)
+#define DEFINE_OPTIONAL_OBJ_ACCESSORS(Container, name, field, type) \
+ DEFINE_OPTIONAL_ACCESSORS0(GetInternalField, SetInternalField, Container, \
+ name, field, type)
+#define DEFINE_ARR_GETTER(Container, name, field, type) \
+ DEFINE_GETTER0(get, Container, name, field, type)
+#define DEFINE_ARR_ACCESSORS(Container, name, field, type) \
+ DEFINE_ACCESSORS0(get, set, Container, name, field, type)
+#define DEFINE_OPTIONAL_ARR_ACCESSORS(Container, name, field, type) \
+ DEFINE_OPTIONAL_ACCESSORS0(get, set, Container, name, field, type)
+
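For reference, a sketch of what these macros generate; the expansion below is
illustrative, using WasmMemoryObject's optional instances_link field:

  WasmInstanceWrapper* WasmMemoryObject::instances_link() {
    return WasmInstanceWrapper::cast(GetInternalField(kInstancesLink));
  }
  void WasmMemoryObject::set_instances_link(WasmInstanceWrapper* value) {
    return SetInternalField(kInstancesLink, value);
  }
  bool WasmMemoryObject::has_instances_link() {
    return !GetInternalField(kInstancesLink)->IsUndefined(GetIsolate());
  }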
+namespace {
+
+uint32_t SafeUint32(Object* value) {
if (value->IsSmi()) {
int32_t val = Smi::cast(value)->value();
CHECK_GE(val, 0);
@@ -49,21 +63,23 @@ static uint32_t SafeUint32(Object* value) {
DCHECK(value->IsHeapNumber());
HeapNumber* num = HeapNumber::cast(value);
CHECK_GE(num->value(), 0.0);
- CHECK_LE(num->value(), static_cast<double>(kMaxUInt32));
+ CHECK_LE(num->value(), kMaxUInt32);
return static_cast<uint32_t>(num->value());
}
-static int32_t SafeInt32(Object* value) {
+int32_t SafeInt32(Object* value) {
if (value->IsSmi()) {
return Smi::cast(value)->value();
}
DCHECK(value->IsHeapNumber());
HeapNumber* num = HeapNumber::cast(value);
- CHECK_GE(num->value(), static_cast<double>(Smi::kMinValue));
- CHECK_LE(num->value(), static_cast<double>(Smi::kMaxValue));
+ CHECK_GE(num->value(), Smi::kMinValue);
+ CHECK_LE(num->value(), Smi::kMaxValue);
return static_cast<int32_t>(num->value());
}
+} // namespace
+
Handle<WasmModuleObject> WasmModuleObject::New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
ModuleOrigin origin = compiled_module->module()->origin;
@@ -97,8 +113,16 @@ WasmModuleObject* WasmModuleObject::cast(Object* object) {
return reinterpret_cast<WasmModuleObject*>(object);
}
+bool WasmModuleObject::IsWasmModuleObject(Object* object) {
+ return object->IsJSObject() &&
+ JSObject::cast(object)->GetInternalFieldCount() == kFieldCount;
+}
+
+DEFINE_OBJ_GETTER(WasmModuleObject, compiled_module, kCompiledModule,
+ WasmCompiledModule)
+
Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
- uint32_t maximum,
+ int64_t maximum,
Handle<FixedArray>* js_functions) {
Handle<JSFunction> table_ctor(
isolate->native_context()->wasm_table_constructor());
@@ -109,8 +133,8 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
(*js_functions)->set(i, null);
}
table_obj->SetInternalField(kFunctions, *(*js_functions));
- table_obj->SetInternalField(kMaximum,
- static_cast<Object*>(Smi::FromInt(maximum)));
+ Handle<Object> max = isolate->factory()->NewNumber(maximum);
+ table_obj->SetInternalField(kMaximum, *max);
Handle<FixedArray> dispatch_tables = isolate->factory()->NewFixedArray(0);
table_obj->SetInternalField(kDispatchTables, *dispatch_tables);
@@ -119,27 +143,28 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
return Handle<WasmTableObject>::cast(table_obj);
}
-DEFINE_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
+DEFINE_OBJ_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
Handle<FixedArray> WasmTableObject::AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table_obj,
Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> dispatch_table) {
+ Handle<FixedArray> function_table, Handle<FixedArray> signature_table) {
Handle<FixedArray> dispatch_tables(
FixedArray::cast(table_obj->GetInternalField(kDispatchTables)), isolate);
- DCHECK_EQ(0, dispatch_tables->length() % 3);
+ DCHECK_EQ(0, dispatch_tables->length() % 4);
if (instance.is_null()) return dispatch_tables;
// TODO(titzer): use weak cells here to avoid leaking instances.
// Grow the dispatch table and add a new 4-tuple at the end.
Handle<FixedArray> new_dispatch_tables =
- isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 3);
+ isolate->factory()->CopyFixedArrayAndGrow(dispatch_tables, 4);
new_dispatch_tables->set(dispatch_tables->length() + 0, *instance);
new_dispatch_tables->set(dispatch_tables->length() + 1,
Smi::FromInt(table_index));
- new_dispatch_tables->set(dispatch_tables->length() + 2, *dispatch_table);
+ new_dispatch_tables->set(dispatch_tables->length() + 2, *function_table);
+ new_dispatch_tables->set(dispatch_tables->length() + 3, *signature_table);
table_obj->SetInternalField(WasmTableObject::kDispatchTables,
*new_dispatch_tables);
@@ -147,12 +172,16 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
return new_dispatch_tables;
}
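With this change, each entry in the dispatch_tables FixedArray occupies four
consecutive slots rather than three; a sketch of the resulting layout:

  // dispatch_tables->get(4 * i + 0)  -> WasmInstanceObject
  // dispatch_tables->get(4 * i + 1)  -> Smi: table_index
  // dispatch_tables->get(4 * i + 2)  -> FixedArray: function_table
  // dispatch_tables->get(4 * i + 3)  -> FixedArray: signature_table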
-DEFINE_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+DEFINE_OBJ_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+
+uint32_t WasmTableObject::current_length() { return functions()->length(); }
-uint32_t WasmTableObject::current_length() { return get_functions()->length(); }
+bool WasmTableObject::has_maximum_length() {
+ return GetInternalField(kMaximum)->Number() >= 0;
+}
-uint32_t WasmTableObject::maximum_length() {
- return SafeUint32(GetInternalField(kMaximum));
+int64_t WasmTableObject::maximum_length() {
+ return static_cast<int64_t>(GetInternalField(kMaximum)->Number());
}
WasmTableObject* WasmTableObject::cast(Object* object) {
@@ -161,28 +190,42 @@ WasmTableObject* WasmTableObject::cast(Object* object) {
return reinterpret_cast<WasmTableObject*>(object);
}
+void WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t count) {
+ Handle<FixedArray> dispatch_tables(table->dispatch_tables());
+ wasm::GrowDispatchTables(isolate, dispatch_tables,
+ table->functions()->length(), count);
+}
+
Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
- int maximum) {
+ int32_t maximum) {
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor());
- Handle<JSObject> memory_obj = isolate->factory()->NewJSObject(memory_ctor);
+ Handle<JSObject> memory_obj =
+ isolate->factory()->NewJSObject(memory_ctor, TENURED);
memory_obj->SetInternalField(kArrayBuffer, *buffer);
- memory_obj->SetInternalField(kMaximum,
- static_cast<Object*>(Smi::FromInt(maximum)));
+ Handle<Object> max = isolate->factory()->NewNumber(maximum);
+ memory_obj->SetInternalField(kMaximum, *max);
Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
Object::SetProperty(memory_obj, memory_sym, memory_obj, STRICT).Check();
return Handle<WasmMemoryObject>::cast(memory_obj);
}
-DEFINE_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+DEFINE_OBJ_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, instances_link, kInstancesLink,
+ WasmInstanceWrapper)
uint32_t WasmMemoryObject::current_pages() {
- return SafeUint32(get_buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+ return SafeUint32(buffer()->byte_length()) / wasm::WasmModule::kPageSize;
+}
+
+bool WasmMemoryObject::has_maximum_pages() {
+ return GetInternalField(kMaximum)->Number() >= 0;
}
int32_t WasmMemoryObject::maximum_pages() {
- return SafeInt32(GetInternalField(kMaximum));
+ return static_cast<int32_t>(GetInternalField(kMaximum)->Number());
}
WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
@@ -191,31 +234,50 @@ WasmMemoryObject* WasmMemoryObject::cast(Object* object) {
return reinterpret_cast<WasmMemoryObject*>(object);
}
-void WasmMemoryObject::AddInstance(WasmInstanceObject* instance) {
- // TODO(gdeepti): This should be a weak list of instance objects
- // for instances that share memory.
- SetInternalField(kInstance, instance);
+void WasmMemoryObject::AddInstance(Isolate* isolate,
+ Handle<WasmInstanceObject> instance) {
+ Handle<WasmInstanceWrapper> instance_wrapper =
+ handle(instance->instance_wrapper());
+ if (has_instances_link()) {
+ Handle<WasmInstanceWrapper> current_wrapper(instances_link());
+ DCHECK(WasmInstanceWrapper::IsWasmInstanceWrapper(*current_wrapper));
+ DCHECK(!current_wrapper->has_previous());
+ instance_wrapper->set_next_wrapper(*current_wrapper);
+ current_wrapper->set_previous_wrapper(*instance_wrapper);
+ }
+ set_instances_link(*instance_wrapper);
+}
+
+void WasmMemoryObject::ResetInstancesLink(Isolate* isolate) {
+ Handle<Object> undefined = isolate->factory()->undefined_value();
+ SetInternalField(kInstancesLink, *undefined);
}
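The instances_link field set above heads a doubly-linked list of
WasmInstanceWrapper cells, one per instance sharing the memory. A minimal
traversal sketch, assuming a Handle<WasmMemoryObject> named memory
(hypothetical variable) and the wrapper accessors declared in wasm-objects.h:

  if (memory->has_instances_link()) {
    Handle<WasmInstanceWrapper> w = handle(memory->instances_link());
    for (;;) {
      if (w->has_instance()) {
        Handle<WasmInstanceObject> instance = w->instance_object();
        // ... visit the instance ...
      }
      if (!w->has_next()) break;
      w = w->next_wrapper();
    }
  }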
-DEFINE_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
- WasmCompiledModule)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, globals_buffer,
- kGlobalsArrayBuffer, JSArrayBuffer)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_buffer, kMemoryArrayBuffer,
- JSArrayBuffer)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
- WasmMemoryObject)
-DEFINE_OPTIONAL_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
- WasmDebugInfo)
+DEFINE_OBJ_ACCESSORS(WasmInstanceObject, compiled_module, kCompiledModule,
+ WasmCompiledModule)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, globals_buffer,
+ kGlobalsArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_buffer,
+ kMemoryArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, memory_object, kMemoryObject,
+ WasmMemoryObject)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, debug_info, kDebugInfo,
+ WasmDebugInfo)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmInstanceObject, instance_wrapper,
+ kWasmMemInstanceWrapper, WasmInstanceWrapper)
WasmModuleObject* WasmInstanceObject::module_object() {
- return WasmModuleObject::cast(*get_compiled_module()->wasm_module());
+ return *compiled_module()->wasm_module();
}
-WasmModule* WasmInstanceObject::module() {
- return reinterpret_cast<WasmModuleWrapper*>(
- *get_compiled_module()->module_wrapper())
- ->get();
+WasmModule* WasmInstanceObject::module() { return compiled_module()->module(); }
+
+Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
+ Handle<WasmInstanceObject> instance) {
+ if (instance->has_debug_info()) return handle(instance->debug_info());
+ Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
+ instance->set_debug_info(*new_info);
+ return new_info;
}
WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
@@ -224,7 +286,6 @@ WasmInstanceObject* WasmInstanceObject::cast(Object* object) {
}
bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
- if (!object->IsObject()) return false;
if (!object->IsJSObject()) return false;
JSObject* obj = JSObject::cast(object);
@@ -246,15 +307,21 @@ bool WasmInstanceObject::IsWasmInstanceObject(Object* object) {
Handle<WasmInstanceObject> WasmInstanceObject::New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
- Handle<Map> map = isolate->factory()->NewMap(
- JS_OBJECT_TYPE, JSObject::kHeaderSize + kFieldCount * kPointerSize);
+ Handle<JSFunction> instance_cons(
+ isolate->native_context()->wasm_instance_constructor());
+ Handle<JSObject> instance_object =
+ isolate->factory()->NewJSObject(instance_cons, TENURED);
+ Handle<Symbol> instance_sym(isolate->native_context()->wasm_instance_sym());
+ Object::SetProperty(instance_object, instance_sym, instance_object, STRICT)
+ .Check();
Handle<WasmInstanceObject> instance(
- reinterpret_cast<WasmInstanceObject*>(
- *isolate->factory()->NewJSObjectFromMap(map, TENURED)),
- isolate);
+ reinterpret_cast<WasmInstanceObject*>(*instance_object), isolate);
instance->SetInternalField(kCompiledModule, *compiled_module);
instance->SetInternalField(kMemoryObject, isolate->heap()->undefined_value());
+ Handle<WasmInstanceWrapper> instance_wrapper =
+ WasmInstanceWrapper::New(isolate, instance);
+ instance->SetInternalField(kWasmMemInstanceWrapper, *instance_wrapper);
return instance;
}
@@ -275,8 +342,20 @@ WasmExportedFunction* WasmExportedFunction::cast(Object* object) {
}
Handle<WasmExportedFunction> WasmExportedFunction::New(
- Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<String> name,
- Handle<Code> export_wrapper, int arity, int func_index) {
+ Isolate* isolate, Handle<WasmInstanceObject> instance,
+ MaybeHandle<String> maybe_name, int func_index, int arity,
+ Handle<Code> export_wrapper) {
+ Handle<String> name;
+ if (maybe_name.is_null()) {
+ EmbeddedVector<char, 16> buffer;
+ int length = SNPrintF(buffer, "%d", func_index);
+ name = isolate->factory()
+ ->NewStringFromAscii(
+ Vector<const char>::cast(buffer.SubVector(0, length)))
+ .ToHandleChecked();
+ } else {
+ name = maybe_name.ToHandleChecked();
+ }
DCHECK_EQ(Code::JS_TO_WASM_FUNCTION, export_wrapper->kind());
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfo(name, export_wrapper, false);
@@ -291,22 +370,109 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
return Handle<WasmExportedFunction>::cast(function);
}
+bool WasmSharedModuleData::IsWasmSharedModuleData(Object* object) {
+ if (!object->IsFixedArray()) return false;
+ FixedArray* arr = FixedArray::cast(object);
+ if (arr->length() != kFieldCount) return false;
+ Isolate* isolate = arr->GetIsolate();
+ if (!arr->get(kModuleWrapper)->IsForeign()) return false;
+ if (!arr->get(kModuleBytes)->IsUndefined(isolate) &&
+ !arr->get(kModuleBytes)->IsSeqOneByteString())
+ return false;
+ if (!arr->get(kScript)->IsScript()) return false;
+ if (!arr->get(kAsmJsOffsetTable)->IsUndefined(isolate) &&
+ !arr->get(kAsmJsOffsetTable)->IsByteArray())
+ return false;
+ return true;
+}
+
+WasmSharedModuleData* WasmSharedModuleData::cast(Object* object) {
+ DCHECK(IsWasmSharedModuleData(object));
+ return reinterpret_cast<WasmSharedModuleData*>(object);
+}
+
+wasm::WasmModule* WasmSharedModuleData::module() {
+ return reinterpret_cast<WasmModuleWrapper*>(get(kModuleWrapper))->get();
+}
+
+DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, module_bytes, kModuleBytes,
+ SeqOneByteString);
+DEFINE_ARR_GETTER(WasmSharedModuleData, script, kScript, Script);
+DEFINE_OPTIONAL_ARR_ACCESSORS(WasmSharedModuleData, asm_js_offset_table,
+ kAsmJsOffsetTable, ByteArray);
+
+Handle<WasmSharedModuleData> WasmSharedModuleData::New(
+ Isolate* isolate, Handle<Foreign> module_wrapper,
+ Handle<SeqOneByteString> module_bytes, Handle<Script> script,
+ Handle<ByteArray> asm_js_offset_table) {
+ Handle<FixedArray> arr =
+ isolate->factory()->NewFixedArray(kFieldCount, TENURED);
+
+ arr->set(kModuleWrapper, *module_wrapper);
+ if (!module_bytes.is_null()) {
+ arr->set(kModuleBytes, *module_bytes);
+ }
+ if (!script.is_null()) {
+ arr->set(kScript, *script);
+ }
+ if (!asm_js_offset_table.is_null()) {
+ arr->set(kAsmJsOffsetTable, *asm_js_offset_table);
+ }
+
+ DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*arr));
+ return Handle<WasmSharedModuleData>::cast(arr);
+}
+
+bool WasmSharedModuleData::is_asm_js() {
+ bool asm_js = module()->origin == wasm::ModuleOrigin::kAsmJsOrigin;
+ DCHECK_EQ(asm_js, script()->type() == Script::TYPE_NORMAL);
+ DCHECK_EQ(asm_js, has_asm_js_offset_table());
+ return asm_js;
+}
+
+void WasmSharedModuleData::RecreateModuleWrapper(
+ Isolate* isolate, Handle<WasmSharedModuleData> shared) {
+ DCHECK(shared->get(kModuleWrapper)->IsUndefined(isolate));
+
+ WasmModule* module = nullptr;
+ {
+    // We parse the module again directly from the module bytes, so the
+    // underlying byte storage must not move in the meantime.
+ DisallowHeapAllocation no_allocation;
+ SeqOneByteString* module_bytes = shared->module_bytes();
+ const byte* start =
+ reinterpret_cast<const byte*>(module_bytes->GetCharsAddress());
+ const byte* end = start + module_bytes->length();
+ // TODO(titzer): remember the module origin in the compiled_module
+ // For now, we assume serialized modules did not originate from asm.js.
+ ModuleResult result =
+ DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
+ CHECK(result.ok());
+ CHECK_NOT_NULL(result.val);
+ module = const_cast<WasmModule*>(result.val);
+ }
+
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate, module);
+
+ shared->set(kModuleWrapper, *module_wrapper);
+ DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+}
+
Handle<WasmCompiledModule> WasmCompiledModule::New(
- Isolate* isolate, Handle<WasmModuleWrapper> module_wrapper) {
+ Isolate* isolate, Handle<WasmSharedModuleData> shared) {
Handle<FixedArray> ret =
isolate->factory()->NewFixedArray(PropertyIndices::Count, TENURED);
- // WasmCompiledModule::cast would fail since module bytes are not set yet.
+ // WasmCompiledModule::cast would fail since fields are not set yet.
Handle<WasmCompiledModule> compiled_module(
reinterpret_cast<WasmCompiledModule*>(*ret), isolate);
compiled_module->InitId();
- compiled_module->set_module_wrapper(module_wrapper);
+ compiled_module->set_num_imported_functions(0);
+ compiled_module->set_shared(shared);
+ compiled_module->set_native_context(isolate->native_context());
return compiled_module;
}
-wasm::WasmModule* WasmCompiledModule::module() const {
- return reinterpret_cast<WasmModuleWrapper*>(*module_wrapper())->get();
-}
-
void WasmCompiledModule::InitId() {
#if DEBUG
static uint32_t instance_id_counter = 0;
@@ -315,19 +481,39 @@ void WasmCompiledModule::InitId() {
#endif
}
+MaybeHandle<String> WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t offset, uint32_t size) {
+ // TODO(wasm): cache strings from modules if it's a performance win.
+ Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
+ isolate);
+ DCHECK_GE(module_bytes->length(), offset);
+ DCHECK_GE(module_bytes->length() - offset, size);
+ Address raw = module_bytes->GetCharsAddress() + offset;
+ if (!unibrow::Utf8::Validate(reinterpret_cast<const byte*>(raw), size))
+    return {};  // UTF-8 decoding error in name.
+ DCHECK_GE(kMaxInt, offset);
+ DCHECK_GE(kMaxInt, size);
+ return isolate->factory()->NewStringFromUtf8SubString(
+ module_bytes, static_cast<int>(offset), static_cast<int>(size));
+}
+
bool WasmCompiledModule::IsWasmCompiledModule(Object* obj) {
if (!obj->IsFixedArray()) return false;
FixedArray* arr = FixedArray::cast(obj);
if (arr->length() != PropertyIndices::Count) return false;
Isolate* isolate = arr->GetIsolate();
-#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) \
- if (!arr->get(kID_##NAME)->IsSmi()) return false;
-#define WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME) \
- if (!arr->get(kID_##NAME)->IsUndefined(isolate) && \
- !arr->get(kID_##NAME)->Is##TYPE()) \
- return false;
-#define WCM_CHECK_OBJECT(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(TYPE, NAME)
-#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT_OR_WEAK(WeakCell, NAME)
+#define WCM_CHECK_TYPE(NAME, TYPE_CHECK) \
+ do { \
+ Object* obj = arr->get(kID_##NAME); \
+ if (!(TYPE_CHECK)) return false; \
+ } while (false);
+#define WCM_CHECK_OBJECT(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, obj->IsUndefined(isolate) || obj->Is##TYPE())
+#define WCM_CHECK_WASM_OBJECT(TYPE, NAME) \
+ WCM_CHECK_TYPE(NAME, TYPE::Is##TYPE(obj))
+#define WCM_CHECK_WEAK_LINK(TYPE, NAME) WCM_CHECK_OBJECT(WeakCell, NAME)
+#define WCM_CHECK_SMALL_NUMBER(TYPE, NAME) WCM_CHECK_TYPE(NAME, obj->IsSmi())
#define WCM_CHECK(KIND, TYPE, NAME) WCM_CHECK_##KIND(TYPE, NAME)
WCM_PROPERTY_TABLE(WCM_CHECK)
#undef WCM_CHECK
@@ -341,7 +527,7 @@ void WasmCompiledModule::PrintInstancesChain() {
if (!FLAG_trace_wasm_instances) return;
for (WasmCompiledModule* current = this; current != nullptr;) {
PrintF("->%d", current->instance_id());
- if (current->ptr_to_weak_next_instance() == nullptr) break;
+ if (!current->has_weak_next_instance()) break;
CHECK(!current->ptr_to_weak_next_instance()->cleared());
current =
WasmCompiledModule::cast(current->ptr_to_weak_next_instance()->value());
@@ -350,6 +536,19 @@ void WasmCompiledModule::PrintInstancesChain() {
#endif
}
+void WasmCompiledModule::RecreateModuleWrapper(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module) {
+ // This method must only be called immediately after deserialization.
+ // At this point, no module wrapper exists, so the shared module data is
+ // incomplete.
+ Handle<WasmSharedModuleData> shared(
+ static_cast<WasmSharedModuleData*>(compiled_module->get(kID_shared)),
+ isolate);
+ DCHECK(!WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+ WasmSharedModuleData::RecreateModuleWrapper(isolate, shared);
+ DCHECK(WasmSharedModuleData::IsWasmSharedModuleData(*shared));
+}
+
uint32_t WasmCompiledModule::mem_size() const {
return has_memory() ? memory()->byte_length()->Number() : default_mem_size();
}
@@ -357,3 +556,310 @@ uint32_t WasmCompiledModule::mem_size() const {
uint32_t WasmCompiledModule::default_mem_size() const {
return min_mem_pages() * WasmModule::kPageSize;
}
+
+MaybeHandle<String> WasmCompiledModule::GetFunctionNameOrNull(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index) {
+ DCHECK_LT(func_index, compiled_module->module()->functions.size());
+ WasmFunction& function = compiled_module->module()->functions[func_index];
+ return WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
+ isolate, compiled_module, function.name_offset, function.name_length);
+}
+
+Handle<String> WasmCompiledModule::GetFunctionName(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index) {
+ MaybeHandle<String> name =
+ GetFunctionNameOrNull(isolate, compiled_module, func_index);
+ if (!name.is_null()) return name.ToHandleChecked();
+ return isolate->factory()->NewStringFromStaticChars("<WASM UNNAMED>");
+}
+
+Vector<const uint8_t> WasmCompiledModule::GetRawFunctionName(
+ uint32_t func_index) {
+ DCHECK_GT(module()->functions.size(), func_index);
+ WasmFunction& function = module()->functions[func_index];
+ SeqOneByteString* bytes = module_bytes();
+ DCHECK_GE(bytes->length(), function.name_offset);
+ DCHECK_GE(bytes->length() - function.name_offset, function.name_length);
+ return Vector<const uint8_t>(bytes->GetCharsAddress() + function.name_offset,
+ function.name_length);
+}
+
+int WasmCompiledModule::GetFunctionOffset(uint32_t func_index) {
+ std::vector<WasmFunction>& functions = module()->functions;
+  if (func_index >= functions.size()) return -1;
+ DCHECK_GE(kMaxInt, functions[func_index].code_start_offset);
+ return static_cast<int>(functions[func_index].code_start_offset);
+}
+
+int WasmCompiledModule::GetContainingFunction(uint32_t byte_offset) {
+ std::vector<WasmFunction>& functions = module()->functions;
+
+ // Binary search for a function containing the given position.
+ int left = 0; // inclusive
+ int right = static_cast<int>(functions.size()); // exclusive
+  if (right == 0) return -1;  // No functions in this module.
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ if (functions[mid].code_start_offset <= byte_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+  // If the found function does not contain the given position, return -1.
+ WasmFunction& func = functions[left];
+ if (byte_offset < func.code_start_offset ||
+ byte_offset >= func.code_end_offset) {
+ return -1;
+ }
+
+ return left;
+}
+
+bool WasmCompiledModule::GetPositionInfo(uint32_t position,
+ Script::PositionInfo* info) {
+ int func_index = GetContainingFunction(position);
+ if (func_index < 0) return false;
+
+ WasmFunction& function = module()->functions[func_index];
+
+ info->line = func_index;
+ info->column = position - function.code_start_offset;
+ info->line_start = function.code_start_offset;
+ info->line_end = function.code_end_offset;
+ return true;
+}
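A concrete reading of this encoding, with hypothetical numbers: if function 3
occupies module bytes [120, 150), then position 130 produces line = 3,
column = 10, line_start = 120 and line_end = 150; the function index doubles
as the line number and the offset within the function as the column.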
+
+namespace {
+
+enum AsmJsOffsetTableEntryLayout {
+ kOTEByteOffset,
+ kOTECallPosition,
+ kOTENumberConvPosition,
+ kOTESize
+};
+
+Handle<ByteArray> GetDecodedAsmJsOffsetTable(
+ Handle<WasmCompiledModule> compiled_module, Isolate* isolate) {
+ DCHECK(compiled_module->is_asm_js());
+ Handle<ByteArray> offset_table(
+ compiled_module->shared()->asm_js_offset_table(), isolate);
+
+  // The last byte in the asm_js_offset_table ByteArray tells whether it is
+ // still encoded (0) or decoded (1).
+ enum AsmJsTableType : int { Encoded = 0, Decoded = 1 };
+ int table_type = offset_table->get(offset_table->length() - 1);
+ DCHECK(table_type == Encoded || table_type == Decoded);
+ if (table_type == Decoded) return offset_table;
+
+ AsmJsOffsetsResult asm_offsets;
+ {
+ DisallowHeapAllocation no_gc;
+ const byte* bytes_start = offset_table->GetDataStartAddress();
+ const byte* bytes_end = bytes_start + offset_table->length() - 1;
+ asm_offsets = wasm::DecodeAsmJsOffsets(bytes_start, bytes_end);
+ }
+  // The wasm bytes must be valid and must contain an asm.js offset table.
+ DCHECK(asm_offsets.ok());
+ DCHECK_GE(kMaxInt, asm_offsets.val.size());
+ int num_functions = static_cast<int>(asm_offsets.val.size());
+ int num_imported_functions =
+ static_cast<int>(compiled_module->module()->num_imported_functions);
+ DCHECK_EQ(compiled_module->module()->functions.size(),
+ static_cast<size_t>(num_functions) + num_imported_functions);
+ int num_entries = 0;
+ for (int func = 0; func < num_functions; ++func) {
+ size_t new_size = asm_offsets.val[func].size();
+ DCHECK_LE(new_size, static_cast<size_t>(kMaxInt) - num_entries);
+ num_entries += static_cast<int>(new_size);
+ }
+ // One byte to encode that this is a decoded table.
+ DCHECK_GE(kMaxInt,
+ 1 + static_cast<uint64_t>(num_entries) * kOTESize * kIntSize);
+ int total_size = 1 + num_entries * kOTESize * kIntSize;
+ Handle<ByteArray> decoded_table =
+ isolate->factory()->NewByteArray(total_size, TENURED);
+ decoded_table->set(total_size - 1, AsmJsTableType::Decoded);
+ compiled_module->shared()->set_asm_js_offset_table(*decoded_table);
+
+ int idx = 0;
+ std::vector<WasmFunction>& wasm_funs = compiled_module->module()->functions;
+ for (int func = 0; func < num_functions; ++func) {
+ std::vector<AsmJsOffsetEntry>& func_asm_offsets = asm_offsets.val[func];
+ if (func_asm_offsets.empty()) continue;
+ int func_offset =
+ wasm_funs[num_imported_functions + func].code_start_offset;
+ for (AsmJsOffsetEntry& e : func_asm_offsets) {
+      // Byte offsets must be strictly monotonically increasing:
+ DCHECK_IMPLIES(idx > 0, func_offset + e.byte_offset >
+ decoded_table->get_int(idx - kOTESize));
+ decoded_table->set_int(idx + kOTEByteOffset, func_offset + e.byte_offset);
+ decoded_table->set_int(idx + kOTECallPosition, e.source_position_call);
+ decoded_table->set_int(idx + kOTENumberConvPosition,
+ e.source_position_number_conversion);
+ idx += kOTESize;
+ }
+ }
+ DCHECK_EQ(total_size, idx * kIntSize + 1);
+ return decoded_table;
+}
+
+} // namespace
+
+int WasmCompiledModule::GetAsmJsSourcePosition(
+ Handle<WasmCompiledModule> compiled_module, uint32_t func_index,
+ uint32_t byte_offset, bool is_at_number_conversion) {
+ Isolate* isolate = compiled_module->GetIsolate();
+ Handle<ByteArray> offset_table =
+ GetDecodedAsmJsOffsetTable(compiled_module, isolate);
+
+ DCHECK_LT(func_index, compiled_module->module()->functions.size());
+ uint32_t func_code_offset =
+ compiled_module->module()->functions[func_index].code_start_offset;
+ uint32_t total_offset = func_code_offset + byte_offset;
+
+ // Binary search for the total byte offset.
+ int left = 0; // inclusive
+ int right = offset_table->length() / kIntSize / kOTESize; // exclusive
+ DCHECK_LT(left, right);
+ while (right - left > 1) {
+ int mid = left + (right - left) / 2;
+ int mid_entry = offset_table->get_int(kOTESize * mid);
+ DCHECK_GE(kMaxInt, mid_entry);
+ if (static_cast<uint32_t>(mid_entry) <= total_offset) {
+ left = mid;
+ } else {
+ right = mid;
+ }
+ }
+ // There should be an entry for each position that could show up on the stack
+ // trace:
+ DCHECK_EQ(total_offset, offset_table->get_int(kOTESize * left));
+ int idx = is_at_number_conversion ? kOTENumberConvPosition : kOTECallPosition;
+ return offset_table->get_int(kOTESize * left + idx);
+}
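The decoded table consulted here is a flat array of int32 triples, one per
recorded offset entry and sorted by the first component:
[byte_offset, call_position, number_conversion_position], terminated by the
single type byte (Decoded). The binary search keys on the byte_offset slot
(kOTEByteOffset) and then reads either the call position or, when
is_at_number_conversion is set, the number-conversion position of the
matching entry.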
+
+v8::debug::WasmDisassembly WasmCompiledModule::DisassembleFunction(
+ int func_index) {
+ DisallowHeapAllocation no_gc;
+
+ if (func_index < 0 ||
+ static_cast<uint32_t>(func_index) >= module()->functions.size())
+ return {};
+
+ SeqOneByteString* module_bytes_str = module_bytes();
+ Vector<const byte> module_bytes(module_bytes_str->GetChars(),
+ module_bytes_str->length());
+
+ std::ostringstream disassembly_os;
+ v8::debug::WasmDisassembly::OffsetTable offset_table;
+
+ PrintWasmText(module(), module_bytes, static_cast<uint32_t>(func_index),
+ disassembly_os, &offset_table);
+
+ return {disassembly_os.str(), std::move(offset_table)};
+}
+
+bool WasmCompiledModule::GetPossibleBreakpoints(
+ const v8::debug::Location& start, const v8::debug::Location& end,
+ std::vector<v8::debug::Location>* locations) {
+ DisallowHeapAllocation no_gc;
+
+ std::vector<WasmFunction>& functions = module()->functions;
+ if (start.GetLineNumber() < 0 || start.GetColumnNumber() < 0 ||
+ (!end.IsEmpty() &&
+ (end.GetLineNumber() < 0 || end.GetColumnNumber() < 0)))
+ return false;
+
+  // start_func_index, start_offset, and end_func_index are inclusive.
+ // end_offset is exclusive.
+ // start_offset and end_offset are module-relative byte offsets.
+ uint32_t start_func_index = start.GetLineNumber();
+ if (start_func_index >= functions.size()) return false;
+ int start_func_len = functions[start_func_index].code_end_offset -
+ functions[start_func_index].code_start_offset;
+ if (start.GetColumnNumber() > start_func_len) return false;
+ uint32_t start_offset =
+ functions[start_func_index].code_start_offset + start.GetColumnNumber();
+ uint32_t end_func_index;
+ uint32_t end_offset;
+ if (end.IsEmpty()) {
+ // Default: everything till the end of the Script.
+ end_func_index = static_cast<uint32_t>(functions.size() - 1);
+ end_offset = functions[end_func_index].code_end_offset;
+ } else {
+ // If end is specified: Use it and check for valid input.
+ end_func_index = static_cast<uint32_t>(end.GetLineNumber());
+
+    // Special case: an end position at column 0 means "stop before this
+    // function starts". Move the end to the end of the previous function, so
+    // that we don't also disassemble the following function.
+ if (end.GetColumnNumber() == 0 && end_func_index > 0) {
+ --end_func_index;
+ end_offset = functions[end_func_index].code_end_offset;
+ } else {
+ if (end_func_index >= functions.size()) return false;
+ end_offset =
+ functions[end_func_index].code_start_offset + end.GetColumnNumber();
+ if (end_offset > functions[end_func_index].code_end_offset) return false;
+ }
+ }
+
+ AccountingAllocator alloc;
+ Zone tmp(&alloc, ZONE_NAME);
+ const byte* module_start = module_bytes()->GetChars();
+
+ for (uint32_t func_idx = start_func_index; func_idx <= end_func_index;
+ ++func_idx) {
+ WasmFunction& func = functions[func_idx];
+ if (func.code_start_offset == func.code_end_offset) continue;
+
+ BodyLocalDecls locals(&tmp);
+ BytecodeIterator iterator(module_start + func.code_start_offset,
+ module_start + func.code_end_offset, &locals);
+ DCHECK_LT(0u, locals.encoded_size);
+ for (uint32_t offset : iterator.offsets()) {
+ uint32_t total_offset = func.code_start_offset + offset;
+ if (total_offset >= end_offset) {
+ DCHECK_EQ(end_func_index, func_idx);
+ break;
+ }
+ if (total_offset < start_offset) continue;
+ locations->push_back(v8::debug::Location(func_idx, offset));
+ }
+ }
+ return true;
+}
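A usage sketch (variable names hypothetical): collecting every breakable
location inside function 2, relying on the column-0 special case above to
stop at the end of that function:

  std::vector<v8::debug::Location> locations;
  bool ok = compiled_module->GetPossibleBreakpoints(
      v8::debug::Location(2, 0),  // start of function 2
      v8::debug::Location(3, 0),  // "before function 3", i.e. end of function 2
      &locations);
  // On success, each location has line 2 and a column equal to the byte
  // offset of one instruction inside function 2's body.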
+
+Handle<WasmInstanceWrapper> WasmInstanceWrapper::New(
+ Isolate* isolate, Handle<WasmInstanceObject> instance) {
+ Handle<FixedArray> array =
+ isolate->factory()->NewFixedArray(kWrapperPropertyCount, TENURED);
+ Handle<WasmInstanceWrapper> instance_wrapper(
+ reinterpret_cast<WasmInstanceWrapper*>(*array), isolate);
+ instance_wrapper->set_instance_object(instance, isolate);
+ return instance_wrapper;
+}
+
+bool WasmInstanceWrapper::IsWasmInstanceWrapper(Object* obj) {
+ if (!obj->IsFixedArray()) return false;
+ Handle<FixedArray> array = handle(FixedArray::cast(obj));
+ if (array->length() != kWrapperPropertyCount) return false;
+ if (!array->get(kWrapperInstanceObject)->IsWeakCell()) return false;
+ Isolate* isolate = array->GetIsolate();
+ if (!array->get(kNextInstanceWrapper)->IsUndefined(isolate) &&
+ !array->get(kNextInstanceWrapper)->IsFixedArray())
+ return false;
+ if (!array->get(kPreviousInstanceWrapper)->IsUndefined(isolate) &&
+ !array->get(kPreviousInstanceWrapper)->IsFixedArray())
+ return false;
+ return true;
+}
+
+void WasmInstanceWrapper::set_instance_object(Handle<JSObject> instance,
+ Isolate* isolate) {
+ Handle<WeakCell> cell = isolate->factory()->NewWeakCell(instance);
+ set(kWrapperInstanceObject, *cell);
+}
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index f74661f652..c478fe0419 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -5,8 +5,11 @@
#ifndef V8_WASM_OBJECTS_H_
#define V8_WASM_OBJECTS_H_
+#include "src/debug/interface-types.h"
#include "src/objects-inl.h"
+#include "src/trap-handler/trap-handler.h"
#include "src/wasm/managed.h"
+#include "src/wasm/wasm-limits.h"
namespace v8 {
namespace internal {
@@ -17,19 +20,21 @@ struct WasmModule;
class WasmCompiledModule;
class WasmDebugInfo;
class WasmInstanceObject;
+class WasmInstanceWrapper;
#define DECLARE_CASTS(name) \
static bool Is##name(Object* object); \
static name* cast(Object* object)
+#define DECLARE_GETTER(name, type) type* name()
+
#define DECLARE_ACCESSORS(name, type) \
- type* get_##name(); \
- void set_##name(type* value)
+ void set_##name(type* value); \
+ DECLARE_GETTER(name, type)
#define DECLARE_OPTIONAL_ACCESSORS(name, type) \
bool has_##name(); \
- type* get_##name(); \
- void set_##name(type* value)
+ DECLARE_ACCESSORS(name, type)
// Representation of a WebAssembly.Module JavaScript-level object.
class WasmModuleObject : public JSObject {
@@ -40,13 +45,6 @@ class WasmModuleObject : public JSObject {
DECLARE_CASTS(WasmModuleObject);
WasmCompiledModule* compiled_module();
- wasm::WasmModule* module();
- int num_functions();
- bool is_asm_js();
- int GetAsmWasmSourcePosition(int func_index, int byte_offset);
- WasmDebugInfo* debug_info();
- void set_debug_info(WasmDebugInfo* debug_info);
- MaybeHandle<String> GetFunctionName(Isolate* isolate, int func_index);
static Handle<WasmModuleObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
@@ -61,38 +59,44 @@ class WasmTableObject : public JSObject {
DECLARE_CASTS(WasmTableObject);
DECLARE_ACCESSORS(functions, FixedArray);
- FixedArray* get_dispatch_tables();
+ FixedArray* dispatch_tables();
uint32_t current_length();
- uint32_t maximum_length();
+ bool has_maximum_length();
+ int64_t maximum_length(); // Returns < 0 if no maximum.
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
- uint32_t maximum,
+ int64_t maximum,
Handle<FixedArray>* js_functions);
- static bool Grow(Handle<WasmTableObject> table, uint32_t count);
+ static void Grow(Isolate* isolate, Handle<WasmTableObject> table,
+ uint32_t count);
static Handle<FixedArray> AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance, int table_index,
- Handle<FixedArray> dispatch_table);
+ Handle<FixedArray> function_table, Handle<FixedArray> signature_table);
};
// Representation of a WebAssembly.Memory JavaScript-level object.
class WasmMemoryObject : public JSObject {
public:
// TODO(titzer): add the brand as an internal field instead of a property.
- enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstance, kFieldCount };
+ enum Fields : uint8_t { kArrayBuffer, kMaximum, kInstancesLink, kFieldCount };
DECLARE_CASTS(WasmMemoryObject);
DECLARE_ACCESSORS(buffer, JSArrayBuffer);
+ DECLARE_OPTIONAL_ACCESSORS(instances_link, WasmInstanceWrapper);
- void AddInstance(WasmInstanceObject* object);
+ void AddInstance(Isolate* isolate, Handle<WasmInstanceObject> object);
+ void ResetInstancesLink(Isolate* isolate);
uint32_t current_pages();
- int32_t maximum_pages(); // returns < 0 if there is no maximum
+ bool has_maximum_pages();
+ int32_t maximum_pages(); // Returns < 0 if there is no maximum.
static Handle<WasmMemoryObject> New(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
- int maximum);
+ int32_t maximum);
- static bool Grow(Handle<WasmMemoryObject> memory, uint32_t count);
+ static bool Grow(Isolate* isolate, Handle<WasmMemoryObject> memory,
+ uint32_t count);
};
// Representation of a WebAssembly.Instance JavaScript-level object.
@@ -105,6 +109,7 @@ class WasmInstanceObject : public JSObject {
kMemoryArrayBuffer,
kGlobalsArrayBuffer,
kDebugInfo,
+ kWasmMemInstanceWrapper,
kFieldCount
};
@@ -115,10 +120,16 @@ class WasmInstanceObject : public JSObject {
DECLARE_OPTIONAL_ACCESSORS(memory_buffer, JSArrayBuffer);
DECLARE_OPTIONAL_ACCESSORS(memory_object, WasmMemoryObject);
DECLARE_OPTIONAL_ACCESSORS(debug_info, WasmDebugInfo);
+ DECLARE_OPTIONAL_ACCESSORS(instance_wrapper, WasmInstanceWrapper);
WasmModuleObject* module_object();
wasm::WasmModule* module();
+ // Get the debug info associated with the given wasm object.
+ // If no debug info exists yet, it is created automatically.
+ static Handle<WasmDebugInfo> GetOrCreateDebugInfo(
+ Handle<WasmInstanceObject> instance);
+
static Handle<WasmInstanceObject> New(
Isolate* isolate, Handle<WasmCompiledModule> compiled_module);
};
@@ -135,9 +146,39 @@ class WasmExportedFunction : public JSFunction {
static Handle<WasmExportedFunction> New(Isolate* isolate,
Handle<WasmInstanceObject> instance,
- Handle<String> name,
- Handle<Code> export_wrapper,
- int arity, int func_index);
+ MaybeHandle<String> maybe_name,
+ int func_index, int arity,
+ Handle<Code> export_wrapper);
+};
+
+// Information shared by all WasmCompiledModule objects for the same module.
+class WasmSharedModuleData : public FixedArray {
+ enum Fields {
+ kModuleWrapper,
+ kModuleBytes,
+ kScript,
+ kAsmJsOffsetTable,
+ kFieldCount
+ };
+
+ public:
+ DECLARE_CASTS(WasmSharedModuleData);
+
+ DECLARE_GETTER(module, wasm::WasmModule);
+ DECLARE_OPTIONAL_ACCESSORS(module_bytes, SeqOneByteString);
+ DECLARE_GETTER(script, Script);
+ DECLARE_OPTIONAL_ACCESSORS(asm_js_offset_table, ByteArray);
+
+ static Handle<WasmSharedModuleData> New(
+ Isolate* isolate, Handle<Foreign> module_wrapper,
+ Handle<SeqOneByteString> module_bytes, Handle<Script> script,
+ Handle<ByteArray> asm_js_offset_table);
+
+ // Check whether this module was generated from asm.js source.
+ bool is_asm_js();
+
+ // Recreate the ModuleWrapper from the module bytes after deserialization.
+ static void RecreateModuleWrapper(Isolate*, Handle<WasmSharedModuleData>);
};
class WasmCompiledModule : public FixedArray {
@@ -149,7 +190,7 @@ class WasmCompiledModule : public FixedArray {
return reinterpret_cast<WasmCompiledModule*>(fixed_array);
}
-#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID) \
+#define WCM_OBJECT_OR_WEAK(TYPE, NAME, ID, TYPE_CHECK) \
Handle<TYPE> NAME() const { return handle(ptr_to_##NAME()); } \
\
MaybeHandle<TYPE> maybe_##NAME() const { \
@@ -157,9 +198,15 @@ class WasmCompiledModule : public FixedArray {
return MaybeHandle<TYPE>(); \
} \
\
+ TYPE* maybe_ptr_to_##NAME() const { \
+ Object* obj = get(ID); \
+ if (!(TYPE_CHECK)) return nullptr; \
+ return TYPE::cast(obj); \
+ } \
+ \
TYPE* ptr_to_##NAME() const { \
Object* obj = get(ID); \
- if (!obj->Is##TYPE()) return nullptr; \
+ DCHECK(TYPE_CHECK); \
return TYPE::cast(obj); \
} \
\
@@ -167,11 +214,18 @@ class WasmCompiledModule : public FixedArray {
\
void set_ptr_to_##NAME(TYPE* value) { set(ID, value); } \
\
- bool has_##NAME() const { return get(ID)->Is##TYPE(); } \
+ bool has_##NAME() const { \
+ Object* obj = get(ID); \
+ return TYPE_CHECK; \
+ } \
\
void reset_##NAME() { set_undefined(ID); }
-#define WCM_OBJECT(TYPE, NAME) WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME)
+#define WCM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, obj->Is##TYPE())
+
+#define WCM_WASM_OBJECT(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(TYPE, NAME, kID_##NAME, TYPE::Is##TYPE(obj))
#define WCM_SMALL_NUMBER(TYPE, NAME) \
TYPE NAME() const { \
@@ -179,30 +233,29 @@ class WasmCompiledModule : public FixedArray {
} \
void set_##NAME(TYPE value) { set(kID_##NAME, Smi::FromInt(value)); }
-#define WCM_WEAK_LINK(TYPE, NAME) \
- WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME); \
- \
- Handle<TYPE> NAME() const { \
- return handle(TYPE::cast(weak_##NAME()->value())); \
+#define WCM_WEAK_LINK(TYPE, NAME) \
+ WCM_OBJECT_OR_WEAK(WeakCell, weak_##NAME, kID_##NAME, obj->IsWeakCell()); \
+ \
+ Handle<TYPE> NAME() const { \
+ return handle(TYPE::cast(weak_##NAME()->value())); \
}
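As a sketch, WCM_OBJECT(FixedArray, code_table) therefore generates
(signatures only, bodies as defined above):

  Handle<FixedArray> code_table() const;          // handle(ptr_to_code_table())
  MaybeHandle<FixedArray> maybe_code_table() const;
  FixedArray* maybe_ptr_to_code_table() const;    // nullptr unless obj->IsFixedArray()
  FixedArray* ptr_to_code_table() const;          // DCHECKs obj->IsFixedArray()
  void set_ptr_to_code_table(FixedArray* value);  // set(kID_code_table, value)
  bool has_code_table() const;
  void reset_code_table();                        // set_undefined(kID_code_table)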
-#define CORE_WCM_PROPERTY_TABLE(MACRO) \
- MACRO(OBJECT, FixedArray, code_table) \
- MACRO(OBJECT, Foreign, module_wrapper) \
- /* For debugging: */ \
- MACRO(OBJECT, SeqOneByteString, module_bytes) \
- MACRO(OBJECT, Script, script) \
- MACRO(OBJECT, ByteArray, asm_js_offset_tables) \
- /* End of debugging stuff */ \
- MACRO(OBJECT, FixedArray, function_tables) \
- MACRO(OBJECT, FixedArray, empty_function_tables) \
- MACRO(OBJECT, JSArrayBuffer, memory) \
- MACRO(SMALL_NUMBER, uint32_t, min_mem_pages) \
- MACRO(SMALL_NUMBER, uint32_t, max_mem_pages) \
- MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
- MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
- MACRO(WEAK_LINK, JSObject, owning_instance) \
- MACRO(WEAK_LINK, JSObject, wasm_module)
+#define CORE_WCM_PROPERTY_TABLE(MACRO) \
+ MACRO(WASM_OBJECT, WasmSharedModuleData, shared) \
+ MACRO(OBJECT, Context, native_context) \
+ MACRO(SMALL_NUMBER, uint32_t, num_imported_functions) \
+ MACRO(OBJECT, FixedArray, code_table) \
+ MACRO(OBJECT, FixedArray, weak_exported_functions) \
+ MACRO(OBJECT, FixedArray, function_tables) \
+ MACRO(OBJECT, FixedArray, signature_tables) \
+ MACRO(OBJECT, FixedArray, empty_function_tables) \
+ MACRO(OBJECT, JSArrayBuffer, memory) \
+ MACRO(SMALL_NUMBER, uint32_t, min_mem_pages) \
+ MACRO(SMALL_NUMBER, uint32_t, max_mem_pages) \
+ MACRO(WEAK_LINK, WasmCompiledModule, next_instance) \
+ MACRO(WEAK_LINK, WasmCompiledModule, prev_instance) \
+ MACRO(WEAK_LINK, JSObject, owning_instance) \
+ MACRO(WEAK_LINK, WasmModuleObject, wasm_module)
#if DEBUG
#define DEBUG_ONLY_TABLE(MACRO) MACRO(SMALL_NUMBER, uint32_t, instance_id)
@@ -223,8 +276,8 @@ class WasmCompiledModule : public FixedArray {
};
public:
- static Handle<WasmCompiledModule> New(
- Isolate* isolate, Handle<Managed<wasm::WasmModule>> module_wrapper);
+ static Handle<WasmCompiledModule> New(Isolate* isolate,
+ Handle<WasmSharedModuleData> shared);
static Handle<WasmCompiledModule> Clone(Isolate* isolate,
Handle<WasmCompiledModule> module) {
@@ -234,30 +287,93 @@ class WasmCompiledModule : public FixedArray {
ret->reset_weak_owning_instance();
ret->reset_weak_next_instance();
ret->reset_weak_prev_instance();
+ ret->reset_weak_exported_functions();
return ret;
}
uint32_t mem_size() const;
uint32_t default_mem_size() const;
- wasm::WasmModule* module() const;
-
#define DECLARATION(KIND, TYPE, NAME) WCM_##KIND(TYPE, NAME)
WCM_PROPERTY_TABLE(DECLARATION)
#undef DECLARATION
+// Allow methods of WasmSharedModuleData to be called on this object as well.
+#define FORWARD_SHARED(type, name) \
+ type name() { return shared()->name(); }
+ FORWARD_SHARED(SeqOneByteString*, module_bytes)
+ FORWARD_SHARED(wasm::WasmModule*, module)
+ FORWARD_SHARED(Script*, script)
+ FORWARD_SHARED(bool, is_asm_js)
+#undef FORWARD_SHARED
+
static bool IsWasmCompiledModule(Object* obj);
void PrintInstancesChain();
+ // Recreate the ModuleWrapper from the module bytes after deserialization.
static void RecreateModuleWrapper(Isolate* isolate,
- Handle<FixedArray> compiled_module);
+ Handle<WasmCompiledModule> compiled_module);
- // Extract a function name from the given wasm instance.
+ // Get the function name of the function identified by the given index.
// Returns a null handle if the function is unnamed or the name is not a valid
// UTF-8 string.
- static MaybeHandle<String> GetFunctionName(
- Handle<WasmCompiledModule> compiled_module, uint32_t func_index);
+ static MaybeHandle<String> GetFunctionNameOrNull(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index);
+
+ // Get the function name of the function identified by the given index.
+ // Returns "<WASM UNNAMED>" if the function is unnamed or the name is not a
+ // valid UTF-8 string.
+ static Handle<String> GetFunctionName(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index);
+
+ // Get the raw bytes of the function name of the function identified by the
+ // given index.
+ // Meant to be used for debugging or frame printing.
+ // Does not allocate, hence gc-safe.
+ Vector<const uint8_t> GetRawFunctionName(uint32_t func_index);
+
+ // Return the byte offset of the function identified by the given index.
+ // The offset will be relative to the start of the module bytes.
+ // Returns -1 if the function index is invalid.
+ int GetFunctionOffset(uint32_t func_index);
+
+ // Returns the function containing the given byte offset.
+ // Returns -1 if the byte offset is not contained in any function of this
+ // module.
+ int GetContainingFunction(uint32_t byte_offset);
+
+ // Translate from byte offset in the module to function number and byte offset
+ // within that function, encoded as line and column in the position info.
+ // Returns true if the position is valid inside this module, false otherwise.
+ bool GetPositionInfo(uint32_t position, Script::PositionInfo* info);
+
+ // Get the asm.js source position from a byte offset.
+ // Must only be called if the associated wasm object was created from asm.js.
+ static int GetAsmJsSourcePosition(Handle<WasmCompiledModule> compiled_module,
+ uint32_t func_index, uint32_t byte_offset,
+ bool is_at_number_conversion);
+
+ // Compute the disassembly of a wasm function.
+ // Returns the disassembly string and a list of <byte_offset, line, column>
+ // entries, mapping wasm byte offsets to line and column in the disassembly.
+ // The list is guaranteed to be ordered by the byte_offset.
+ // Returns an empty string and empty vector if the function index is invalid.
+ debug::WasmDisassembly DisassembleFunction(int func_index);
+
+ // Extract a portion of the wire bytes as UTF-8 string.
+ // Returns a null handle if the respective bytes do not form a valid UTF-8
+ // string.
+ static MaybeHandle<String> ExtractUtf8StringFromModuleBytes(
+ Isolate* isolate, Handle<WasmCompiledModule> compiled_module,
+ uint32_t offset, uint32_t size);
+
+ // Get a list of all possible breakpoints within a given range of this module.
+ bool GetPossibleBreakpoints(const debug::Location& start,
+ const debug::Location& end,
+ std::vector<debug::Location>* locations);
private:
void InitId();
@@ -267,36 +383,79 @@ class WasmCompiledModule : public FixedArray {
class WasmDebugInfo : public FixedArray {
public:
- enum class Fields { kFieldCount };
-
- static Handle<WasmDebugInfo> New(Handle<JSObject> wasm);
+ enum Fields {
+ kInstance,
+ kInterpreterHandle,
+ kInterpretedFunctions,
+ kFieldCount
+ };
- static bool IsDebugInfo(Object* object);
- static WasmDebugInfo* cast(Object* object);
+ static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
- JSObject* wasm_instance();
+ static bool IsDebugInfo(Object*);
+ static WasmDebugInfo* cast(Object*);
- bool SetBreakPoint(int byte_offset);
+ static void SetBreakpoint(Handle<WasmDebugInfo>, int func_index, int offset);
- // Get the Script for the specified function.
- static Script* GetFunctionScript(Handle<WasmDebugInfo> debug_info,
- int func_index);
+ static void RunInterpreter(Handle<WasmDebugInfo>, int func_index,
+ uint8_t* arg_buffer);
- // Disassemble the specified function from this module.
- static Handle<String> DisassembleFunction(Handle<WasmDebugInfo> debug_info,
- int func_index);
+ DECLARE_GETTER(wasm_instance, WasmInstanceObject);
+};
- // Get the offset table for the specified function, mapping from byte offsets
- // to position in the disassembly.
- // Returns an array with three entries per instruction: byte offset, line and
- // column.
- static Handle<FixedArray> GetFunctionOffsetTable(
- Handle<WasmDebugInfo> debug_info, int func_index);
+class WasmInstanceWrapper : public FixedArray {
+ public:
+ static Handle<WasmInstanceWrapper> New(Isolate* isolate,
+ Handle<WasmInstanceObject> instance);
+ static WasmInstanceWrapper* cast(Object* fixed_array) {
+ SLOW_DCHECK(IsWasmInstanceWrapper(fixed_array));
+ return reinterpret_cast<WasmInstanceWrapper*>(fixed_array);
+ }
+ static bool IsWasmInstanceWrapper(Object* obj);
+ bool has_instance() { return get(kWrapperInstanceObject)->IsWeakCell(); }
+ Handle<WasmInstanceObject> instance_object() {
+ Object* obj = get(kWrapperInstanceObject);
+ DCHECK(obj->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(obj);
+ DCHECK(cell->value()->IsJSObject());
+ return handle(WasmInstanceObject::cast(cell->value()));
+ }
+ bool has_next() { return IsWasmInstanceWrapper(get(kNextInstanceWrapper)); }
+ bool has_previous() {
+ return IsWasmInstanceWrapper(get(kPreviousInstanceWrapper));
+ }
+ void set_instance_object(Handle<JSObject> instance, Isolate* isolate);
+ void set_next_wrapper(Object* obj) {
+ DCHECK(IsWasmInstanceWrapper(obj));
+ set(kNextInstanceWrapper, obj);
+ }
+ void set_previous_wrapper(Object* obj) {
+ DCHECK(IsWasmInstanceWrapper(obj));
+ set(kPreviousInstanceWrapper, obj);
+ }
+ Handle<WasmInstanceWrapper> next_wrapper() {
+ Object* obj = get(kNextInstanceWrapper);
+ DCHECK(IsWasmInstanceWrapper(obj));
+ return handle(WasmInstanceWrapper::cast(obj));
+ }
+ Handle<WasmInstanceWrapper> previous_wrapper() {
+ Object* obj = get(kPreviousInstanceWrapper);
+ DCHECK(IsWasmInstanceWrapper(obj));
+ return handle(WasmInstanceWrapper::cast(obj));
+ }
+ void reset_next_wrapper() { set_undefined(kNextInstanceWrapper); }
+ void reset_previous_wrapper() { set_undefined(kPreviousInstanceWrapper); }
+ void reset() {
+ for (int kID = 0; kID < kWrapperPropertyCount; kID++) set_undefined(kID);
+ }
- // Get the asm.js source position from a byte offset.
- // Must only be called if the associated wasm object was created from asm.js.
- static int GetAsmJsSourcePosition(Handle<WasmDebugInfo> debug_info,
- int func_index, int byte_offset);
+ private:
+ enum {
+ kWrapperInstanceObject,
+ kNextInstanceWrapper,
+ kPreviousInstanceWrapper,
+ kWrapperPropertyCount
+ };
};
#undef DECLARE_ACCESSORS
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 8f81b81a50..2a00a73cbd 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -4,13 +4,14 @@
#include "src/wasm/wasm-opcodes.h"
#include "src/messages.h"
+#include "src/runtime/runtime.h"
#include "src/signature.h"
namespace v8 {
namespace internal {
namespace wasm {
-typedef Signature<LocalType> FunctionSig;
+typedef Signature<ValueType> FunctionSig;
const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
switch (opcode) {
@@ -69,7 +70,7 @@ enum WasmOpcodeSig { FOREACH_SIGNATURE(DECLARE_SIG_ENUM) };
// TODO(titzer): not static-initializer safe. Wrap in LazyInstance.
#define DECLARE_SIG(name, ...) \
- static LocalType kTypes_##name[] = {__VA_ARGS__}; \
+ static ValueType kTypes_##name[] = {__VA_ARGS__}; \
static const FunctionSig kSig_##name( \
1, static_cast<int>(arraysize(kTypes_##name)) - 1, kTypes_##name);
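For instance, DECLARE_SIG(i_ii, kWasmI32, kWasmI32, kWasmI32) expands to
roughly the following, yielding a signature with one i32 return and two i32
parameters (illustrative expansion):

  static ValueType kTypes_i_ii[] = {kWasmI32, kWasmI32, kWasmI32};
  static const FunctionSig kSig_i_ii(1, 2, kTypes_i_ii);  // 1 return, 2 params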
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index ec22579bd7..6c231ac69b 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -7,6 +7,7 @@
#include "src/globals.h"
#include "src/machine-type.h"
+#include "src/runtime/runtime.h"
#include "src/signature.h"
namespace v8 {
@@ -14,7 +15,7 @@ namespace internal {
namespace wasm {
// Binary encoding of local types.
-enum LocalTypeCode {
+enum ValueTypeCode {
kLocalVoid = 0x40,
kLocalI32 = 0x7f,
kLocalI64 = 0x7e,
@@ -26,19 +27,18 @@ enum LocalTypeCode {
// Type code for multi-value block types.
static const uint8_t kMultivalBlock = 0x41;
-// We reuse the internal machine type to represent WebAssembly AST types.
+// We reuse the internal machine type to represent WebAssembly types.
// A typedef improves readability without adding a whole new type system.
-typedef MachineRepresentation LocalType;
-const LocalType kAstStmt = MachineRepresentation::kNone;
-const LocalType kAstI32 = MachineRepresentation::kWord32;
-const LocalType kAstI64 = MachineRepresentation::kWord64;
-const LocalType kAstF32 = MachineRepresentation::kFloat32;
-const LocalType kAstF64 = MachineRepresentation::kFloat64;
-const LocalType kAstS128 = MachineRepresentation::kSimd128;
-// We use kTagged here because kNone is already used by kAstStmt.
-const LocalType kAstEnd = MachineRepresentation::kTagged;
-
-typedef Signature<LocalType> FunctionSig;
+typedef MachineRepresentation ValueType;
+const ValueType kWasmStmt = MachineRepresentation::kNone;
+const ValueType kWasmI32 = MachineRepresentation::kWord32;
+const ValueType kWasmI64 = MachineRepresentation::kWord64;
+const ValueType kWasmF32 = MachineRepresentation::kFloat32;
+const ValueType kWasmF64 = MachineRepresentation::kFloat64;
+const ValueType kWasmS128 = MachineRepresentation::kSimd128;
+const ValueType kWasmVar = MachineRepresentation::kTagged;
+
+typedef Signature<ValueType> FunctionSig;
std::ostream& operator<<(std::ostream& os, const FunctionSig& function);
typedef Vector<const char> WasmName;
@@ -77,8 +77,7 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32Const, 0x41, _) \
V(I64Const, 0x42, _) \
V(F32Const, 0x43, _) \
- V(F64Const, 0x44, _) \
- V(I8Const, 0xcb, _ /* TODO(titzer): V8 specific, remove */)
+ V(F64Const, 0x44, _)
// Load memory expressions.
#define FOREACH_LOAD_MEM_OPCODE(V) \
@@ -276,7 +275,6 @@ const WasmCodePosition kNoCodePosition = -1;
#define FOREACH_SIMD_0_OPERAND_OPCODE(V) \
V(F32x4Splat, 0xe500, s_f) \
- V(F32x4ReplaceLane, 0xe502, s_sif) \
V(F32x4Abs, 0xe503, s_s) \
V(F32x4Neg, 0xe504, s_s) \
V(F32x4Sqrt, 0xe505, s_s) \
@@ -296,13 +294,9 @@ const WasmCodePosition kNoCodePosition = -1;
V(F32x4Le, 0xe513, s_ss) \
V(F32x4Gt, 0xe514, s_ss) \
V(F32x4Ge, 0xe515, s_ss) \
- V(F32x4Select, 0xe516, s_sss) \
- V(F32x4Swizzle, 0xe517, s_s) \
- V(F32x4Shuffle, 0xe518, s_ss) \
V(F32x4FromInt32x4, 0xe519, s_s) \
V(F32x4FromUint32x4, 0xe51a, s_s) \
V(I32x4Splat, 0xe51b, s_i) \
- V(I32x4ReplaceLane, 0xe51d, s_sii) \
V(I32x4Neg, 0xe51e, s_s) \
V(I32x4Add, 0xe51f, s_ss) \
V(I32x4Sub, 0xe520, s_ss) \
@@ -330,7 +324,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I32x4Ge_u, 0xe536, s_ss) \
V(Ui32x4FromFloat32x4, 0xe537, s_s) \
V(I16x8Splat, 0xe538, s_i) \
- V(I16x8ReplaceLane, 0xe53a, s_sii) \
V(I16x8Neg, 0xe53b, s_s) \
V(I16x8Add, 0xe53c, s_ss) \
V(I16x8AddSaturate_s, 0xe53d, s_ss) \
@@ -360,7 +353,6 @@ const WasmCodePosition kNoCodePosition = -1;
V(I16x8Gt_u, 0xe555, s_ss) \
V(I16x8Ge_u, 0xe556, s_ss) \
V(I8x16Splat, 0xe557, s_i) \
- V(I8x16ReplaceLane, 0xe559, s_sii) \
V(I8x16Neg, 0xe55a, s_s) \
V(I8x16Add, 0xe55b, s_ss) \
V(I8x16AddSaturate_s, 0xe55c, s_ss) \
@@ -392,13 +384,20 @@ const WasmCodePosition kNoCodePosition = -1;
V(S128And, 0xe576, s_ss) \
V(S128Ior, 0xe577, s_ss) \
V(S128Xor, 0xe578, s_ss) \
- V(S128Not, 0xe579, s_s)
+ V(S128Not, 0xe579, s_s) \
+ V(S32x4Select, 0xe580, s_sss) \
+ V(S32x4Swizzle, 0xe581, s_s) \
+ V(S32x4Shuffle, 0xe582, s_ss)
#define FOREACH_SIMD_1_OPERAND_OPCODE(V) \
V(F32x4ExtractLane, 0xe501, _) \
+ V(F32x4ReplaceLane, 0xe502, _) \
V(I32x4ExtractLane, 0xe51c, _) \
+ V(I32x4ReplaceLane, 0xe51d, _) \
V(I16x8ExtractLane, 0xe539, _) \
- V(I8x16ExtractLane, 0xe558, _)
+ V(I16x8ReplaceLane, 0xe53a, _) \
+ V(I8x16ExtractLane, 0xe558, _) \
+ V(I8x16ReplaceLane, 0xe559, _)
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicAdd8S, 0xe601, i_ii) \
@@ -451,45 +450,43 @@ const WasmCodePosition kNoCodePosition = -1;
FOREACH_ATOMIC_OPCODE(V)
// All signatures.
-#define FOREACH_SIGNATURE(V) \
- FOREACH_SIMD_SIGNATURE(V) \
- V(i_ii, kAstI32, kAstI32, kAstI32) \
- V(i_i, kAstI32, kAstI32) \
- V(i_v, kAstI32) \
- V(i_ff, kAstI32, kAstF32, kAstF32) \
- V(i_f, kAstI32, kAstF32) \
- V(i_dd, kAstI32, kAstF64, kAstF64) \
- V(i_d, kAstI32, kAstF64) \
- V(i_l, kAstI32, kAstI64) \
- V(l_ll, kAstI64, kAstI64, kAstI64) \
- V(i_ll, kAstI32, kAstI64, kAstI64) \
- V(l_l, kAstI64, kAstI64) \
- V(l_i, kAstI64, kAstI32) \
- V(l_f, kAstI64, kAstF32) \
- V(l_d, kAstI64, kAstF64) \
- V(f_ff, kAstF32, kAstF32, kAstF32) \
- V(f_f, kAstF32, kAstF32) \
- V(f_d, kAstF32, kAstF64) \
- V(f_i, kAstF32, kAstI32) \
- V(f_l, kAstF32, kAstI64) \
- V(d_dd, kAstF64, kAstF64, kAstF64) \
- V(d_d, kAstF64, kAstF64) \
- V(d_f, kAstF64, kAstF32) \
- V(d_i, kAstF64, kAstI32) \
- V(d_l, kAstF64, kAstI64) \
- V(d_id, kAstF64, kAstI32, kAstF64) \
- V(f_if, kAstF32, kAstI32, kAstF32) \
- V(l_il, kAstI64, kAstI32, kAstI64)
-
-#define FOREACH_SIMD_SIGNATURE(V) \
- V(s_s, kAstS128, kAstS128) \
- V(s_f, kAstS128, kAstF32) \
- V(s_sif, kAstS128, kAstS128, kAstI32, kAstF32) \
- V(s_ss, kAstS128, kAstS128, kAstS128) \
- V(s_sss, kAstS128, kAstS128, kAstS128, kAstS128) \
- V(s_i, kAstS128, kAstI32) \
- V(s_sii, kAstS128, kAstS128, kAstI32, kAstI32) \
- V(s_si, kAstS128, kAstS128, kAstI32)
+#define FOREACH_SIGNATURE(V) \
+ FOREACH_SIMD_SIGNATURE(V) \
+ V(i_ii, kWasmI32, kWasmI32, kWasmI32) \
+ V(i_i, kWasmI32, kWasmI32) \
+ V(i_v, kWasmI32) \
+ V(i_ff, kWasmI32, kWasmF32, kWasmF32) \
+ V(i_f, kWasmI32, kWasmF32) \
+ V(i_dd, kWasmI32, kWasmF64, kWasmF64) \
+ V(i_d, kWasmI32, kWasmF64) \
+ V(i_l, kWasmI32, kWasmI64) \
+ V(l_ll, kWasmI64, kWasmI64, kWasmI64) \
+ V(i_ll, kWasmI32, kWasmI64, kWasmI64) \
+ V(l_l, kWasmI64, kWasmI64) \
+ V(l_i, kWasmI64, kWasmI32) \
+ V(l_f, kWasmI64, kWasmF32) \
+ V(l_d, kWasmI64, kWasmF64) \
+ V(f_ff, kWasmF32, kWasmF32, kWasmF32) \
+ V(f_f, kWasmF32, kWasmF32) \
+ V(f_d, kWasmF32, kWasmF64) \
+ V(f_i, kWasmF32, kWasmI32) \
+ V(f_l, kWasmF32, kWasmI64) \
+ V(d_dd, kWasmF64, kWasmF64, kWasmF64) \
+ V(d_d, kWasmF64, kWasmF64) \
+ V(d_f, kWasmF64, kWasmF32) \
+ V(d_i, kWasmF64, kWasmI32) \
+ V(d_l, kWasmF64, kWasmI64) \
+ V(d_id, kWasmF64, kWasmI32, kWasmF64) \
+ V(f_if, kWasmF32, kWasmI32, kWasmF32) \
+ V(l_il, kWasmI64, kWasmI32, kWasmI64)
+
+#define FOREACH_SIMD_SIGNATURE(V) \
+ V(s_s, kWasmS128, kWasmS128) \
+ V(s_f, kWasmS128, kWasmF32) \
+ V(s_ss, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_sss, kWasmS128, kWasmS128, kWasmS128, kWasmS128) \
+ V(s_i, kWasmS128, kWasmI32) \
+ V(s_si, kWasmS128, kWasmS128, kWasmI32)
#define FOREACH_PREFIX(V) \
V(Simd, 0xe5) \
@@ -514,8 +511,7 @@ enum WasmOpcode {
V(TrapRemByZero) \
V(TrapFloatUnrepresentable) \
V(TrapFuncInvalid) \
- V(TrapFuncSigMismatch) \
- V(TrapInvalidIndex)
+ V(TrapFuncSigMismatch)
enum TrapReason {
#define DECLARE_ENUM(name) k##name,
@@ -541,21 +537,21 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
return 1 << ElementSizeLog2Of(type.representation());
}
- static byte MemSize(LocalType type) { return 1 << ElementSizeLog2Of(type); }
+ static byte MemSize(ValueType type) { return 1 << ElementSizeLog2Of(type); }
- static LocalTypeCode LocalTypeCodeFor(LocalType type) {
+ static ValueTypeCode ValueTypeCodeFor(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return kLocalI32;
- case kAstI64:
+ case kWasmI64:
return kLocalI64;
- case kAstF32:
+ case kWasmF32:
return kLocalF32;
- case kAstF64:
+ case kWasmF64:
return kLocalF64;
- case kAstS128:
+ case kWasmS128:
return kLocalS128;
- case kAstStmt:
+ case kWasmStmt:
return kLocalVoid;
default:
UNREACHABLE();
@@ -563,19 +559,19 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
}
- static MachineType MachineTypeFor(LocalType type) {
+ static MachineType MachineTypeFor(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return MachineType::Int32();
- case kAstI64:
+ case kWasmI64:
return MachineType::Int64();
- case kAstF32:
+ case kWasmF32:
return MachineType::Float32();
- case kAstF64:
+ case kWasmF64:
return MachineType::Float64();
- case kAstS128:
+ case kWasmS128:
return MachineType::Simd128();
- case kAstStmt:
+ case kWasmStmt:
return MachineType::None();
default:
UNREACHABLE();
@@ -583,32 +579,32 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
}
- static LocalType LocalTypeFor(MachineType type) {
+ static ValueType ValueTypeFor(MachineType type) {
if (type == MachineType::Int8()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Uint8()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Int16()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Uint16()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Int32()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Uint32()) {
- return kAstI32;
+ return kWasmI32;
} else if (type == MachineType::Int64()) {
- return kAstI64;
+ return kWasmI64;
} else if (type == MachineType::Uint64()) {
- return kAstI64;
+ return kWasmI64;
} else if (type == MachineType::Float32()) {
- return kAstF32;
+ return kWasmF32;
} else if (type == MachineType::Float64()) {
- return kAstF64;
+ return kWasmF64;
} else if (type == MachineType::Simd128()) {
- return kAstS128;
+ return kWasmS128;
} else {
UNREACHABLE();
- return kAstI32;
+ return kWasmI32;
}
}
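A hypothetical standalone restatement of the rule ValueTypeFor encodes: every machine integer narrower than 64 bits widens to wasm's i32, since wasm has no 8- or 16-bit value types; only memory accesses are narrow.

    #include <cassert>

    enum class MType { kI8, kU8, kI16, kU16, kI32, kU32, kI64, kU64, kF32, kF64 };
    enum class VType { kI32, kI64, kF32, kF64 };

    VType WidenToWasm(MType t) {
      switch (t) {
        case MType::kI64:
        case MType::kU64:
          return VType::kI64;
        case MType::kF32:
          return VType::kF32;
        case MType::kF64:
          return VType::kF64;
        default:
          return VType::kI32;  // i8/u8/i16/u16/i32/u32 all widen to i32
      }
    }

    int main() {
      assert(WidenToWasm(MType::kU16) == VType::kI32);
      assert(WidenToWasm(MType::kI64) == VType::kI64);
    }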
@@ -639,44 +635,43 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
}
- static char ShortNameOf(LocalType type) {
+ static char ShortNameOf(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return 'i';
- case kAstI64:
+ case kWasmI64:
return 'l';
- case kAstF32:
+ case kWasmF32:
return 'f';
- case kAstF64:
+ case kWasmF64:
return 'd';
- case kAstS128:
+ case kWasmS128:
return 's';
- case kAstStmt:
+ case kWasmStmt:
return 'v';
- case kAstEnd:
- return 'x';
+ case kWasmVar:
+ return '*';
default:
- UNREACHABLE();
return '?';
}
}
- static const char* TypeName(LocalType type) {
+ static const char* TypeName(ValueType type) {
switch (type) {
- case kAstI32:
+ case kWasmI32:
return "i32";
- case kAstI64:
+ case kWasmI64:
return "i64";
- case kAstF32:
+ case kWasmF32:
return "f32";
- case kAstF64:
+ case kWasmF64:
return "f64";
- case kAstS128:
+ case kWasmS128:
return "s128";
- case kAstStmt:
+ case kWasmStmt:
return "<stmt>";
- case kAstEnd:
- return "<end>";
+ case kWasmVar:
+ return "<var>";
default:
return "<unknown>";
}
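The single characters returned by ShortNameOf compose into the labels used throughout FOREACH_SIGNATURE: result letter, underscore, parameter letters, with 'v' standing in for an empty parameter list (as in "i_v" above). A small sketch of the convention:

    #include <string>
    #include <vector>

    std::string SigName(char result, const std::vector<char>& params) {
      std::string name(1, result);
      name += '_';
      if (params.empty()) return name + 'v';
      return name + std::string(params.begin(), params.end());
    }
    // SigName('i', {'i', 'i'}) == "i_ii"; SigName('d', {'i', 'd'}) == "d_id".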
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 6d535e3f57..e22f9ad442 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -64,14 +64,25 @@ void ErrorThrower::RangeError(const char* format, ...) {
void ErrorThrower::CompileError(const char* format, ...) {
if (error()) return;
+ wasm_error_ = true;
va_list arguments;
va_start(arguments, format);
Format(isolate_->wasm_compile_error_function(), format, arguments);
va_end(arguments);
}
+void ErrorThrower::LinkError(const char* format, ...) {
+ if (error()) return;
+ wasm_error_ = true;
+ va_list arguments;
+ va_start(arguments, format);
+ Format(isolate_->wasm_link_error_function(), format, arguments);
+ va_end(arguments);
+}
+
void ErrorThrower::RuntimeError(const char* format, ...) {
if (error()) return;
+ wasm_error_ = true;
va_list arguments;
va_start(arguments, format);
Format(isolate_->wasm_runtime_error_function(), format, arguments);
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 53c6b8dcf9..004ac22d33 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -95,6 +95,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RangeError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void CompileError(const char* fmt, ...);
+ PRINTF_FORMAT(2, 3) void LinkError(const char* fmt, ...);
PRINTF_FORMAT(2, 3) void RuntimeError(const char* fmt, ...);
template <typename T>
@@ -111,6 +112,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
}
bool error() const { return !exception_.is_null(); }
+ bool wasm_error() { return wasm_error_; }
private:
void Format(i::Handle<i::JSFunction> constructor, const char* fmt, va_list);
@@ -118,6 +120,7 @@ class V8_EXPORT_PRIVATE ErrorThrower {
i::Isolate* isolate_;
const char* context_;
i::Handle<i::Object> exception_;
+ bool wasm_error_ = false;
};
} // namespace wasm
} // namespace internal
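A standalone mock (not the real V8 class) of the pattern the new LinkError/wasm_error() pair enables: errors defined by the wasm spec (CompileError, LinkError, RuntimeError) set a flag, so callers can tell them apart from ordinary TypeError/RangeError without inspecting the exception object.

    #include <string>

    class MockErrorThrower {
     public:
      void TypeError(const std::string& msg) { Set(msg, /*wasm=*/false); }
      void LinkError(const std::string& msg) { Set(msg, /*wasm=*/true); }
      bool error() const { return !message_.empty(); }
      bool wasm_error() const { return wasm_error_; }

     private:
      void Set(const std::string& msg, bool wasm) {
        if (error()) return;  // first error wins, as in the diff
        message_ = msg;
        wasm_error_ = wasm;
      }
      std::string message_;
      bool wasm_error_ = false;
    };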
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
new file mode 100644
index 0000000000..1878095b09
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -0,0 +1,312 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/wasm-text.h"
+
+#include "src/debug/interface-types.h"
+#include "src/ostreams.h"
+#include "src/vector.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone.h"
+
+using namespace v8;
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+namespace {
+const char *GetOpName(WasmOpcode opcode) {
+#define CASE_OP(name, str) \
+ case kExpr##name: \
+ return str;
+#define CASE_I32_OP(name, str) CASE_OP(I32##name, "i32." str)
+#define CASE_I64_OP(name, str) CASE_OP(I64##name, "i64." str)
+#define CASE_F32_OP(name, str) CASE_OP(F32##name, "f32." str)
+#define CASE_F64_OP(name, str) CASE_OP(F64##name, "f64." str)
+#define CASE_INT_OP(name, str) CASE_I32_OP(name, str) CASE_I64_OP(name, str)
+#define CASE_FLOAT_OP(name, str) CASE_F32_OP(name, str) CASE_F64_OP(name, str)
+#define CASE_ALL_OP(name, str) CASE_FLOAT_OP(name, str) CASE_INT_OP(name, str)
+#define CASE_SIGN_OP(TYPE, name, str) \
+ CASE_##TYPE##_OP(name##S, str "_s") CASE_##TYPE##_OP(name##U, str "_u")
+#define CASE_ALL_SIGN_OP(name, str) \
+ CASE_FLOAT_OP(name, str) CASE_SIGN_OP(INT, name, str)
+#define CASE_CONVERT_OP(name, RES, SRC, src_suffix, str) \
+ CASE_##RES##_OP(U##name##SRC, str "_u/" src_suffix) \
+ CASE_##RES##_OP(S##name##SRC, str "_s/" src_suffix)
+
+ switch (opcode) {
+ CASE_INT_OP(Eqz, "eqz")
+ CASE_ALL_OP(Eq, "eq")
+ CASE_ALL_OP(Ne, "ne")
+ CASE_ALL_OP(Add, "add")
+ CASE_ALL_OP(Sub, "sub")
+ CASE_ALL_OP(Mul, "mul")
+ CASE_ALL_SIGN_OP(Lt, "lt")
+ CASE_ALL_SIGN_OP(Gt, "gt")
+ CASE_ALL_SIGN_OP(Le, "le")
+ CASE_ALL_SIGN_OP(Ge, "ge")
+ CASE_INT_OP(Clz, "clz")
+ CASE_INT_OP(Ctz, "ctz")
+ CASE_INT_OP(Popcnt, "popcnt")
+ CASE_ALL_SIGN_OP(Div, "div")
+ CASE_SIGN_OP(INT, Rem, "rem")
+ CASE_INT_OP(And, "and")
+ CASE_INT_OP(Ior, "or")
+ CASE_INT_OP(Xor, "xor")
+ CASE_INT_OP(Shl, "shl")
+ CASE_SIGN_OP(INT, Shr, "shr")
+ CASE_INT_OP(Rol, "rol")
+ CASE_INT_OP(Ror, "ror")
+ CASE_FLOAT_OP(Abs, "abs")
+ CASE_FLOAT_OP(Neg, "neg")
+ CASE_FLOAT_OP(Ceil, "ceil")
+ CASE_FLOAT_OP(Floor, "floor")
+ CASE_FLOAT_OP(Trunc, "trunc")
+ CASE_FLOAT_OP(NearestInt, "nearest")
+ CASE_FLOAT_OP(Sqrt, "sqrt")
+ CASE_FLOAT_OP(Min, "min")
+ CASE_FLOAT_OP(Max, "max")
+ CASE_FLOAT_OP(CopySign, "copysign")
+ CASE_I32_OP(ConvertI64, "wrap/i64")
+ CASE_CONVERT_OP(Convert, INT, F32, "f32", "trunc")
+ CASE_CONVERT_OP(Convert, INT, F64, "f64", "trunc")
+ CASE_CONVERT_OP(Convert, I64, I32, "i32", "extend")
+ CASE_CONVERT_OP(Convert, F32, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F32, I64, "i64", "convert")
+ CASE_F32_OP(ConvertF64, "demote/f64")
+ CASE_CONVERT_OP(Convert, F64, I32, "i32", "convert")
+ CASE_CONVERT_OP(Convert, F64, I64, "i64", "convert")
+ CASE_F64_OP(ConvertF32, "promote/f32")
+ CASE_I32_OP(ReinterpretF32, "reinterpret/f32")
+ CASE_I64_OP(ReinterpretF64, "reinterpret/f64")
+ CASE_F32_OP(ReinterpretI32, "reinterpret/i32")
+ CASE_F64_OP(ReinterpretI64, "reinterpret/i64")
+ CASE_OP(Unreachable, "unreachable")
+ CASE_OP(Nop, "nop")
+ CASE_OP(Return, "return")
+ CASE_OP(MemorySize, "current_memory")
+ CASE_OP(GrowMemory, "grow_memory")
+ CASE_OP(Loop, "loop")
+ CASE_OP(If, "if")
+ CASE_OP(Block, "block")
+ CASE_OP(Try, "try")
+ CASE_OP(Throw, "throw")
+ CASE_OP(Catch, "catch")
+ CASE_OP(Drop, "drop")
+ CASE_OP(Select, "select")
+ CASE_ALL_OP(LoadMem, "load")
+ CASE_SIGN_OP(INT, LoadMem8, "load8")
+ CASE_SIGN_OP(INT, LoadMem16, "load16")
+ CASE_SIGN_OP(I64, LoadMem32, "load32")
+ CASE_ALL_OP(StoreMem, "store")
+ CASE_INT_OP(StoreMem8, "store8")
+ CASE_INT_OP(StoreMem16, "store16")
+ CASE_I64_OP(StoreMem32, "store32")
+ CASE_OP(SetLocal, "set_local")
+ CASE_OP(GetLocal, "get_local")
+ CASE_OP(TeeLocal, "tee_local")
+ CASE_OP(GetGlobal, "get_global")
+ CASE_OP(SetGlobal, "set_global")
+ CASE_OP(Br, "br")
+ CASE_OP(BrIf, "br_if")
+ default:
+ UNREACHABLE();
+ return "";
+ }
+}
+
+bool IsValidFunctionName(const Vector<const char> &name) {
+ if (name.is_empty()) return false;
+ const char *special_chars = "_.+-*/\\^~=<>!?@#$%&|:'`";
+ for (char c : name) {
+ bool valid_char = (c >= '0' && c <= '9') || (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') || strchr(special_chars, c);
+ if (!valid_char) return false;
+ }
+ return true;
+}
+
+} // namespace
+
+void wasm::PrintWasmText(const WasmModule *module,
+ const ModuleWireBytes &wire_bytes, uint32_t func_index,
+ std::ostream &os,
+ debug::WasmDisassembly::OffsetTable *offset_table) {
+ DCHECK_NOT_NULL(module);
+ DCHECK_GT(module->functions.size(), func_index);
+ const WasmFunction *fun = &module->functions[func_index];
+
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+ int line_nr = 0;
+ int control_depth = 1;
+
+ // Print the function signature.
+ os << "func";
+ WasmName fun_name = wire_bytes.GetNameOrNull(fun);
+ if (IsValidFunctionName(fun_name)) {
+ os << " $";
+ os.write(fun_name.start(), fun_name.length());
+ }
+ size_t param_count = fun->sig->parameter_count();
+ if (param_count) {
+ os << " (param";
+ for (size_t i = 0; i < param_count; ++i)
+ os << ' ' << WasmOpcodes::TypeName(fun->sig->GetParam(i));
+ os << ')';
+ }
+ size_t return_count = fun->sig->return_count();
+ if (return_count) {
+ os << " (result";
+ for (size_t i = 0; i < return_count; ++i)
+ os << ' ' << WasmOpcodes::TypeName(fun->sig->GetReturn(i));
+ os << ')';
+ }
+ os << "\n";
+ ++line_nr;
+
+ // Print the local declarations.
+ BodyLocalDecls decls(&zone);
+ Vector<const byte> func_bytes = wire_bytes.module_bytes.SubVector(
+ fun->code_start_offset, fun->code_end_offset);
+ BytecodeIterator i(func_bytes.begin(), func_bytes.end(), &decls);
+ DCHECK_LT(func_bytes.begin(), i.pc());
+ if (!decls.type_list.empty()) {
+ os << "(local";
+ for (const ValueType &v : decls.type_list) {
+ os << ' ' << WasmOpcodes::TypeName(v);
+ }
+ os << ")\n";
+ ++line_nr;
+ }
+
+ for (; i.has_next(); i.next()) {
+ WasmOpcode opcode = i.current();
+ if (opcode == kExprElse || opcode == kExprEnd) --control_depth;
+
+ DCHECK_LE(0, control_depth);
+ const int kMaxIndentation = 64;
+ int indentation = std::min(kMaxIndentation, 2 * control_depth);
+ if (offset_table) {
+ offset_table->push_back(debug::WasmDisassemblyOffsetTableEntry(
+ i.pc_offset(), line_nr, indentation));
+ }
+
+    // Padding of 64 spaces (kMaxIndentation).
+ const char padding[kMaxIndentation + 1] =
+ " ";
+ os.write(padding, indentation);
+
+ switch (opcode) {
+ case kExprLoop:
+ case kExprIf:
+ case kExprBlock:
+ case kExprTry: {
+ BlockTypeOperand operand(&i, i.pc());
+ os << GetOpName(opcode);
+ for (unsigned i = 0; i < operand.arity; i++) {
+ os << " " << WasmOpcodes::TypeName(operand.read_entry(i));
+ }
+ control_depth++;
+ break;
+ }
+ case kExprBr:
+ case kExprBrIf: {
+ BreakDepthOperand operand(&i, i.pc());
+ os << GetOpName(opcode) << ' ' << operand.depth;
+ break;
+ }
+ case kExprElse:
+ os << "else";
+ control_depth++;
+ break;
+ case kExprEnd:
+ os << "end";
+ break;
+ case kExprBrTable: {
+ BranchTableOperand operand(&i, i.pc());
+ BranchTableIterator iterator(&i, operand);
+ os << "br_table";
+ while (iterator.has_next()) os << ' ' << iterator.next();
+ break;
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand operand(&i, i.pc());
+ DCHECK_EQ(0, operand.table_index);
+ os << "call_indirect " << operand.index;
+ break;
+ }
+ case kExprCallFunction: {
+ CallFunctionOperand operand(&i, i.pc());
+ os << "call " << operand.index;
+ break;
+ }
+ case kExprGetLocal:
+ case kExprSetLocal:
+ case kExprTeeLocal:
+ case kExprCatch: {
+ LocalIndexOperand operand(&i, i.pc());
+ os << GetOpName(opcode) << ' ' << operand.index;
+ break;
+ }
+ case kExprGetGlobal:
+ case kExprSetGlobal: {
+ GlobalIndexOperand operand(&i, i.pc());
+ os << GetOpName(opcode) << ' ' << operand.index;
+ break;
+ }
+#define CASE_CONST(type, str, cast_type) \
+ case kExpr##type##Const: { \
+ Imm##type##Operand operand(&i, i.pc()); \
+ os << #str ".const " << static_cast<cast_type>(operand.value); \
+ break; \
+ }
+ CASE_CONST(I32, i32, int32_t)
+ CASE_CONST(I64, i64, int64_t)
+ CASE_CONST(F32, f32, float)
+ CASE_CONST(F64, f64, double)
+
+#define CASE_OPCODE(opcode, _, __) case kExpr##opcode:
+ FOREACH_LOAD_MEM_OPCODE(CASE_OPCODE)
+ FOREACH_STORE_MEM_OPCODE(CASE_OPCODE) {
+ MemoryAccessOperand operand(&i, i.pc(), kMaxUInt32);
+ os << GetOpName(opcode) << " offset=" << operand.offset
+ << " align=" << (1ULL << operand.alignment);
+ break;
+ }
+
+ FOREACH_SIMPLE_OPCODE(CASE_OPCODE)
+ case kExprUnreachable:
+ case kExprNop:
+ case kExprReturn:
+ case kExprMemorySize:
+ case kExprGrowMemory:
+ case kExprDrop:
+ case kExprSelect:
+ case kExprThrow:
+ os << GetOpName(opcode);
+ break;
+
+      // These opcodes are printed by their internal names only, as they
+      // should never be shown to end users.
+ FOREACH_ASMJS_COMPAT_OPCODE(CASE_OPCODE)
+ // TODO(wasm): Add correct printing for SIMD and atomic opcodes once
+ // they are publicly available.
+ FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
+ FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
+ FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
+ os << WasmOpcodes::OpcodeName(opcode);
+ break;
+
+ default:
+ UNREACHABLE();
+ break;
+ }
+ os << '\n';
+ ++line_nr;
+ }
+ DCHECK_EQ(0, control_depth);
+ DCHECK(i.ok());
+}
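A hypothetical consumer of the offset table PrintWasmText fills: each entry pairs a wasm byte offset with the (line, indentation) at which that opcode was printed, which is what a debugger needs to map byte offsets to text positions.

    #include <vector>

    struct OffsetEntry {  // stand-in for debug::WasmDisassemblyOffsetTableEntry
      int byte_offset, line, column;
    };

    // Entries are appended in increasing byte-offset order, so the line for a
    // given offset is that of the last entry at or before it.
    int LineForOffset(const std::vector<OffsetEntry>& table, int offset) {
      int line = 0;
      for (const OffsetEntry& e : table) {
        if (e.byte_offset > offset) break;
        line = e.line;
      }
      return line;
    }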
diff --git a/deps/v8/src/wasm/wasm-text.h b/deps/v8/src/wasm/wasm-text.h
new file mode 100644
index 0000000000..1608ea9a2d
--- /dev/null
+++ b/deps/v8/src/wasm/wasm-text.h
@@ -0,0 +1,38 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_S_EXPR_H_
+#define V8_WASM_S_EXPR_H_
+
+#include <cstdint>
+#include <ostream>
+#include <tuple>
+#include <vector>
+
+namespace v8 {
+
+namespace debug {
+struct WasmDisassemblyOffsetTableEntry;
+} // namespace debug
+
+namespace internal {
+namespace wasm {
+
+// Forward declaration.
+struct WasmModule;
+struct ModuleWireBytes;
+
+// Generate disassembly according to the official text format.
+// Output disassembly to the given output stream, and optionally return an
+// offset table of <byte offset, line, column> via the given pointer.
+void PrintWasmText(
+ const WasmModule *module, const ModuleWireBytes &wire_bytes,
+ uint32_t func_index, std::ostream &os,
+ std::vector<debug::WasmDisassemblyOffsetTableEntry> *offset_table);
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_S_EXPR_H_
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 518df5a47c..fb33872627 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsSimd128() { return true; }
// -----------------------------------------------------------------------------
// Implementation of Assembler
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 5402a8ce87..f8162b096a 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -135,13 +135,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
return Memory::uint32_at(pc_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Memory::Address_at(pc_) = address;
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Memory::uint32_at(pc_) = size;
}
@@ -601,12 +606,9 @@ void Assembler::immediate_arithmetic_op(byte subcode,
int size) {
EnsureSpace ensure_space(this);
emit_rex(dst, size);
- if (is_int8(src.value_)) {
+ if (is_int8(src.value_) && RelocInfo::IsNone(src.rmode_)) {
emit(0x83);
emit_operand(subcode, dst);
- if (!RelocInfo::IsNone(src.rmode_)) {
- RecordRelocInfo(src.rmode_);
- }
emit(src.value_);
} else {
emit(0x81);
@@ -2045,158 +2047,137 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register dst, Register src) {
EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_32(src, dst);
- emit(0x84);
- emit_modrm(src, dst);
- } else {
- if (!dst.is_byte_register() || !src.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(dst, src);
- }
- emit(0x84);
- emit_modrm(dst, src);
- }
+ emit_test(dst, src, sizeof(int8_t));
}
-
void Assembler::testb(Register reg, Immediate mask) {
DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit(0xA8);
- emit(mask.value_); // Low byte emitted.
- } else {
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg);
- }
- emit(0xF6);
- emit_modrm(0x0, reg);
- emit(mask.value_); // Low byte emitted.
- }
+ emit_test(reg, mask, sizeof(int8_t));
}
-
void Assembler::testb(const Operand& op, Immediate mask) {
DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
- emit(0xF6);
- emit_operand(rax, op); // Operation code 0
- emit(mask.value_); // Low byte emitted.
+ emit_test(op, mask, sizeof(int8_t));
}
void Assembler::testb(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- if (!reg.is_byte_register()) {
- // Register is not one of al, bl, cl, dl. Its encoding needs REX.
- emit_rex_32(reg, op);
- } else {
- emit_optional_rex_32(reg, op);
- }
- emit(0x84);
- emit_operand(reg, op);
+ emit_test(op, reg, sizeof(int8_t));
}
void Assembler::testw(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- if (src.low_bits() == 4) {
- emit_rex_32(src, dst);
- }
- emit(0x85);
- emit_modrm(src, dst);
+ emit_test(dst, src, sizeof(uint16_t));
}
void Assembler::testw(Register reg, Immediate mask) {
- DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
- EnsureSpace ensure_space(this);
- emit(0x66);
- if (reg.is(rax)) {
- emit(0xA9);
- emitw(mask.value_);
- } else {
- if (reg.low_bits() == 4) {
- emit_rex_32(reg);
- }
- emit(0xF7);
- emit_modrm(0x0, reg);
- emitw(mask.value_);
- }
+ emit_test(reg, mask, sizeof(int16_t));
}
void Assembler::testw(const Operand& op, Immediate mask) {
- DCHECK(is_int16(mask.value_) || is_uint16(mask.value_));
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(rax, op);
- emit(0xF7);
- emit_operand(rax, op);
- emitw(mask.value_);
+ emit_test(op, mask, sizeof(int16_t));
}
void Assembler::testw(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit(0x66);
- emit_optional_rex_32(reg, op);
- emit(0x85);
- emit_operand(rax, op);
+ emit_test(op, reg, sizeof(int16_t));
}
void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex(src, dst, size);
- emit(0x85);
- emit_modrm(src, dst);
+ if (src.low_bits() == 4) std::swap(dst, src);
+ if (size == sizeof(int16_t)) {
+ emit(0x66);
+ size = sizeof(int32_t);
+ }
+ bool byte_operand = size == sizeof(int8_t);
+ if (byte_operand) {
+ size = sizeof(int32_t);
+ if (!src.is_byte_register() || !dst.is_byte_register()) {
+ emit_rex_32(dst, src);
+ }
} else {
emit_rex(dst, src, size);
- emit(0x85);
- emit_modrm(dst, src);
}
+ emit(byte_operand ? 0x84 : 0x85);
+ emit_modrm(dst, src);
}
void Assembler::emit_test(Register reg, Immediate mask, int size) {
- // testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
- testb(reg, mask);
- return;
+ size = sizeof(int8_t);
+ } else if (is_uint16(mask.value_)) {
+ size = sizeof(int16_t);
}
EnsureSpace ensure_space(this);
- if (reg.is(rax)) {
- emit_rex(rax, size);
- emit(0xA9);
- emit(mask);
+ bool half_word = size == sizeof(int16_t);
+ if (half_word) {
+ emit(0x66);
+ size = sizeof(int32_t);
+ }
+ bool byte_operand = size == sizeof(int8_t);
+ if (byte_operand) {
+ size = sizeof(int32_t);
+ if (!reg.is_byte_register()) emit_rex_32(reg);
} else {
emit_rex(reg, size);
- emit(0xF7);
+ }
+ if (reg.is(rax)) {
+ emit(byte_operand ? 0xA8 : 0xA9);
+ } else {
+ emit(byte_operand ? 0xF6 : 0xF7);
emit_modrm(0x0, reg);
+ }
+ if (byte_operand) {
+ emit(mask.value_);
+ } else if (half_word) {
+ emitw(mask.value_);
+ } else {
emit(mask);
}
}
-
void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
- // testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
- testb(op, mask);
- return;
+ size = sizeof(int8_t);
+ } else if (is_uint16(mask.value_)) {
+ size = sizeof(int16_t);
}
EnsureSpace ensure_space(this);
+ bool half_word = size == sizeof(int16_t);
+ if (half_word) {
+ emit(0x66);
+ size = sizeof(int32_t);
+ }
+ bool byte_operand = size == sizeof(int8_t);
+ if (byte_operand) {
+ size = sizeof(int32_t);
+ }
emit_rex(rax, op, size);
- emit(0xF7);
+ emit(byte_operand ? 0xF6 : 0xF7);
emit_operand(rax, op); // Operation code 0
- emit(mask);
+ if (byte_operand) {
+ emit(mask.value_);
+ } else if (half_word) {
+ emitw(mask.value_);
+ } else {
+ emit(mask);
+ }
}
-
void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_rex(reg, op, size);
- emit(0x85);
+ if (size == sizeof(int16_t)) {
+ emit(0x66);
+ size = sizeof(int32_t);
+ }
+ bool byte_operand = size == sizeof(int8_t);
+ if (byte_operand) {
+ size = sizeof(int32_t);
+ if (!reg.is_byte_register()) emit_rex_32(reg, op);
+ } else {
+ emit_rex(reg, op, size);
+ }
+ emit(byte_operand ? 0x84 : 0x85);
emit_operand(reg, op);
}
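A simplified standalone sketch of the opcode selection the unified emit_test helpers perform (real x86-64 TEST encodings; REX and ModRM emission omitted): 16-bit forms take a 0x66 operand-size prefix, byte forms use 0x84/0xA8/0xF6, and wider forms use 0x85/0xA9/0xF7.

    #include <cstdint>
    #include <vector>

    void EmitTestImm(std::vector<uint8_t>* out, int size_bytes, bool reg_is_rax) {
      if (size_bytes == 2) out->push_back(0x66);  // operand-size override prefix
      const bool byte_op = size_bytes == 1;
      if (reg_is_rax) {
        out->push_back(byte_op ? 0xA8 : 0xA9);  // short accumulator form
      } else {
        out->push_back(byte_op ? 0xF6 : 0xF7);  // generic form, /0 in ModRM
      }
      // The 1-, 2-, or 4-byte immediate follows here.
    }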
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index e8ee9e4bdd..08c621c938 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1981,9 +1981,6 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index d62aafe573..27c7475945 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -32,17 +32,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(rax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -1117,9 +1106,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdx);
- __ Push(rax);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(rsi);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(rsi);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -1976,37 +1967,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- String::Encoding encoding) {
- // Nothing to do for zero characters.
- Label done;
- __ testl(count, count);
- __ j(zero, &done, Label::kNear);
-
- // Make count the number of bytes to copy.
- if (encoding == String::TWO_BYTE_ENCODING) {
- STATIC_ASSERT(2 == sizeof(uc16));
- __ addl(count, count);
- }
-
- // Copy remaining characters.
- Label loop;
- __ bind(&loop);
- __ movb(kScratchRegister, Operand(src, 0));
- __ movb(Operand(dest, 0), kScratchRegister);
- __ incp(src);
- __ incp(dest);
- __ decl(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2616,61 +2576,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ jmp(done);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found leaving the
-// index into the dictionary in |r1|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- DCHECK(!elements.is(r0));
- DCHECK(!elements.is(r1));
- DCHECK(!name.is(r0));
- DCHECK(!name.is(r1));
-
- __ AssertName(name);
-
- __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
- __ decl(r0);
-
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, Name::kHashFieldOffset));
- __ shrl(r1, Immediate(Name::kHashShift));
- if (i > 0) {
- __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ andp(r1, r0);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
-
- // Check if the key is identical to the name.
- __ cmpp(name, Operand(elements, r1, times_pointer_size,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), elements, r0, r1,
- POSITIVE_LOOKUP);
- __ Push(name);
- __ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
- __ shrl(r0, Immediate(Name::kHashShift));
- __ Push(r0);
- __ CallStub(&stub);
-
- __ testp(r0, r0);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -2949,203 +2854,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
-static void HandleArrayCases(MacroAssembler* masm, Register feedback,
- Register receiver_map, Register scratch1,
- Register scratch2, Register scratch3,
- bool is_polymorphic, Label* miss) {
- // feedback initially contains the feedback array
- Label next_loop, prepare_next;
- Label start_polymorphic;
-
- Register counter = scratch1;
- Register length = scratch2;
- Register cached_map = scratch3;
-
- __ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
- __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &start_polymorphic);
-
- // found, now call handler.
- Register handler = feedback;
- __ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- // Polymorphic, we have to loop from 2 to N
- __ bind(&start_polymorphic);
- __ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
- if (!is_polymorphic) {
- // If the IC could be monomorphic we have to make sure we don't go past the
- // end of the feedback array.
- __ cmpl(length, Immediate(2));
- __ j(equal, miss);
- }
- __ movl(counter, Immediate(2));
-
- __ bind(&next_loop);
- __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- __ bind(&prepare_next);
- __ addl(counter, Immediate(2));
- __ cmpl(counter, length);
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-
-static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
- Register receiver_map, Register feedback,
- Register vector, Register integer_slot,
- Label* compare_map, Label* load_smi_map,
- Label* try_array) {
- __ JumpIfSmi(receiver, load_smi_map);
- __ movp(receiver_map, FieldOperand(receiver, 0));
-
- __ bind(compare_map);
- __ cmpp(receiver_map, FieldOperand(feedback, WeakCell::kValueOffset));
- __ j(not_equal, try_array);
- Register handler = feedback;
- __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-}
-
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
- Register receiver_map,
- Register feedback, Register scratch,
- Register scratch1,
- Register scratch2, Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label transition_call;
-
- Register cached_map = scratch;
- Register counter = scratch1;
- Register length = scratch2;
-
- // Polymorphic, we have to loop from 0 to N - 1
- __ movp(counter, Immediate(0));
- __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ SmiToInteger32(length, length);
-
- __ bind(&next_loop);
- __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize));
- __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &transition_call);
- __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ jmp(feedback);
-
- __ bind(&transition_call);
- DCHECK(receiver_map.is(StoreTransitionDescriptor::MapRegister()));
- __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(receiver_map, miss);
- // Get the handler in value.
- __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ jmp(feedback);
-
- __ bind(&prepare_next);
- __ addl(counter, Immediate(3));
- __ cmpl(counter, length);
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ jmp(miss);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // rdx
- Register key = StoreWithVectorDescriptor::NameRegister(); // rcx
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // rbx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // rdi
- DCHECK(StoreWithVectorDescriptor::ValueRegister().is(rax)); // rax
- Register feedback = r8;
- Register integer_slot = r9;
- Register receiver_map = r11;
- DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
-
- __ SmiToInteger32(integer_slot, slot);
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize));
-
- // Try to quickly handle the monomorphic case without knowing for sure
- // if we have a weak cell in feedback. We do know it's safe to look
- // at WeakCell::kValueOffset.
- Label try_array, load_smi_map, compare_map;
- Label not_array, miss;
- HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
- integer_slot, &compare_map, &load_smi_map, &try_array);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
- r15, r14, &miss);
-
- __ bind(&not_array);
- Label try_poly_name;
- __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
-
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmpp(key, feedback);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(rbx);
CallICStub stub(isolate(), state());
@@ -3507,132 +3215,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rdi : target
- // -- rdx : new target
- // -- rsi : context
- // -- rsp[0] : return address
- // -----------------------------------
- __ AssertFunction(rdi);
- __ AssertReceiver(rdx);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rbx);
- __ j(not_equal, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ movp(rcx, FieldOperand(rdx, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(rcx, &new_object);
- __ CmpObjectType(rcx, MAP_TYPE, rbx);
- __ j(not_equal, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ cmpp(rdi, FieldOperand(rcx, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
- __ leal(rbx, Operand(rbx, times_pointer_size, 0));
- __ Allocate(rbx, rax, rdi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ movp(FieldOperand(rax, JSObject::kMapOffset), rcx);
- __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ leap(rbx, FieldOperand(rax, JSObject::kHeaderSize));
-
- // ----------- S t a t e -------------
- // -- rax : result (tagged)
- // -- rbx : result fields (untagged)
- // -- rdi : result end (untagged)
- // -- rcx : initial map
- // -- rsi : context
- // -- rsp[0] : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
- __ testl(FieldOperand(rcx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(not_zero, &slack_tracking, Label::kNear);
- {
- // Initialize all in-object fields with undefined.
- __ InitializeFieldsWithFiller(rbx, rdi, r11);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ subl(FieldOperand(rcx, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Initialize the in-object fields with undefined.
- __ movzxbl(rdx, FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset));
- __ negp(rdx);
- __ leap(rdx, Operand(rdi, rdx, times_pointer_size, 0));
- __ InitializeFieldsWithFiller(rbx, rdx, r11);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ LoadRoot(r11, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(rdx, rdi, r11);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ testl(FieldOperand(rcx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(zero, &finalize, Label::kNear);
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rax);
- __ Push(rcx);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(rax);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rbx, rbx);
- __ Push(rcx);
- __ Push(rbx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(rcx);
- }
- __ movzxbl(rbx, FieldOperand(rcx, Map::kInstanceSizeOffset));
- __ leap(rdi, Operand(rax, rbx, times_pointer_size, 0));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ decp(rdi); // Remove the tag from the end address.
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ PopReturnAddressTo(rcx);
- __ Push(rdi);
- __ Push(rdx);
- __ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
-
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdi : function
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index a181377221..bf503dfc6b 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -14,15 +14,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
public:
- // Generate code for copying characters using the rep movs instruction.
- // Copies rcx characters from rsi to rdi. Copying of overlapping regions is
- // not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- String::Encoding encoding);
-
// Compares two flat one-byte strings and returns result in rax.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -63,14 +54,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register r0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 911f3cb64a..2432d7ed4f 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -62,309 +62,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#define __ ACCESS_MASM(masm)
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- // Return address is on the stack.
- Register scratch = rdi;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
- __ RecordWriteField(receiver,
- HeapObject::kMapOffset,
- target_map,
- scratch,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
- DCHECK(value.is(rax));
- DCHECK(target_map.is(rbx));
-
- // The fail label is not actually used since we do not allocate.
- Label allocated, new_backing_store, only_change_map, done;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- if (kPointerSize == kDoubleSize) {
- // Check backing store for COW-ness. For COW arrays we have to
- // allocate a new backing store.
- __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
- Heap::kFixedCOWArrayMapRootIndex);
- __ j(equal, &new_backing_store);
- } else {
- // For x32 port we have to allocate a new backing store as SMI size is
- // not equal with double size.
- DCHECK(kDoubleSize == 2 * kPointerSize);
- __ jmp(&new_backing_store);
- }
-
- // Check if the backing store is in new-space. If not, we need to allocate
- // a new one since the old one is in pointer-space.
- // If in new space, we can reuse the old backing store because it is
- // the same size.
- __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);
-
- __ movp(r14, r8); // Destination array equals source array.
-
- // r8 : source FixedArray
- // r9 : elements array length
- // r14: destination FixedDoubleArray
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
-
- __ bind(&allocated);
- // Set transitioned map.
- __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
- // Convert smis to doubles and holes to hole NaNs. The Array's length
- // remains unchanged.
- STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
- STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
-
- Label loop, entry, convert_hole;
- __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
- // r15: the-hole NaN
- __ jmp(&entry);
-
- // Allocate new backing store.
- __ bind(&new_backing_store);
- __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
- __ Allocate(rdi, r14, r11, r15, fail, NO_ALLOCATION_FLAGS);
- // Set backing store's map
- __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
- __ movp(FieldOperand(r14, HeapObject::kMapOffset), rdi);
- // Set receiver's backing store.
- __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r14);
- __ movp(r11, r14);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- // Set backing store's length.
- __ Integer32ToSmi(r11, r9);
- __ movp(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
- __ jmp(&allocated);
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&done);
-
- // Conversion loop.
- __ bind(&loop);
- __ movp(rbx,
- FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
- // r9 : current element's index
- // rbx: current element (smi-tagged)
- __ JumpIfNotSmi(rbx, &convert_hole);
- __ SmiToInteger32(rbx, rbx);
- __ Cvtlsi2sd(kScratchDoubleReg, rbx);
- __ Movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
- kScratchDoubleReg);
- __ jmp(&entry);
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ Assert(equal, kObjectFoundInSmiOnlyArray);
- }
-
- __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
- __ bind(&entry);
- __ decp(r9);
- __ j(not_sign, &loop);
-
- __ bind(&done);
-}
-
-
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(rdx));
- DCHECK(key.is(rcx));
- DCHECK(value.is(rax));
- DCHECK(target_map.is(rbx));
-
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
- __ j(equal, &only_change_map);
-
- __ Push(rsi);
- __ Push(rax);
-
- __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- // r8 : source FixedDoubleArray
- // r9 : number of elements
- __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
- __ Allocate(rdi, r11, r14, r15, &gc_required, NO_ALLOCATION_FLAGS);
- // r11: destination FixedArray
- __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
- __ movp(FieldOperand(r11, HeapObject::kMapOffset), rdi);
- __ Integer32ToSmi(r14, r9);
- __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
-
- // Prepare for conversion loop.
- __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
- __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
- // rsi: the-hole NaN
- // rdi: pointer to the-hole
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ jmp(&initialization_loop_entry, Label::kNear);
- __ bind(&initialization_loop);
- __ movp(FieldOperand(r11, r9, times_pointer_size, FixedArray::kHeaderSize),
- rdi);
- __ bind(&initialization_loop_entry);
- __ decp(r9);
- __ j(not_sign, &initialization_loop);
-
- __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ Pop(rax);
- __ Pop(rsi);
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- __ bind(&loop);
- __ movq(r14, FieldOperand(r8,
- r9,
- times_8,
- FixedDoubleArray::kHeaderSize));
- // r9 : current element's index
- // r14: current element
- __ cmpq(r14, rsi);
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(rax, r15, &gc_required);
- // rax: new heap number
- __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
- __ movp(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rax);
- __ movp(r15, r9);
- __ RecordWriteArray(r11,
- rax,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ movp(FieldOperand(r11,
- r9,
- times_pointer_size,
- FixedArray::kHeaderSize),
- rdi);
-
- __ bind(&entry);
- __ decp(r9);
- __ j(not_sign, &loop);
-
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ movp(FieldOperand(rdx, JSObject::kElementsOffset), r11);
- __ RecordWriteField(rdx,
- JSObject::kElementsOffset,
- r11,
- r15,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ Pop(rax);
- __ Pop(rsi);
-
- __ bind(&only_change_map);
- // Set transitioned map.
- __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
- __ RecordWriteField(rdx,
- HeapObject::kMapOffset,
- rbx,
- rdi,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-}
-
-
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
@@ -498,32 +195,24 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start());
patcher.masm()->Nop(
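Annotation: GetCodeAge above recovers the age stub by decoding the rel32 call that PatchPlatformCodeAge planted at the start of the sequence. The arithmetic, as standalone C++ (the 4-byte offset mirrors this port's kCallTargetAddressOffset; treat it as an assumption here):

    #include <cstdint>
    #include <cstring>

    uint8_t* DecodeCallTarget(uint8_t* sequence) {
      const int kCallTargetAddressOffset = 4;  // rel32 is relative to the next insn
      uint8_t* rel_field = sequence + 1;       // skip the kCallOpcode byte
      int32_t rel32;
      std::memcpy(&rel32, rel_field, sizeof rel32);  // avoids strict-aliasing UB
      return rel_field + rel32 + kCallTargetAddressOffset;
    }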
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 3ee4412c10..b775011681 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -64,13 +64,8 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {rbx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {rdi, rdx};
+ // SharedFunctionInfo, vector, slot index.
+ Register registers[] = {rbx, rcx, rdx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 8d70f540de..b35ef3b07a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2547,23 +2547,6 @@ void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
j(not_equal, on_fail, near_jump);
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
- Register instance_type, Register scratch, Label* failure,
- Label::Distance near_jump) {
- if (!scratch.is(instance_type)) {
- movl(scratch, instance_type);
- }
-
- const int kFlatOneByteStringMask =
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-
- andl(scratch, Immediate(kFlatOneByteStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
- j(not_equal, failure, near_jump);
-}
-
-
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* on_fail,
@@ -3663,66 +3646,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
Immediate(static_cast<int8_t>(type)));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset) {
- Label smi_value, done;
-
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- // Double value, turn potential sNaN into qNaN.
- Move(xmm_scratch, 1.0);
- mulsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
- jmp(&done, Label::kNear);
-
- bind(&smi_value);
-  // Value is a smi. Convert to a double and store.
- // Preserve original value.
- SmiToInteger32(kScratchRegister, maybe_number);
- Cvtlsi2sd(xmm_scratch, kScratchRegister);
- bind(&done);
- Movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
- xmm_scratch);
-}
-
-
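Annotation: the removed StoreNumberToDoubleElements canonicalizes NaNs with Move(xmm_scratch, 1.0) followed by mulsd. Multiplying by 1.0 is a no-op for ordinary doubles but turns a signaling NaN into a quiet NaN, so the double array never stores an sNaN pattern. A self-contained demonstration (note an optimizing C++ compiler may fold 1.0 * x; the stub performs the multiply in generated machine code, where the quieting is guaranteed):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double Canonicalize(double x) { return 1.0 * x; }  // sNaN -> qNaN on the FPU

    int main() {
      uint64_t snan_bits = 0x7FF0000000000001ULL;  // one signaling-NaN encoding
      double snan;
      std::memcpy(&snan, &snan_bits, sizeof snan);
      double q = Canonicalize(snan);
      uint64_t q_bits;
      std::memcpy(&q_bits, &q, sizeof q_bits);
      std::printf("%016llx -> %016llx\n", (unsigned long long)snan_bits,
                  (unsigned long long)q_bits);
    }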
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -4286,8 +4209,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(rdi));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(rdx));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -4387,17 +4310,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- Operand last_step_action_operand = ExternalOperand(last_step_action);
- STATIC_ASSERT(StepFrame > StepIn);
- cmpb(last_step_action_operand, Immediate(StepIn));
- j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ Operand debug_hook_active_operand = ExternalOperand(debug_hook_active);
+ cmpb(debug_hook_active_operand, Immediate(0));
+ j(equal, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -4414,7 +4335,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -4428,7 +4349,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiToInteger64(expected.reg(), expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
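Annotation: the new CheckDebugHook replaces the old step-action comparison with a single byte flag, debug_hook_on_function_call, tested against zero. Its semantics in plain C++ (hypothetical names, not the V8 API):

    #include <cstdint>

    struct DebugState { volatile uint8_t hook_on_function_call; };

    template <typename Hook>
    void MaybeCallDebugHook(const DebugState& dbg, Hook debug_on_function_call) {
      if (dbg.hook_on_function_call != 0) {  // cmpb(..., 0); j(equal, &skip_hook)
        debug_on_function_call();            // Runtime::kDebugOnFunctionCall
      }
    }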
void MacroAssembler::StubPrologue(StackFrame::Type type) {
@@ -4958,125 +4879,6 @@ void MacroAssembler::AllocateHeapNumber(Register result,
movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
}
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
- kObjectAlignmentMask;
- DCHECK(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
- kHeaderAlignment));
- andp(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subp(scratch1, Immediate(kHeaderAlignment));
- }
-
-  // Allocate a two-byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1, result, scratch2,
- scratch3, gc_required, NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movp(FieldOperand(result, String::kLengthOffset), scratch1);
- movp(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
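Annotation: the leap/andp/subp dance in the removed AllocateTwoByteString nets out to the header size plus two bytes per character, rounded up to the object alignment. The same computation as plain integers (8-byte alignment is an assumption for x64):

    #include <cstddef>

    constexpr std::size_t kObjectAlignment = 8;  // assumption: x64 pointer size
    constexpr std::size_t kObjectAlignmentMask = kObjectAlignment - 1;

    constexpr std::size_t TwoByteStringSize(std::size_t header, std::size_t length) {
      // header + 2 * length, rounded up to the next alignment boundary.
      return (header + 2 * length + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }

    static_assert(TwoByteStringSize(16, 3) == 24, "16 + 6 rounds up to 24");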
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
- kObjectAlignmentMask;
- movl(scratch1, length);
- DCHECK(kCharSize == 1);
- addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- andp(scratch1, Immediate(~kObjectAlignmentMask));
- if (kHeaderAlignment > 0) {
- subp(scratch1, Immediate(kHeaderAlignment));
- }
-
- // Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1, result, scratch2,
- scratch3, gc_required, NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
- Integer32ToSmi(scratch1, length);
- movp(FieldOperand(result, String::kLengthOffset), scratch1);
- movp(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-  // Allocate cons string in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-  // Allocate sliced string in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-  // Allocate sliced string in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch,
Label* gc_required) {
@@ -5137,28 +4939,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- movp(scratch, NativeContextOperand());
- cmpp(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- movp(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
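Annotation: LoadTransitionedArrayMapConditional was a cache lookup against the native context's per-ElementsKind Array maps. Stripped of assembly (hypothetical accessors, illustration only):

    struct Map;
    struct NativeContext {
      Map* array_map[4];  // indexed by fast ElementsKind; illustration only
    };

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
                        FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };

    // Returns false for the no_map_match case.
    bool LoadTransitionedArrayMapConditional(const NativeContext& cx,
                                             ElementsKind expected,
                                             ElementsKind transitioned,
                                             Map** map_in_out) {
      if (*map_in_out != cx.array_map[expected]) return false;
      *map_in_out = cx.array_map[transitioned];
      return true;
    }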
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -5501,42 +5281,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
Heap::kAllocationMementoMapRootIndex);
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
- DCHECK(!scratch1.is(scratch0));
- Register current = scratch0;
- Label loop_again, end;
-
- movp(current, object);
- movp(current, FieldOperand(current, HeapObject::kMapOffset));
- movp(current, FieldOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- j(equal, &end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- movp(current, FieldOperand(current, HeapObject::kMapOffset));
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- CmpInstanceType(current, JS_OBJECT_TYPE);
- j(below, found);
- movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
- j(equal, found);
- movp(current, FieldOperand(current, Map::kPrototypeOffset));
- CompareRoot(current, Heap::kNullValueRootIndex);
- j(not_equal, &loop_again);
-
- bind(&end);
-}
-
-
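Annotation: JumpIfDictionaryInPrototypeChain walked map -> prototype -> map until hitting null, treating proxies and wrapper objects (instance type below JS_OBJECT_TYPE) the same as a dictionary-elements hit. As a C++ loop over a toy object model (the constants are illustrative, not V8's real values):

    struct Map;
    struct HeapObject { Map* map; };
    struct Map {
      int instance_type;
      int elements_kind;
      HeapObject* prototype;  // nullptr terminates the chain
    };

    const int JS_OBJECT_TYPE = 100;     // assumption: illustrative ordering only
    const int DICTIONARY_ELEMENTS = 6;  // assumption: illustrative value

    bool HasDictionaryInPrototypeChain(HeapObject* object) {
      for (HeapObject* cur = object->map->prototype; cur != nullptr;
           cur = cur->map->prototype) {
        if (cur->map->instance_type < JS_OBJECT_TYPE) return true;  // proxy/value
        if (cur->map->elements_kind == DICTIONARY_ELEMENTS) return true;
      }
      return false;
    }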
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(rax));
DCHECK(!dividend.is(rdx));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index f085509914..c09b07cac8 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -390,9 +390,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -1112,29 +1113,6 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by index in
-  // the FastDoubleElements array elements; otherwise jump to fail. Note that
- // index must not be smi-tagged.
- void StoreNumberToDoubleElements(Register maybe_number,
- Register elements,
- Register index,
- XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset = 0);
-
// Compare an object's map with the specified map.
void CompareMap(Register obj, Handle<Map> map);
@@ -1344,36 +1322,6 @@ class MacroAssembler: public Assembler {
Label* gc_required,
MutableMode mode = IMMUTABLE);
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteConsString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
@@ -1420,17 +1368,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
// Load the native context slot with the current index.
void LoadNativeContextSlot(int index, Register dst);
@@ -1593,20 +1530,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- j(equal, memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
diff --git a/deps/v8/src/x87/OWNERS b/deps/v8/src/x87/OWNERS
index dd9998b261..61245ae8e2 100644
--- a/deps/v8/src/x87/OWNERS
+++ b/deps/v8/src/x87/OWNERS
@@ -1 +1,2 @@
weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index eb8dafa3b0..2ba4dfd33d 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -116,13 +116,18 @@ uint32_t RelocInfo::wasm_memory_size_reference() {
return Memory::uint32_at(pc_);
}
+uint32_t RelocInfo::wasm_function_table_size_reference() {
+ DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+ return Memory::uint32_at(pc_);
+}
+
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Memory::Address_at(pc_) = address;
}
-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
- ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+ ICacheFlushMode flush_mode) {
Memory::uint32_at(pc_) = size;
}
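Annotation: the renamed unchecked_update_wasm_size is an in-place rewrite of a 32-bit immediate at pc_; the flush_mode parameter is accepted but unused in this hunk. The core operation, hedged as a sketch:

    #include <cstdint>
    #include <cstring>

    // Patch a 32-bit immediate embedded at `pc` in generated code. Whether an
    // icache flush must follow is port-specific and left to the caller here.
    void PatchUint32(uint8_t* pc, uint32_t value) {
      std::memcpy(pc, &value, sizeof value);
    }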
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 160145b5ad..22339e7495 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -957,9 +957,6 @@ class Assembler : public AssemblerBase {
return pc_offset() - label->pos();
}
- // Mark generator continuation.
- void RecordGeneratorContinuation();
-
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 0ea919d3b1..4c7bdb54fc 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -34,17 +34,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
-void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
- descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
-void FastFunctionBindStub::InitializeDescriptor(
- CodeStubDescriptor* descriptor) {
- Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
- descriptor->Initialize(eax, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
-}
-
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@@ -1058,9 +1047,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cc == equal) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(edx);
- __ Push(eax);
- __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
+ __ Push(esi);
+ __ Call(strict() ? isolate()->builtins()->StrictEqual()
+ : isolate()->builtins()->Equal(),
+ RelocInfo::CODE_TARGET);
+ __ Pop(esi);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@@ -1471,7 +1462,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
save_doubles_code = *(save_doubles.GetCode());
}
- isolate->set_fp_stubs_generated(true);
}
@@ -1878,40 +1868,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
-
-void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding) {
- DCHECK(!scratch.is(dest));
- DCHECK(!scratch.is(src));
- DCHECK(!scratch.is(count));
-
- // Nothing to do for zero characters.
- Label done;
- __ test(count, count);
- __ j(zero, &done);
-
- // Make count the number of bytes to copy.
- if (encoding == String::TWO_BYTE_ENCODING) {
- __ shl(count, 1);
- }
-
- Label loop;
- __ bind(&loop);
- __ mov_b(scratch, Operand(src, 0));
- __ mov_b(Operand(dest, 0), scratch);
- __ inc(src);
- __ inc(dest);
- __ dec(count);
- __ j(not_zero, &loop);
-
- __ bind(&done);
-}
-
-
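Annotation: GenerateCopyCharacters copies one byte per iteration; for TWO_BYTE_ENCODING the count is doubled up front (shl(count, 1)) so the same loop serves both encodings. Equivalent C++:

    #include <cstddef>
    #include <cstdint>

    enum class Encoding { ONE_BYTE, TWO_BYTE };

    // Copy `count` characters from src to dest, one byte at a time.
    // Overlapping ranges are not supported, matching the stub's contract.
    void CopyCharacters(uint8_t* dest, const uint8_t* src, std::size_t count,
                        Encoding encoding) {
      std::size_t bytes = (encoding == Encoding::TWO_BYTE) ? count * 2 : count;
      while (bytes--) *dest++ = *src++;
    }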
void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left,
Register right,
@@ -2497,67 +2453,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ jmp(done);
}
-
-// Probe the name dictionary in the |elements| register. Jump to the
-// |done| label if a property with the given name is found, leaving the
-// index into the dictionary in |r0|. Jump to the |miss| label
-// otherwise.
-void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1) {
- DCHECK(!elements.is(r0));
- DCHECK(!elements.is(r1));
- DCHECK(!name.is(r0));
- DCHECK(!name.is(r1));
-
- __ AssertName(name);
-
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
- __ shr(r1, kSmiTagSize); // convert smi to int
- __ dec(r1);
-
- // Generate an unrolled loop that performs a few probes before
- // giving up. Measurements done on Gmail indicate that 2 probes
- // cover ~93% of loads from dictionaries.
- for (int i = 0; i < kInlinedProbes; i++) {
- // Compute the masked index: (hash + i + i * i) & mask.
- __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
- __ shr(r0, Name::kHashShift);
- if (i > 0) {
- __ add(r0, Immediate(NameDictionary::GetProbeOffset(i)));
- }
- __ and_(r0, r1);
-
- // Scale the index by multiplying by the entry size.
- STATIC_ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
-
- // Check if the key is identical to the name.
- __ cmp(name, Operand(elements,
- r0,
- times_4,
- kElementsStartOffset - kHeapObjectTag));
- __ j(equal, done);
- }
-
- NameDictionaryLookupStub stub(masm->isolate(), elements, r1, r0,
- POSITIVE_LOOKUP);
- __ push(name);
- __ mov(r0, FieldOperand(name, Name::kHashFieldOffset));
- __ shr(r0, Name::kHashShift);
- __ push(r0);
- __ CallStub(&stub);
-
- __ test(r1, r1);
- __ j(zero, miss);
- __ jmp(done);
-}
-
-
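Annotation: the removed GeneratePositiveLookup probes the dictionary with the quadratic scheme its comment spells out, (hash + i + i*i) & mask, then scales by the 3-word entry size (the lea(r0, [r0 + r0*2])). As a helper:

    #include <cstddef>
    #include <cstdint>

    // Slot of the i-th probe for a name with the given hash, per the removed
    // code's own comment; capacity_mask is capacity - 1 (capacity a power of 2).
    std::size_t ProbeSlot(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
      const std::size_t kEntrySize = 3;  // NameDictionary entries are 3 words
      return static_cast<std::size_t>((hash + i + i * i) & capacity_mask) *
             kEntrySize;
    }

The inline fast path runs only kInlinedProbes of these before falling back to the full stub; per the comment above, two probes already cover roughly 93% of dictionary loads measured on Gmail.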
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
@@ -2834,328 +2729,6 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
}
-void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- __ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
- KeyedStoreICStub stub(isolate(), state());
- stub.GenerateForTrampoline(masm);
-}
-
-// value is on the stack already.
-static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector,
- Register slot, Register feedback,
- bool is_polymorphic, Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label start_polymorphic;
- Label pop_and_miss;
-
- __ push(receiver);
- // Value, vector and slot are passed on the stack, so no need to save/restore
- // them.
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
- __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
-
-  // A named keyed store might have a two-element array; all other cases can
-  // count on an array with at least two {map, handler} pairs, so they can go
-  // right into polymorphic array handling.
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &start_polymorphic);
-
-  // Found, now call the handler.
- Register handler = feedback;
- DCHECK(handler.is(StoreWithVectorDescriptor::ValueRegister()));
- __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
- __ pop(receiver);
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ jmp(handler);
-
- // Polymorphic, we have to loop from 2 to N
- __ bind(&start_polymorphic);
- __ push(key);
- Register counter = key;
- __ mov(counter, Immediate(Smi::FromInt(2)));
-
- if (!is_polymorphic) {
- // If is_polymorphic is false, we may only have a two element array.
- // Check against length now in that case.
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(greater_equal, &pop_and_miss);
- }
-
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
- __ pop(key);
- __ pop(receiver);
- __ jmp(handler);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(2)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ bind(&pop_and_miss);
- __ pop(key);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-
-static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
- Register key, Register vector,
- Register slot, Register weak_cell,
- Label* miss) {
- // The store ic value is on the stack.
- DCHECK(weak_cell.is(StoreWithVectorDescriptor::ValueRegister()));
-
- // feedback initially contains the feedback array
- Label compare_smi_map;
-
- // Move the weak map into the weak_cell register.
- Register ic_map = weak_cell;
- __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &compare_smi_map);
- __ cmp(ic_map, FieldOperand(receiver, 0));
- __ j(not_equal, miss);
- __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // jump to the handler.
- __ jmp(weak_cell);
-
- // In microbenchmarks, it made sense to unroll this code so that the call to
- // the handler is duplicated for a HeapObject receiver and a Smi receiver.
- __ bind(&compare_smi_map);
- __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, miss);
- __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
- // jump to the handler.
- __ jmp(weak_cell);
-}
-
-void KeyedStoreICStub::Generate(MacroAssembler* masm) {
- GenerateImpl(masm, false);
-}
-
-void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
- GenerateImpl(masm, true);
-}
-
-
-static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
- Register receiver, Register key,
- Register vector, Register slot,
- Register feedback, Label* miss) {
- // feedback initially contains the feedback array
- Label next, next_loop, prepare_next;
- Label load_smi_map, compare_map;
- Label transition_call;
- Label pop_and_miss;
-
- __ push(receiver);
- // Value, vector and slot are passed on the stack, so no need to save/restore
- // them.
-
- Register receiver_map = receiver;
- Register cached_map = vector;
-
- // Receiver might not be a heap object.
- __ JumpIfSmi(receiver, &load_smi_map);
- __ mov(receiver_map, FieldOperand(receiver, 0));
- __ bind(&compare_map);
-
- // Polymorphic, we have to loop from 0 to N - 1
- __ push(key);
- // Current stack layout:
- // - esp[0] -- key
- // - esp[4] -- receiver
- // - esp[8] -- return address
- // - esp[12] -- vector
- // - esp[16] -- slot
- // - esp[20] -- value
- //
- // Required stack layout for handler call (see StoreWithVectorDescriptor):
- // - esp[0] -- return address
- // - esp[4] -- vector
- // - esp[8] -- slot
- // - esp[12] -- value
- // - receiver, key, handler in registers.
- Register counter = key;
- __ mov(counter, Immediate(Smi::kZero));
- __ bind(&next_loop);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- __ j(not_equal, &prepare_next);
- __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, &transition_call);
- __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ pop(key);
- __ pop(receiver);
- __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
- __ jmp(feedback);
-
- __ bind(&transition_call);
- // Current stack layout:
- // - esp[0] -- key
- // - esp[4] -- receiver
- // - esp[8] -- return address
- // - esp[12] -- vector
- // - esp[16] -- slot
- // - esp[20] -- value
- //
- // Required stack layout for handler call (see StoreTransitionDescriptor):
- // - esp[0] -- return address
- // - esp[4] -- vector
- // - esp[8] -- slot
- // - esp[12] -- value
- // - receiver, key, map, handler in registers.
- __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
- FixedArray::kHeaderSize + 2 * kPointerSize));
- __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
-
- __ mov(cached_map, FieldOperand(cached_map, WeakCell::kValueOffset));
- // The weak cell may have been cleared.
- __ JumpIfSmi(cached_map, &pop_and_miss);
- DCHECK(!cached_map.is(StoreTransitionDescriptor::MapRegister()));
- __ mov(StoreTransitionDescriptor::MapRegister(), cached_map);
-
- // Call store transition handler using StoreTransitionDescriptor calling
- // convention.
- __ pop(key);
- __ pop(receiver);
- // Ensure that the transition handler we are going to call has the same
- // number of stack arguments which means that we don't have to adapt them
- // before the call.
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- STATIC_ASSERT(StoreTransitionDescriptor::kStackArgumentsCount == 3);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kValue ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kValue);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kSlot ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kSlot);
- STATIC_ASSERT(StoreWithVectorDescriptor::kParameterCount -
- StoreWithVectorDescriptor::kVector ==
- StoreTransitionDescriptor::kParameterCount -
- StoreTransitionDescriptor::kVector);
- __ jmp(feedback);
-
- __ bind(&prepare_next);
- __ add(counter, Immediate(Smi::FromInt(3)));
- __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
- __ j(less, &next_loop);
-
- // We exhausted our array of map handler pairs.
- __ bind(&pop_and_miss);
- __ pop(key);
- __ pop(receiver);
- __ jmp(miss);
-
- __ bind(&load_smi_map);
- __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
- __ jmp(&compare_map);
-}
-
-void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // edx
- Register key = StoreWithVectorDescriptor::NameRegister(); // ecx
- Register value = StoreWithVectorDescriptor::ValueRegister(); // eax
- Register vector = StoreWithVectorDescriptor::VectorRegister(); // ebx
- Register slot = StoreWithVectorDescriptor::SlotRegister(); // edi
- Label miss;
-
- if (StoreWithVectorDescriptor::kPassLastArgsOnStack) {
- // Current stack layout:
- // - esp[8] -- value
- // - esp[4] -- slot
- // - esp[0] -- return address
- STATIC_ASSERT(StoreDescriptor::kStackArgumentsCount == 2);
- STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
- if (in_frame) {
- __ RecordComment("[ StoreDescriptor -> StoreWithVectorDescriptor");
- // If the vector is not on the stack, then insert the vector beneath
- // return address in order to prepare for calling handler with
- // StoreWithVector calling convention.
- __ push(Operand(esp, 0));
- __ mov(Operand(esp, 4), StoreWithVectorDescriptor::VectorRegister());
- __ RecordComment("]");
- } else {
- __ mov(vector, Operand(esp, 1 * kPointerSize));
- }
- __ mov(slot, Operand(esp, 2 * kPointerSize));
- }
-
- Register scratch = value;
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize));
-
- // Is it a weak cell?
- Label try_array;
- Label not_array, smi_key, key_okay;
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
- __ j(not_equal, &try_array);
- HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
-
- // Is it a fixed array?
- __ bind(&try_array);
- __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &not_array);
- HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
- &miss);
-
- __ bind(&not_array);
- Label try_poly_name;
- __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
- __ j(not_equal, &try_poly_name);
-
- Handle<Code> megamorphic_stub =
- KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
- __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
-
- __ bind(&try_poly_name);
- // We might have a name in feedback, and a fixed array in the next slot.
- __ cmp(key, scratch);
- __ j(not_equal, &miss);
- // If the name comparison succeeded, we know we have a fixed array with
- // at least one map/handler pair.
- __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, false,
- &miss);
-
- __ bind(&miss);
- KeyedStoreIC::GenerateMiss(masm);
-}
-
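Annotation: the removed KeyedStoreICStub::GenerateImpl dispatches on the shape of the feedback slot: a WeakCell means monomorphic, a FixedArray means polymorphic, the megamorphic symbol gives up on per-map handlers, and a plain name points at a map/handler array in the following slot. The decision tree as C++ (hypothetical types; the real code inspects raw tagged words):

    enum class FeedbackKind { WeakCell, FixedArray, MegamorphicSymbol, Name };

    const char* DispatchKeyedStore(FeedbackKind slot0, bool name_matches_key) {
      switch (slot0) {
        case FeedbackKind::WeakCell:           // one {map -> handler} pair
          return "HandleMonomorphicStoreCase";
        case FeedbackKind::FixedArray:         // {map, transition, handler} triples
          return "HandlePolymorphicKeyedStoreCase";
        case FeedbackKind::MegamorphicSymbol:  // fall back to the megamorphic stub
          return "megamorphic stub";
        case FeedbackKind::Name:               // map/handler pairs in the next slot
          return name_matches_key ? "HandlePolymorphicStoreCase" : "miss";
      }
      return "miss";
    }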
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(ebx);
CallICStub stub(isolate(), state());
@@ -3497,134 +3070,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
-void FastNewObjectStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- edi : target
- // -- edx : new target
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
- __ AssertFunction(edi);
- __ AssertReceiver(edx);
-
- // Verify that the new target is a JSFunction.
- Label new_object;
- __ CmpObjectType(edx, JS_FUNCTION_TYPE, ebx);
- __ j(not_equal, &new_object);
-
- // Load the initial map and verify that it's in fact a map.
- __ mov(ecx, FieldOperand(edx, JSFunction::kPrototypeOrInitialMapOffset));
- __ JumpIfSmi(ecx, &new_object);
- __ CmpObjectType(ecx, MAP_TYPE, ebx);
- __ j(not_equal, &new_object);
-
- // Fall back to runtime if the target differs from the new target's
- // initial map constructor.
- __ cmp(edi, FieldOperand(ecx, Map::kConstructorOrBackPointerOffset));
- __ j(not_equal, &new_object);
-
- // Allocate the JSObject on the heap.
- Label allocate, done_allocate;
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
- __ lea(ebx, Operand(ebx, times_pointer_size, 0));
- __ Allocate(ebx, eax, edi, no_reg, &allocate, NO_ALLOCATION_FLAGS);
- __ bind(&done_allocate);
-
- // Initialize the JSObject fields.
- __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
- masm->isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSObject::kElementsOffset),
- masm->isolate()->factory()->empty_fixed_array());
- STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
- __ lea(ebx, FieldOperand(eax, JSObject::kHeaderSize));
-
- // ----------- S t a t e -------------
- // -- eax : result (tagged)
- // -- ebx : result fields (untagged)
- // -- edi : result end (untagged)
- // -- ecx : initial map
- // -- esi : context
- // -- esp[0] : return address
- // -----------------------------------
-
- // Perform in-object slack tracking if requested.
- Label slack_tracking;
- STATIC_ASSERT(Map::kNoSlackTracking == 0);
- __ test(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(not_zero, &slack_tracking, Label::kNear);
- {
- // Initialize all in-object fields with undefined.
- __ LoadRoot(edx, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(ebx, edi, edx);
- __ Ret();
- }
- __ bind(&slack_tracking);
- {
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- __ sub(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(1 << Map::ConstructionCounter::kShift));
-
- // Initialize the in-object fields with undefined.
- __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
- __ neg(edx);
- __ lea(edx, Operand(edi, edx, times_pointer_size, 0));
- __ LoadRoot(edi, Heap::kUndefinedValueRootIndex);
- __ InitializeFieldsWithFiller(ebx, edx, edi);
-
- // Initialize the remaining (reserved) fields with one pointer filler map.
- __ movzx_b(edx, FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset));
- __ lea(edx, Operand(ebx, edx, times_pointer_size, 0));
- __ LoadRoot(edi, Heap::kOnePointerFillerMapRootIndex);
- __ InitializeFieldsWithFiller(ebx, edx, edi);
-
- // Check if we can finalize the instance size.
- Label finalize;
- STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
- __ test(FieldOperand(ecx, Map::kBitField3Offset),
- Immediate(Map::ConstructionCounter::kMask));
- __ j(zero, &finalize, Label::kNear);
- __ Ret();
-
- // Finalize the instance size.
- __ bind(&finalize);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(eax);
- __ Push(ecx);
- __ CallRuntime(Runtime::kFinalizeInstanceSize);
- __ Pop(eax);
- }
- __ Ret();
- }
-
- // Fall back to %AllocateInNewSpace.
- __ bind(&allocate);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(ebx);
- __ Push(ecx);
- __ Push(ebx);
- __ CallRuntime(Runtime::kAllocateInNewSpace);
- __ Pop(ecx);
- }
- __ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceSizeOffset));
- __ lea(edi, Operand(eax, ebx, times_pointer_size, 0));
- STATIC_ASSERT(kHeapObjectTag == 1);
- __ dec(edi);
- __ jmp(&done_allocate);
-
- // Fall back to %NewObject.
- __ bind(&new_object);
- __ PopReturnAddressTo(ecx);
- __ Push(edi);
- __ Push(edx);
- __ PushReturnAddressFrom(ecx);
- __ TailCallRuntime(Runtime::kNewObject);
-}
-
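Annotation: the slack-tracking branch of the removed FastNewObjectStub decrements the map's construction counter, fills the currently used in-object fields with undefined and the still-unused tail with a one-pointer filler, and finalizes the instance size once the counter runs out. Compressed into C++ (hypothetical fields):

    #include <cstddef>

    struct Word { void* v; };
    struct MapInfo {
      int construction_counter;             // 0 == tracking finished
      std::size_t unused_property_fields;
    };

    void InitializeFields(Word* fields, Word* end, MapInfo* map, Word undefined,
                          Word one_pointer_filler,
                          void (*finalize_instance_size)(MapInfo*)) {
      if (map->construction_counter == 0) {  // fast path: no tracking
        for (Word* p = fields; p < end; ++p) *p = undefined;
        return;
      }
      --map->construction_counter;
      Word* used_end = end - map->unused_property_fields;
      for (Word* p = fields; p < used_end; ++p) *p = undefined;
      for (Word* p = used_end; p < end; ++p) *p = one_pointer_filler;
      if (map->construction_counter == 0) finalize_instance_size(map);
    }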
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edi : function
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index 6290cfed1c..9aeae46728 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -16,16 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm,
class StringHelper : public AllStatic {
public:
-  // Generate code for copying |count| characters from |src| to |dest|, one
-  // byte at a time. Copying of overlapping regions is not supported.
- static void GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- String::Encoding encoding);
-
// Compares two flat one byte strings and returns result in eax.
static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
Register left, Register right,
@@ -68,14 +58,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register r0);
- static void GeneratePositiveLookup(MacroAssembler* masm,
- Label* miss,
- Label* done,
- Register elements,
- Register name,
- Register r0,
- Register r1);
-
bool SometimesSetsUpAFrame() override { return false; }
private:
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index 5cda23dcea..a2bba1dcd7 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -212,274 +212,6 @@ MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
#define __ ACCESS_MASM(masm)
-
-void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* allocation_memento_found) {
- Register scratch = edi;
- DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
-
- if (mode == TRACK_ALLOCATION_SITE) {
- DCHECK(allocation_memento_found != NULL);
- __ JumpIfJSArrayHasAllocationMemento(
- receiver, scratch, allocation_memento_found);
- }
-
- // Set transitioned map.
- __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-}
-
-
-void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- DCHECK(target_map.is(ebx));
-
- Label loop, entry, convert_hole, gc_required, only_change_map;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(eax);
- __ push(ebx);
- __ push(esi);
-
- __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Allocate new FixedDoubleArray.
- // edx: receiver
- // edi: length of source FixedArray (smi-tagged)
- AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
- __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
- REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);
-
- // eax: destination FixedDoubleArray
- // edi: number of elements
- // edx: receiver
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_double_array_map()));
- __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
- __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
- // Replace receiver's backing store with newly created FixedDoubleArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ mov(ebx, eax);
- __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
-
- // Prepare for conversion loop.
- ExternalReference canonical_the_hole_nan_reference =
- ExternalReference::address_of_the_hole_nan();
- __ jmp(&entry);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
-
- // Restore registers before jumping into runtime.
- __ pop(esi);
- __ pop(ebx);
- __ pop(eax);
- __ jmp(fail);
-
- // Convert and copy elements
- // esi: source FixedArray
- __ bind(&loop);
- __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
- // ebx: current element from source
- // edi: index of current element
- __ JumpIfNotSmi(ebx, &convert_hole);
-
- // Normal smi, convert it to double and store.
- __ SmiUntag(ebx);
- __ push(ebx);
- __ fild_s(Operand(esp, 0));
- __ pop(ebx);
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
- __ jmp(&entry);
-
- // Found hole, store hole_nan_as_double instead.
- __ bind(&convert_hole);
-
- if (FLAG_debug_code) {
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ Assert(equal, kObjectFoundInSmiOnlyArray);
- }
-
- __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
- __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
-
- __ bind(&entry);
- __ sub(edi, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- // Restore registers.
- __ pop(esi);
- __ pop(ebx);
- __ pop(eax);
-
- __ bind(&only_change_map);
- // eax: value
- // ebx: target map
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-}
-
-
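Annotation: with no SSE available, the x87 GenerateSmiToDouble converts each element by untagging the smi and round-tripping it through the FPU (push; fild_s; fstp_d). Per element, in C terms (kSmiTagSize == 1 on ia32, so untagging is an arithmetic shift):

    #include <cstdint>

    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> 1;    // SmiUntag: drop the tag bit
      return static_cast<double>(untagged);  // the stub's fild_s/fstp_d pair
    }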
-void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm,
- Register receiver,
- Register key,
- Register value,
- Register target_map,
- AllocationSiteMode mode,
- Label* fail) {
- // Return address is on the stack.
- DCHECK(receiver.is(edx));
- DCHECK(key.is(ecx));
- DCHECK(value.is(eax));
- DCHECK(target_map.is(ebx));
-
- Label loop, entry, convert_hole, gc_required, only_change_map, success;
-
- if (mode == TRACK_ALLOCATION_SITE) {
- __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
- }
-
- // Check for empty arrays, which only require a map transition and no changes
- // to the backing store.
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
- __ j(equal, &only_change_map);
-
- __ push(esi);
- __ push(eax);
- __ push(edx);
- __ push(ebx);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
-
- // Allocate new FixedArray.
- // ebx: length of source FixedDoubleArray (smi-tagged)
- __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
- __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);
-
- // eax: destination FixedArray
- // ebx: number of elements
- __ mov(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->fixed_array_map()));
- __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
- __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
-
- // Allocating heap numbers in the loop below can fail and cause a jump to
- // gc_required. We can't leave a partly initialized FixedArray behind,
- // so pessimistically fill it with holes now.
- Label initialization_loop, initialization_loop_entry;
- __ jmp(&initialization_loop_entry, Label::kNear);
- __ bind(&initialization_loop);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
- __ bind(&initialization_loop_entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &initialization_loop);
-
- __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
- __ jmp(&entry);
-
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ bind(&only_change_map);
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ jmp(&success);
-
- // Call into runtime if GC is required.
- __ bind(&gc_required);
- __ pop(ebx);
- __ pop(edx);
- __ pop(eax);
- __ pop(esi);
- __ jmp(fail);
-
- // Box doubles into heap numbers.
- // edi: source FixedDoubleArray
- // eax: destination FixedArray
- __ bind(&loop);
- // ebx: index of current element (smi-tagged)
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &convert_hole);
-
- // Non-hole double, copy value into a heap number.
- __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
- // edx: new heap number
- __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
- __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
- __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
- __ mov(esi, ebx);
- __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ jmp(&entry, Label::kNear);
-
- // Replace the-hole NaN with the-hole pointer.
- __ bind(&convert_hole);
- __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
- masm->isolate()->factory()->the_hole_value());
-
- __ bind(&entry);
- __ sub(ebx, Immediate(Smi::FromInt(1)));
- __ j(not_sign, &loop);
-
- __ pop(ebx);
- __ pop(edx);
- // ebx: target map
- // edx: receiver
- // Set transitioned map.
- __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
- __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- // Replace receiver's backing store with newly created and filled FixedArray.
- __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
- __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
- // Restore registers.
- __ pop(eax);
- __ pop(esi);
-
- __ bind(&success);
-}
-
-
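Annotation: the x87 GenerateDoubleToObject identifies holes by comparing only the high word of each double slot: on little-endian ia32 the FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) offset lands on the upper 32 bits, which are matched against kHoleNanUpper32. A sketch of the check (the bit value is illustrative, not V8's actual constant):

    #include <cstdint>
    #include <cstring>

    const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumption: placeholder bits

    bool IsHoleSlot(const double* slot) {
      uint32_t upper;
      std::memcpy(&upper, reinterpret_cast<const uint8_t*>(slot) + 4,
                  sizeof upper);
      return upper == kHoleNanUpper32;  // high word alone identifies the hole
    }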
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Factory* factory,
Register string,
@@ -612,32 +344,24 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return result;
}
+Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
+ if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
-void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(isolate, sequence)) {
- *age = kNoAgeCodeAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
+ sequence++; // Skip the kCallOpcode byte
+ Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
+ Assembler::kCallTargetAddressOffset;
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ return GetAgeOfCodeAgeStub(stub);
}
-
-void Code::PatchPlatformCodeAge(Isolate* isolate,
- byte* sequence,
- Code::Age age,
- MarkingParity parity) {
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
+ Code::Age age) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
Assembler::FlushICache(isolate, sequence, young_length);
} else {
- Code* stub = GetCodeAgeStub(isolate, age, parity);
+ Code* stub = GetCodeAgeStub(isolate, age);
CodePatcher patcher(isolate, sequence, young_length);
patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 8df66bcfac..20bd4775d1 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -164,8 +164,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// Right trim the relocation info to free up remaining space.
const int delta = reloc_info->length() - new_reloc_length;
if (delta > 0) {
- isolate->heap()->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
- reloc_info, delta);
+ isolate->heap()->RightTrimFixedArray(reloc_info, delta);
}
}
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 70b110af5b..6375748f71 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -64,16 +64,11 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ebx};
+ // SharedFunctionInfo, vector, slot index.
+ Register registers[] = {ebx, ecx, edx};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
-void FastNewObjectDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {edi, edx};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {edi};
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index ee81a680e4..e20fe315e6 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -701,65 +701,6 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
cmpb(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
}
-void MacroAssembler::CheckFastObjectElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- STATIC_ASSERT(FAST_ELEMENTS == 2);
- STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(below_equal, fail, distance);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleyElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::CheckFastSmiElements(Register map,
- Label* fail,
- Label::Distance distance) {
- STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
- STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
- cmpb(FieldOperand(map, Map::kBitField2Offset),
- Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
- j(above, fail, distance);
-}
-
-
-void MacroAssembler::StoreNumberToDoubleElements(
- Register maybe_number,
- Register elements,
- Register key,
- Register scratch,
- Label* fail,
- int elements_offset) {
- Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
- JumpIfSmi(maybe_number, &smi_value, Label::kNear);
-
- CheckMap(maybe_number,
- isolate()->factory()->heap_number_map(),
- fail,
- DONT_DO_SMI_CHECK);
-
- fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
- jmp(&done, Label::kNear);
-
- bind(&smi_value);
- // Value is a smi. Convert to a double and store.
- // Preserve original value.
- mov(scratch, maybe_number);
- SmiUntag(scratch);
- push(scratch);
- fild_s(Operand(esp, 0));
- pop(scratch);
- bind(&done);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
-}
-
-
void MacroAssembler::CompareMap(Register obj, Handle<Map> map) {
cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
}
@@ -1595,139 +1536,6 @@ void MacroAssembler::AllocateHeapNumber(Register result,
mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
}
-
-void MacroAssembler::AllocateTwoByteString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- DCHECK(kShortSize == 2);
- // scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate two byte string in new space.
- Allocate(SeqTwoByteString::kHeaderSize, times_1, scratch1,
- REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3,
- Label* gc_required) {
- // Calculate the number of bytes needed for the characters in the string while
- // observing object alignment.
- DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- mov(scratch1, length);
- DCHECK(kCharSize == 1);
- add(scratch1, Immediate(kObjectAlignmentMask));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
-
- // Allocate one-byte string in new space.
- Allocate(SeqOneByteString::kHeaderSize, times_1, scratch1,
- REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->one_byte_string_map()));
- mov(scratch1, length);
- SmiTag(scratch1);
- mov(FieldOperand(result, String::kLengthOffset), scratch1);
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateOneByteString(Register result, int length,
- Register scratch1, Register scratch2,
- Label* gc_required) {
- DCHECK(length > 0);
-
- // Allocate one-byte string in new space.
- Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
- gc_required, NO_ALLOCATION_FLAGS);
-
- // Set the map, length and hash field.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->one_byte_string_map()));
- mov(FieldOperand(result, String::kLengthOffset),
- Immediate(Smi::FromInt(length)));
- mov(FieldOperand(result, String::kHashFieldOffset),
- Immediate(String::kEmptyHashField));
-}
-
-
-void MacroAssembler::AllocateTwoByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteConsString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->cons_one_byte_string_map()));
-}
-
-
-void MacroAssembler::AllocateTwoByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_string_map()));
-}
-
-
-void MacroAssembler::AllocateOneByteSlicedString(Register result,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- // Allocate heap number in new space.
- Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
- NO_ALLOCATION_FLAGS);
-
- // Set the map. The other fields are left uninitialized.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->sliced_one_byte_string_map()));
-}
-
-
void MacroAssembler::AllocateJSValue(Register result, Register constructor,
Register value, Register scratch,
Label* gc_required) {
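
All of the sequential, cons and sliced string allocators are gone. The only non-obvious arithmetic they carried was the size rounding in the sequential allocators: the lea computes length*2 + kObjectAlignmentMask in one instruction, and the and_ masks the sum down to an alignment multiple. The same computation in C++, assuming the 4-byte object alignment of this 32-bit port:

    #include <cstddef>

    constexpr size_t kObjectAlignment = 4;  // 32-bit (ia32/x87) case
    constexpr size_t kObjectAlignmentMask = kObjectAlignment - 1;

    // The rounding done by the removed AllocateTwoByteString:
    // 2 * length character bytes, rounded up to the alignment.
    size_t TwoByteCharBytes(size_t length) {
      return (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }
    // Example: TwoByteCharBytes(3) == 8 (six bytes rounded up).
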
@@ -2071,16 +1879,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
-
-void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual) {
- Label skip_flooding;
- ExternalReference last_step_action =
- ExternalReference::debug_last_step_action_address(isolate());
- STATIC_ASSERT(StepFrame > StepIn);
- cmpb(Operand::StaticVariable(last_step_action), Immediate(StepIn));
- j(less, &skip_flooding);
+void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual) {
+ Label skip_hook;
+ ExternalReference debug_hook_active =
+ ExternalReference::debug_hook_on_function_call_address(isolate());
+ cmpb(Operand::StaticVariable(debug_hook_active), Immediate(0));
+ j(equal, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@@ -2097,7 +1903,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
- CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@@ -2111,7 +1917,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
- bind(&skip_flooding);
+ bind(&skip_hook);
}
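
The stepping-specific flood becomes a generic call hook: instead of ordering last_step_action against StepIn, the code tests one byte at debug_hook_on_function_call_address and, when it is set, calls Runtime::kDebugOnFunctionCall under an internal frame. A standalone model of the control flow (the names stand in for the ExternalReference and the runtime function):

    #include <cstdint>
    #include <cstdio>

    uint8_t debug_hook_active = 0;  // gates the hook, like the flag byte

    void DebugOnFunctionCall() { std::puts("debugger notified"); }

    void InvokeFunction() {
      if (debug_hook_active != 0) {  // cmpb ...; j(equal, &skip_hook)
        // The real code spills new.target and the argument count around
        // the runtime call; that bookkeeping is omitted here.
        DebugOnFunctionCall();  // CallRuntime(Runtime::kDebugOnFunctionCall)
      }
      // ...the actual invocation proceeds here.
    }

    int main() {
      InvokeFunction();    // hook inactive: silent
      debug_hook_active = 1;
      InvokeFunction();    // hook active: notifies the debugger
    }
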
@@ -2125,8 +1931,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(edi));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(edx));
- if (call_wrapper.NeedsDebugStepCheck()) {
- FloodFunctionIfStepping(function, new_target, expected, actual);
+ if (call_wrapper.NeedsDebugHookCheck()) {
+ CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@@ -2230,28 +2036,6 @@ void MacroAssembler::LoadGlobalProxy(Register dst) {
mov(dst, ContextOperand(dst, Context::GLOBAL_PROXY_INDEX));
}
-
-void MacroAssembler::LoadTransitionedArrayMapConditional(
- ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match) {
- DCHECK(IsFastElementsKind(expected_kind));
- DCHECK(IsFastElementsKind(transitioned_kind));
-
- // Check that the function's map is the same as the expected cached map.
- mov(scratch, NativeContextOperand());
- cmp(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(expected_kind)));
- j(not_equal, no_map_match);
-
- // Use the transitioned cached map.
- mov(map_in_out,
- ContextOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
-}
-
-
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the native context from the current context.
mov(function, NativeContextOperand());
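
LoadTransitionedArrayMapConditional compared map_in_out against the native context's cached Array map for expected_kind and, on a match, replaced it with the cached map for transitioned_kind, branching to no_map_match otherwise. A standalone model with simplified stand-in types:

    #include <array>

    enum ElementsKind { FAST_SMI, FAST_HOLEY_SMI, FAST, FAST_HOLEY, kNumKinds };
    struct Map {};

    // Stands in for the native context's per-kind Array map slots.
    std::array<Map*, kNumKinds> cached_array_maps{};

    Map* TransitionedArrayMap(Map* map_in, ElementsKind expected,
                              ElementsKind transitioned) {
      if (map_in != cached_array_maps[expected]) return nullptr;  // no_map_match
      return cached_array_maps[transitioned];
    }
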
@@ -2606,19 +2390,6 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
mov(dst, FieldOperand(dst, offset));
}
-
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
- Register instance_type, Register scratch, Label* failure) {
- if (!scratch.is(instance_type)) {
- mov(scratch, instance_type);
- }
- and_(scratch,
- kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
- j(not_equal, failure);
-}
-
-
void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
Register object2,
Register scratch1,
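
The removed JumpIfInstanceTypeIsNotSequentialOneByte is a single mask-and-compare on the instance type: a flat one-byte string must be a string, sequential, and one-byte encoded. The same predicate in C++ (mask and tag values are assumed for illustration; the real definitions live in src/objects.h):

    #include <cstdint>

    constexpr uint32_t kIsNotStringMask = 0x80, kStringTag = 0x00;
    constexpr uint32_t kStringRepresentationMask = 0x07, kSeqStringTag = 0x00;
    constexpr uint32_t kStringEncodingMask = 0x08, kOneByteStringTag = 0x08;

    bool IsSequentialOneByte(uint32_t instance_type) {
      constexpr uint32_t mask =
          kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
      return (instance_type & mask) ==
             (kStringTag | kSeqStringTag | kOneByteStringTag);
    }
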
@@ -3009,43 +2780,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
cmp(scratch_reg, Immediate(isolate()->factory()->allocation_memento_map()));
}
-
-void MacroAssembler::JumpIfDictionaryInPrototypeChain(
- Register object,
- Register scratch0,
- Register scratch1,
- Label* found) {
- DCHECK(!scratch1.is(scratch0));
- Factory* factory = isolate()->factory();
- Register current = scratch0;
- Label loop_again, end;
-
- // scratch contained elements pointer.
- mov(current, object);
- mov(current, FieldOperand(current, HeapObject::kMapOffset));
- mov(current, FieldOperand(current, Map::kPrototypeOffset));
- cmp(current, Immediate(factory->null_value()));
- j(equal, &end);
-
- // Loop based on the map going up the prototype chain.
- bind(&loop_again);
- mov(current, FieldOperand(current, HeapObject::kMapOffset));
- STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
- STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
- CmpInstanceType(current, JS_OBJECT_TYPE);
- j(below, found);
- mov(scratch1, FieldOperand(current, Map::kBitField2Offset));
- DecodeField<Map::ElementsKindBits>(scratch1);
- cmp(scratch1, Immediate(DICTIONARY_ELEMENTS));
- j(equal, found);
- mov(current, FieldOperand(current, Map::kPrototypeOffset));
- cmp(current, Immediate(factory->null_value()));
- j(not_equal, &loop_again);
-
- bind(&end);
-}
-
-
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
DCHECK(!dividend.is(eax));
DCHECK(!dividend.is(edx));
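
JumpIfDictionaryInPrototypeChain climbed the chain map-by-map until hitting null, taking the found branch either on an instance type below JS_OBJECT_TYPE (the STATIC_ASSERTs cover proxies and JSValues, treated conservatively) or on DICTIONARY_ELEMENTS decoded from bit_field2. A loose standalone model of the loop; field names and the constants are illustrative:

    enum { DICTIONARY_ELEMENTS = 6, JS_OBJECT_TYPE = 100 };

    struct MapModel {
      MapModel* prototype_map;  // map of the prototype; nullptr past null
      int instance_type;
      int elements_kind;        // DecodeField<Map::ElementsKindBits> result
    };

    bool DictionaryInPrototypeChain(const MapModel* object_map) {
      for (const MapModel* m = object_map->prototype_map; m != nullptr;
           m = m->prototype_map) {
        if (m->instance_type < JS_OBJECT_TYPE) return true;  // j(below, found)
        if (m->elements_kind == DICTIONARY_ELEMENTS) return true;
      }
      return false;
    }
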
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 6bb63594a9..96fe63ce18 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -263,16 +263,6 @@ class MacroAssembler: public Assembler {
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst);
- // Conditionally load the cached Array transitioned map of type
- // transitioned_kind from the native context if the map in register
- // map_in_out is the cached Array map in the native context of
- // expected_kind.
- void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
- ElementsKind transitioned_kind,
- Register map_in_out,
- Register scratch,
- Label* no_map_match);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -342,9 +332,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void FloodFunctionIfStepping(Register fun, Register new_target,
- const ParameterCount& expected,
- const ParameterCount& actual);
+ // On function call, call into the debugger if necessary.
+ void CheckDebugHook(Register fun, Register new_target,
+ const ParameterCount& expected,
+ const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@@ -381,23 +372,6 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object can have both smi
- // and HeapObject elements. Jump to the specified label if it does not.
- void CheckFastObjectElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check if a map for a JSObject indicates that the object has fast smi only
- // elements. Jump to the specified label if it does not.
- void CheckFastSmiElements(Register map, Label* fail,
- Label::Distance distance = Label::kFar);
-
- // Check to see if maybe_number can be stored as a double in
- // FastDoubleElements. If it can, store it at the index specified by key in
- // the FastDoubleElements array elements, otherwise jump to fail.
- void StoreNumberToDoubleElements(Register maybe_number, Register elements,
- Register key, Register scratch, Label* fail,
- int offset = 0);
-
// Compare an object's map with the specified map.
void CompareMap(Register obj, Handle<Map> map);
@@ -629,31 +603,6 @@ class MacroAssembler: public Assembler {
void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
Label* gc_required, MutableMode mode = IMMUTABLE);
- // Allocate a sequential string. All the header fields of the string object
- // are initialized.
- void AllocateTwoByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, Register length,
- Register scratch1, Register scratch2,
- Register scratch3, Label* gc_required);
- void AllocateOneByteString(Register result, int length, Register scratch1,
- Register scratch2, Label* gc_required);
-
- // Allocate a raw cons string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteConsString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
- void AllocateOneByteConsString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
- // Allocate a raw sliced string object. Only the map field of the result is
- // initialized.
- void AllocateTwoByteSlicedString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
- void AllocateOneByteSlicedString(Register result, Register scratch1,
- Register scratch2, Label* gc_required);
-
// Allocate and initialize a JSValue wrapper with the specified {constructor}
// and {value}.
void AllocateJSValue(Register result, Register constructor, Register value,
@@ -868,13 +817,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
- // Check whether the instance type represents a flat one-byte string. Jump to
- // the label if not. If the instance type can be scratched specify same
- // register for both instance type and scratch.
- void JumpIfInstanceTypeIsNotSequentialOneByte(
- Register instance_type, Register scratch,
- Label* on_not_flat_one_byte_string);
-
// Checks if both objects are sequential one-byte strings, and jumps to label
// if either is not.
void JumpIfNotBothSequentialOneByteStrings(
@@ -922,20 +864,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
- void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
- Register scratch_reg,
- Label* memento_found) {
- Label no_memento_found;
- TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
- &no_memento_found);
- j(equal, memento_found);
- bind(&no_memento_found);
- }
-
- // Jumps to found label if a prototype map has dictionary elements.
- void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
- Register scratch1, Label* found);
-
private:
bool generating_stub_;
bool has_frame_;
diff --git a/deps/v8/src/zone/zone-chunk-list.h b/deps/v8/src/zone/zone-chunk-list.h
index f977a0cb31..8c7e5d98d0 100644
--- a/deps/v8/src/zone/zone-chunk-list.h
+++ b/deps/v8/src/zone/zone-chunk-list.h
@@ -5,6 +5,7 @@
#include <stdlib.h>
#include "src/globals.h"
+#include "src/utils.h"
#include "src/zone/zone.h"
#ifndef V8_SRC_ZONE_ZONE_CHUNK_LIST_H_
diff --git a/deps/v8/src/zone/zone-containers.h b/deps/v8/src/zone/zone-containers.h
index 0aecd98e50..7a53d4b259 100644
--- a/deps/v8/src/zone/zone-containers.h
+++ b/deps/v8/src/zone/zone-containers.h
@@ -36,6 +36,13 @@ class ZoneVector : public std::vector<T, zone_allocator<T>> {
// having the value {def}.
ZoneVector(size_t size, T def, Zone* zone)
: std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
+
+ // Constructs a new vector and fills it with the contents of the range
+ // [first, last).
+ template <class InputIt>
+ ZoneVector(InputIt first, InputIt last, Zone* zone)
+ : std::vector<T, zone_allocator<T>>(first, last,
+ zone_allocator<T>(zone)) {}
};
// A wrapper subclass std::deque to make it easy to construct one
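
The new constructor just forwards an iterator pair plus a zone_allocator to the std::vector base, so any range can be copied straight into zone memory. A hypothetical call site ('zone' is a live Zone*; the Zone setup itself is elided since its constructor varies between versions):

    std::vector<int> source = {1, 2, 3};
    ZoneVector<int> in_zone(source.begin(), source.end(), zone);
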
diff --git a/deps/v8/src/zone/zone-handle-set.h b/deps/v8/src/zone/zone-handle-set.h
new file mode 100644
index 0000000000..641c740abb
--- /dev/null
+++ b/deps/v8/src/zone/zone-handle-set.h
@@ -0,0 +1,165 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_ZONE_HANDLE_SET_H_
+#define V8_ZONE_ZONE_HANDLE_SET_H_
+
+#include "src/handles.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+template <typename T>
+class ZoneHandleSet final {
+ public:
+ ZoneHandleSet() : data_(kEmptyTag) {}
+ explicit ZoneHandleSet(Handle<T> handle)
+ : data_(bit_cast<intptr_t>(handle.address()) | kSingletonTag) {
+ DCHECK(IsAligned(bit_cast<intptr_t>(handle.address()), kPointerAlignment));
+ }
+
+ bool is_empty() const { return data_ == kEmptyTag; }
+
+ size_t size() const {
+ if ((data_ & kTagMask) == kEmptyTag) return 0;
+ if ((data_ & kTagMask) == kSingletonTag) return 1;
+ return list()->length();
+ }
+
+ Handle<T> at(size_t i) const {
+ DCHECK_NE(kEmptyTag, data_ & kTagMask);
+ if ((data_ & kTagMask) == kSingletonTag) {
+ DCHECK_EQ(0u, i);
+ return Handle<T>(singleton());
+ }
+ return Handle<T>(list()->at(static_cast<int>(i)));
+ }
+
+ Handle<T> operator[](size_t i) const { return at(i); }
+
+ void insert(Handle<T> handle, Zone* zone) {
+ T** const value = bit_cast<T**>(handle.address());
+ DCHECK(IsAligned(bit_cast<intptr_t>(value), kPointerAlignment));
+ if ((data_ & kTagMask) == kEmptyTag) {
+ data_ = bit_cast<intptr_t>(value) | kSingletonTag;
+ } else if ((data_ & kTagMask) == kSingletonTag) {
+ if (singleton() == value) return;
+ List* list = new (zone) List(2, zone);
+ if (singleton() < value) {
+ list->Add(singleton(), zone);
+ list->Add(value, zone);
+ } else {
+ list->Add(value, zone);
+ list->Add(singleton(), zone);
+ }
+ DCHECK(IsAligned(bit_cast<intptr_t>(list), kPointerAlignment));
+ data_ = bit_cast<intptr_t>(list) | kListTag;
+ } else {
+ DCHECK_EQ(kListTag, data_ & kTagMask);
+ List const* const old_list = list();
+ for (int i = 0; i < old_list->length(); ++i) {
+ if (old_list->at(i) == value) return;
+ if (old_list->at(i) > value) break;
+ }
+ List* new_list = new (zone) List(old_list->length() + 1, zone);
+ int i = 0;
+ for (; i < old_list->length(); ++i) {
+ if (old_list->at(i) > value) break;
+ new_list->Add(old_list->at(i), zone);
+ }
+ new_list->Add(value, zone);
+ for (; i < old_list->length(); ++i) {
+ new_list->Add(old_list->at(i), zone);
+ }
+ DCHECK_EQ(old_list->length() + 1, new_list->length());
+ DCHECK(IsAligned(bit_cast<intptr_t>(new_list), kPointerAlignment));
+ data_ = bit_cast<intptr_t>(new_list) | kListTag;
+ }
+ }
+
+ bool contains(ZoneHandleSet<T> const& other) const {
+ if (data_ == other.data_) return true;
+ if (data_ == kEmptyTag) return false;
+ if (other.data_ == kEmptyTag) return true;
+ if ((data_ & kTagMask) == kSingletonTag) return false;
+ DCHECK_EQ(kListTag, data_ & kTagMask);
+ if ((other.data_ & kTagMask) == kSingletonTag) {
+ return list()->Contains(other.singleton());
+ }
+ DCHECK_EQ(kListTag, other.data_ & kTagMask);
+ // TODO(bmeurer): Optimize this case.
+ for (int i = 0; i < other.list()->length(); ++i) {
+ if (!list()->Contains(other.list()->at(i))) return false;
+ }
+ return true;
+ }
+
+ void remove(Handle<T> handle, Zone* zone) {
+ // TODO(bmeurer): Optimize this case.
+ ZoneHandleSet<T> that;
+ for (size_t i = 0; i < size(); ++i) {
+ Handle<T> value = at(i);
+ if (value.address() != handle.address()) {
+ that.insert(value, zone);
+ }
+ }
+ std::swap(*this, that);
+ }
+
+ friend bool operator==(ZoneHandleSet<T> const& lhs,
+ ZoneHandleSet<T> const& rhs) {
+ if (lhs.data_ == rhs.data_) return true;
+ if ((lhs.data_ & kTagMask) == kListTag &&
+ (rhs.data_ & kTagMask) == kListTag) {
+ List const* const lhs_list = lhs.list();
+ List const* const rhs_list = rhs.list();
+ if (lhs_list->length() == rhs_list->length()) {
+ for (int i = 0; i < lhs_list->length(); ++i) {
+ if (lhs_list->at(i) != rhs_list->at(i)) return false;
+ }
+ return true;
+ }
+ }
+ return false;
+ }
+
+ friend bool operator!=(ZoneHandleSet<T> const& lhs,
+ ZoneHandleSet<T> const& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend size_t hash_value(ZoneHandleSet<T> const& set) {
+ return static_cast<size_t>(set.data_);
+ }
+
+ private:
+ typedef ZoneList<T**> List;
+
+ List const* list() const {
+ DCHECK_EQ(kListTag, data_ & kTagMask);
+ return bit_cast<List const*>(data_ - kListTag);
+ }
+
+ T** singleton() const {
+ DCHECK_EQ(kSingletonTag, data_ & kTagMask);
+ return bit_cast<T**>(data_ - kSingletonTag);
+ }
+
+ enum Tag : intptr_t {
+ kSingletonTag = 0,
+ kEmptyTag = 1,
+ kListTag = 2,
+ kTagMask = 3
+ };
+
+ STATIC_ASSERT(kTagMask < kPointerAlignment);
+
+ intptr_t data_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_ZONE_HANDLE_SET_H_
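
The new ZoneHandleSet stores its whole state in one intptr_t, reusing the low bits guaranteed free by kPointerAlignment (the STATIC_ASSERT at the bottom checks exactly this): kEmptyTag marks the empty set, kSingletonTag a lone T** slot, and kListTag a ZoneList<T**> kept sorted by slot address, which is what lets operator== compare lists element-wise. Note that insert never mutates a list in place; it builds a fresh, larger one in the zone, so copies of the set taken earlier remain valid. A standalone illustration of the tagging trick (not V8 code):

    #include <cassert>
    #include <cstdint>

    // Pointer alignment leaves the low two bits free, so one word can
    // encode the empty / singleton / list states.
    enum Tag : intptr_t { kSingletonTag = 0, kEmptyTag = 1, kListTag = 2,
                          kTagMask = 3 };

    intptr_t EncodeSingleton(void* p) {
      intptr_t bits = reinterpret_cast<intptr_t>(p);
      assert((bits & kTagMask) == 0);  // the alignment guarantee
      return bits | kSingletonTag;
    }

    void* Decode(intptr_t data) {
      return reinterpret_cast<void*>(data & ~kTagMask);
    }
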
diff --git a/deps/v8/src/zone/zone.cc b/deps/v8/src/zone/zone.cc
index 792f555897..8dd96dc1cd 100644
--- a/deps/v8/src/zone/zone.cc
+++ b/deps/v8/src/zone/zone.cc
@@ -6,6 +6,7 @@
#include <cstring>
+#include "src/utils.h"
#include "src/v8.h"
#ifdef V8_USE_ADDRESS_SANITIZER